Bug Summary

File: /volatile/halld/gluex/nightly/2024-03-18/Linux_CentOS7.7-x86_64-gcc4.8.5/halld_recon/src/libraries/FCAL/DFCALCluster_factory_Island.cc
Location: line 907, column 16
Description: Value stored to 'ndf_old' during its initialization is never read

Annotated Source Code

1// $Id$
2//
3// File: DFCALCluster_factory_Island.cc
4// Created: Fri Dec 4 08:25:47 EST 2020
5// Creator: staylor (on Linux ifarm1802.jlab.org 3.10.0-1062.4.1.el7.x86_64 x86_64)
6//
7
8
9#include <iostream>
10#include <iomanip>
11using namespace std;
12
13#include "DFCALCluster_factory_Island.h"
14using namespace jana;
15
16inline bool FCALHit_E_cmp(const DFCALHit *a,const DFCALHit *b){
17 return (a->E>b->E);
18}
19
20inline bool peak_E_cmp(DFCALCluster_factory_Island::PeakInfo a,
21 DFCALCluster_factory_Island::PeakInfo b){
22 return (a.E>b.E);
23}
24
//------------------
// init
//------------------
// One-time initialization: register every tunable cut and calibration
// parameter with the parameter manager (so it can be overridden on the
// command line / config) and book the debug histograms.
jerror_t DFCALCluster_factory_Island::init(void)
{
  // Timing window for grouping hits into a cluster candidate
  TIME_CUT=15.;
  gPARMS->SetDefaultParameter("FCAL:TIME_CUT",TIME_CUT,"time cut for associating FCAL hits together into a cluster");

  // Guard against LED events: skip clustering entirely above this hit count
  MAX_HITS_FOR_CLUSTERING = 250;
  gPARMS->SetDefaultParameter("FCAL:MAX_HITS_FOR_CLUSTERING", MAX_HITS_FOR_CLUSTERING);
  // Minimum excess (peak-subtracted) energy needed to seed an extra peak
  MIN_EXCESS_SEED_ENERGY=35.*k_MeV;
  gPARMS->SetDefaultParameter("FCAL:MIN_EXCESS_SEED_ENERGY",
			      MIN_EXCESS_SEED_ENERGY);
  // Minimum energy for a block to seed a peak within a cluster
  MIN_CLUSTER_SEED_ENERGY=35.*k_MeV;
  gPARMS->SetDefaultParameter("FCAL:MIN_CLUSTER_SEED_ENERGY",
			      MIN_CLUSTER_SEED_ENERGY);
  // Minimum reconstructed shower energy written to the output
  SHOWER_ENERGY_THRESHOLD = 50*k_MeV;
  gPARMS->SetDefaultParameter("FCAL:SHOWER_ENERGY_THRESHOLD", SHOWER_ENERGY_THRESHOLD);

  // Polynomial parameterization (in mean cluster radius R) of the shower
  // shape parameter b for the lead-glass blocks
  SHOWER_WIDTH_PAR0=0.6356;
  gPARMS->SetDefaultParameter("FCAL:SHOWER_WIDTH_PAR0",SHOWER_WIDTH_PAR0);
  SHOWER_WIDTH_PAR1=-1.516e-6;
  gPARMS->SetDefaultParameter("FCAL:SHOWER_WIDTH_PAR1",SHOWER_WIDTH_PAR1);
  SHOWER_WIDTH_PAR2=4.845e-5;
  gPARMS->SetDefaultParameter("FCAL:SHOWER_WIDTH_PAR2",SHOWER_WIDTH_PAR2);

  // Same parameterization for the insert blocks (linear in R)
  INSERT_SHOWER_WIDTH_PAR0=0.3284;
  gPARMS->SetDefaultParameter("FCAL:INSERT_SHOWER_WIDTH_PAR0",
			      INSERT_SHOWER_WIDTH_PAR0);
  INSERT_SHOWER_WIDTH_PAR1=0.000886;
  gPARMS->SetDefaultParameter("FCAL:INSERT_SHOWER_WIDTH_PAR1",
			      INSERT_SHOWER_WIDTH_PAR1);
  // Starting fraction for the step cut-down used in the FitPeaks iteration
  MIN_CUTDOWN_FRACTION=0.1;
  gPARMS->SetDefaultParameter("FCAL:MIN_CUTDOWN_FRACTION",
			      MIN_CUTDOWN_FRACTION);

  DEBUG_HISTS=false;
  gPARMS->SetDefaultParameter("FCAL:DEBUG_HISTS",DEBUG_HISTS);

  // Required improvement in chisq/ndf before accepting an additional peak
  CHISQ_MARGIN=12.5;
  gPARMS->SetDefaultParameter("FCAL:CHISQ_MARGIN",CHISQ_MARGIN);

  // Enable the Lednev-style moment-based peak splitting
  SPLIT_PEAKS=false;
  gPARMS->SetDefaultParameter("FCAL:SPLIT_PEAKS",SPLIT_PEAKS);

  // Merge cluster candidates across the lead glass/insert boundary
  MERGE_HITS_AT_BOUNDARY=true;
  gPARMS->SetDefaultParameter("FCAL:MERGE_HITS_AT_BOUNDARY",MERGE_HITS_AT_BOUNDARY);

  // Clamp on |alpha|=(E1-E2)/E0 when splitting a peak into two
  ENERGY_SHARING_CUTOFF=0.9;
  gPARMS->SetDefaultParameter("FCAL:ENERGY_SHARING_CUTOFF",ENERGY_SHARING_CUTOFF);
  APPLY_S_CURVE_CORRECTION=true;
  gPARMS->SetDefaultParameter("FCAL:APPLY_S_CURVE_CORRECTION",
			      APPLY_S_CURVE_CORRECTION);

  // S-curve position-correction parameters (lead glass and insert)
  S_CURVE_PAR1=-0.0517;
  gPARMS->SetDefaultParameter("FCAL:S_CURVE_PAR1",S_CURVE_PAR1);
  S_CURVE_PAR2=0.02565;
  gPARMS->SetDefaultParameter("FCAL:S_CURVE_PAR2",S_CURVE_PAR2);
  INSERT_S_CURVE_PAR1=-0.1593;
  gPARMS->SetDefaultParameter("FCAL:INSERT_S_CURVE_PAR1",INSERT_S_CURVE_PAR1);
  INSERT_S_CURVE_PAR2=0.02337;
  gPARMS->SetDefaultParameter("FCAL:INSERT_S_CURVE_PAR2",INSERT_S_CURVE_PAR2);

  // Debug histograms: energy residual vs E, and fit confidence level
  HistdE=new TH2D("HistdE",";E [GeV];#deltaE [GeV]",100,0,10,201,-0.25,0.25);
  HistProb=new TH1D("HistProb",";CL",100,0,1);

  return NOERROR;
}
93
//------------------
// brun
//------------------
// Per-run initialization: cache the geometry (FCAL front face relative to
// the target) and the energy-resolution parameterizations.
jerror_t DFCALCluster_factory_Island::brun(jana::JEventLoop *eventLoop, int32_t runnumber)
{
  // NOTE(review): result of the dynamic_cast is used without a null check —
  // presumably the application object is always a DApplication here; verify.
  DApplication *dapp = dynamic_cast<DApplication*>(eventLoop->GetJApplication());
  const DGeometry *geom = dapp->GetDGeometry(runnumber);

  double targetZ=0.;
  geom->GetTargetZ(targetZ);
  eventLoop->GetSingle(dFCALGeom);
  // Distance from the target center to the FCAL front face
  m_zdiff=dFCALGeom->fcalFrontZ()-targetZ;

  // Energy variance parameterization varE = p0 + p1*E + p2*E^2 (insert)
  m_insert_Eres[0]=0.0003;
  m_insert_Eres[1]=0.00025;
  m_insert_Eres[2]=4.4e-5;

  // Same parameterization for the lead-glass blocks
  m_Eres[0]=0.0006;
  m_Eres[1]=0.0025;
  m_Eres[2]=0.0009;

  return NOERROR;
}
117
//------------------
// evnt
//------------------
// Main per-event entry point. Groups FCAL hits into cluster candidates,
// finds one or more peaks in each candidate via a shower-shape fit, and
// emits one DFCALCluster per accepted peak.
jerror_t DFCALCluster_factory_Island::evnt(JEventLoop *loop, uint64_t eventnumber)
{
  vector<const DFCALHit*>fcal_hits;
  loop->Get(fcal_hits);
  if (fcal_hits.size()==0) return OBJECT_NOT_AVAILABLE;

  // LED events will have hits in nearly every channel. Do NOT
  // try clusterizing if more than 250 hits in FCAL
  if(fcal_hits.size() > MAX_HITS_FOR_CLUSTERING) return VALUE_OUT_OF_RANGE;

  // Sort the hits according to energy (highest first).
  stable_sort(fcal_hits.begin(),fcal_hits.end(),FCALHit_E_cmp);

  // Associate groups of adjacent hits into cluster candidates
  vector<vector<const DFCALHit*>>clusterCandidates;
  FindClusterCandidates(fcal_hits,clusterCandidates);

  for (unsigned int i=0;i<clusterCandidates.size();i++){
    // Skip over single hit "clusterCandidates"
    if (clusterCandidates[i].size()==1) continue;

    // Candidates with 2-3 hits: too few hits for a shape fit, so use a
    // simple energy-weighted centroid instead.
    if (clusterCandidates[i].size()<4){
      // Create a new DFCALCluster object and add it to the _data list
      DFCALCluster *myCluster= new DFCALCluster(0);
      vector<const DFCALHit*>clusterCandidate=clusterCandidates[i];

      double Etot=0.,t=0,x=0,y=0;
      for (unsigned int k=0;k<clusterCandidate.size();k++){
	double E=clusterCandidate[k]->E;
	Etot+=E;
	t+=E*clusterCandidate[k]->t;
	x+=E*clusterCandidate[k]->x;
	y+=E*clusterCandidate[k]->y;

	myCluster->AddAssociatedObject(clusterCandidate[k]);
      }
      x/=Etot;
      y/=Etot;
      t/=Etot;

      myCluster->setEnergy(Etot);
      myCluster->setTimeEWeight(t);

      // Hits are energy-sorted, so element 0 is the maximum-energy block
      int channel=dFCALGeom->channel(clusterCandidate[0]->row,
				     clusterCandidate[0]->column);
      myCluster->setChannelEmax(channel);

      // S-curve correction: pull the centroid toward the true position
      // based on its offset from the center of the peak block
      if (APPLY_S_CURVE_CORRECTION){
	DVector2 blockPos=dFCALGeom->positionOnFace(channel);
	double dx=blockPos.X()-x;
	double dy=blockPos.Y()-y;
	if (dFCALGeom->inInsert(channel)==false){
	  // 4.0157 appears to be the lead-glass block size in cm — confirm
	  x-=0.12*dx*(4.0157*4.0157/4-dx*dx);
	  y-=0.12*dy*(4.0157*4.0157/4-dy*dy);
	}
	else{
	  // 2.05 appears to be the insert block size in cm — confirm
	  double diffx=2.05*2.05/4-dx*dx;
	  double diffy=2.05*2.05/4-dy*dy;
	  x-=1.055*dx*diffx*diffx;
	  y-=1.055*dy*diffy*diffy;
	}
      }

      myCluster->setCentroid(x,y);

      _data.push_back(myCluster);

      continue;
    }

    //------------------------------------------------------------------------
    // Handle cluster candidates containing more than 3 hits
    //------------------------------------------------------------------------

    vector<const DFCALHit*>clusterHits=clusterCandidates[i];
    vector<PeakInfo>peaks;

    // Create weight matrix for hits: diagonal 1/var(E) per block
    unsigned int num_hits=clusterHits.size();
    TMatrixD W(num_hits,num_hits);
    double Esum=0.;
    double R=0.; // simple average radius of cluster
    for (unsigned int j=0;j<num_hits;j++){
      const DFCALHit *hit=clusterHits[j];
      double E=hit->E;
      double varE=0.;
      if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
	varE=m_insert_Eres[0]+m_insert_Eres[1]*E+m_insert_Eres[2]*E*E;
      }
      else{
	varE=m_Eres[0]+m_Eres[1]*E+m_Eres[2]*E*E;
      }
      W(j,j)=1./varE;
      Esum+=E;
      R+=sqrt(hit->x*hit->x+hit->y*hit->y);
    }
    R/=double(num_hits);

    double chisq=1e6,chisq_old=1e6;
    unsigned int ndf=1,ndf_old=1;
    // Find the minimum and maximum row and column numbers
    int min_row=1000,min_col=1000,max_row=0,max_col=0;
    for (unsigned int j=0;j<num_hits;j++){
      if (clusterHits[j]->row<min_row) min_row=clusterHits[j]->row;
      if (clusterHits[j]->column<min_col) min_col=clusterHits[j]->column;
      if (clusterHits[j]->row>max_row) max_row=clusterHits[j]->row;
      if (clusterHits[j]->column>max_col) max_col=clusterHits[j]->column;
    }
    // Create arrays to represent the cluster of hits to aid in peak search.
    // A one-cell empty border is kept on each side (+3, offset +1) so that
    // 3x3 neighborhood scans never index out of range.
    int num_rows=max_row-min_row+3;
    int num_cols=max_col-min_col+3;
    vector<vector<double>>Emap(num_cols,vector<double>(num_rows));
    vector<vector<unsigned int>>imap(num_cols,vector<unsigned int>(num_rows));
    // Info for block with maximum energy
    unsigned int jmax=0;
    double Emax=0.;
    for (unsigned int j=0;j<num_hits;j++){
      int ic=clusterHits[j]->column-min_col+1;
      int ir=clusterHits[j]->row-min_row+1;
      Emap[ic][ir]=clusterHits[j]->E;
      imap[ic][ir]=j;
      if (clusterHits[j]->E>Emax){
	Emax=clusterHits[j]->E;
	jmax=j;
      }
    }

    // Compute estimate for shower shape parameter b
    double b=0.;
    if (dFCALGeom->isInsertBlock(clusterHits[jmax]->row,
				 clusterHits[jmax]->column)){
      b=INSERT_SHOWER_WIDTH_PAR0+INSERT_SHOWER_WIDTH_PAR1*R;
    }
    else{
      b=SHOWER_WIDTH_PAR0+SHOWER_WIDTH_PAR1*R+SHOWER_WIDTH_PAR2*R*R;
    }

    // NOTE(review): rows >=100 appear to denote insert blocks (consistent
    // with hitPairHasInsertHit usage elsewhere) — confirm with DFCALGeometry.
    if (MERGE_HITS_AT_BOUNDARY && min_row<100 && max_row>=100){
      // Handle the interface between the insert and the lead glass blocks:
      // treat the whole candidate as a single peak.
      double Esum=0.;
      for (unsigned int j=0;j<num_hits;j++){
	double Ej=clusterHits[j]->E;
	Esum+=Ej;
      }
      if (Emax>MIN_CLUSTER_SEED_ENERGY){
	PeakInfo myPeak(Esum,clusterHits[jmax]->x,clusterHits[jmax]->y,0,0,
			num_hits);
	bool good_fit=FitPeaks(W,b,clusterHits,peaks,myPeak,chisq,ndf);
	if (good_fit){
	  peaks.push_back(myPeak);
	}
	else{
	  // Use energy-weighted average for position and put myPeak in peak
	  // list
	  myPeak.x=0.;
	  myPeak.y=0.;
	  for (unsigned int j=0;j<num_hits;j++){
	    double Ej=clusterHits[j]->E;
	    myPeak.x+=Ej*clusterHits[j]->x;
	    myPeak.y+=Ej*clusterHits[j]->y;
	  }
	  myPeak.x/=Esum;
	  myPeak.y/=Esum;
	  peaks.push_back(myPeak);

	  // Compute chisq estimate just in case we need to make a split
	  chisq=0.;
	  ndf=num_hits-3;
	  for (unsigned int j=0;j<num_hits;j++){
	    double dE=clusterHits[j]->E
	      -Esum*CalcClusterEDeriv(b,clusterHits[j],myPeak);
	    chisq+=W(j,j)*dE*dE;
	  }
	}
      }
    }
    else {
      // Handle clusters fully in insert or in lead glass region
      // Loop over hits looking for local maxima (peaks) in the energy map.
      vector<PeakInfo>peak_candidates;
      for (int ic=1;ic<num_cols-1;ic++){
	for (int ir=1;ir<num_rows-1;ir++){
	  double E=Emap[ic][ir];
	  double Esum=E;
	  // Require at least 3 fit parameters' worth of hits per peak
	  if (E>MIN_CLUSTER_SEED_ENERGY
	      && num_hits>3*(peak_candidates.size()+1)
	      ){
	    int nhits_in_peak=1;

	    bool got_peak=true;
	    int lo_col=ic-1;
	    int hi_col=ic+1;
	    int lo_row=ir-1;
	    int hi_row=ir+1;
	    // A peak block must exceed all of its 3x3 neighbors
	    for (int j=lo_col;j<=hi_col;j++){
	      for (int k=lo_row;k<=hi_row;k++){
		if (j==ic && k==ir) continue;

		double Ejk=Emap[j][k];
		if (Ejk<0.001) continue;

		got_peak=(E-Ejk>0)&&got_peak;
		if (got_peak){
		  // Accumulate energy
		  Esum+=Ejk;
		  nhits_in_peak++;
		}
	      }
	    }
	    if (got_peak){
	      double x=clusterHits[imap[ic][ir]]->x;
	      double y=clusterHits[imap[ic][ir]]->y;
	      peak_candidates.push_back(PeakInfo(Esum,x,y,ic,ir,nhits_in_peak));
	    }
	  }// cut on minimum energy of central block
	} // loop over rows
      } // loop over columns

      // Sort peak candidates by energy (highest first)
      if (peak_candidates.size()>0){
	sort(peak_candidates.begin(),peak_candidates.end(),peak_E_cmp);
      }

      // Loop over peak candidates to perform the peak fit and add good
      // candidates to the output list.
      for (size_t i=0;i<peak_candidates.size();i++){
	PeakInfo myPeak=peak_candidates[i];
	if (peaks.size()>0){
	  int ir=myPeak.ir;
	  int ic=myPeak.ic;
	  int lo_col=ic-1;
	  int hi_col=ic+1;
	  int lo_row=ir-1;
	  int hi_row=ir+1;

	  // Subtract background due to the set of peaks already in the
	  // list from the current peak
	  for (unsigned int m=0;m<peaks.size();m++){
	    double frac=0.;
	    for (int j=lo_col;j<=hi_col;j++){
	      for (int k=lo_row;k<=hi_row;k++){
		if (Emap[j][k]>0.){
		  frac+=CalcClusterEDeriv(b,clusterHits[imap[j][k]],peaks[m]);
		}
	      }
	    }
	    myPeak.E-=peaks[m].E*frac;
	  }
	}
	if (myPeak.E>MIN_CLUSTER_SEED_ENERGY){
	  // Save the current peak list
	  vector<PeakInfo>saved_peaks=peaks;
	  PeakInfo peak_guess=myPeak;

	  // Fit the data to find the best current guesses for the shower
	  // parameters for each peak within this group of FCAL hits.
	  chisq_old=chisq;
	  ndf_old=ndf;
	  bool good_fit=FitPeaks(W,b,clusterHits,peaks,myPeak,chisq,ndf);
	  if (good_fit && chisq/ndf<chisq_old/ndf_old){
	    peaks.push_back(myPeak);
	  }
	  else if (peaks.size()==0){
	    // Always add the first peak, even if the fit failed.
	    // Use the initial guess for the peak info.
	    peaks.push_back(peak_guess);

	    // Compute chisq estimate just in case we need to make a split
	    chisq=0.;
	    ndf=num_hits-3;
	    for (unsigned int j=0;j<num_hits;j++){
	      double dE=clusterHits[j]->E
		-Esum*CalcClusterEDeriv(b,clusterHits[j],peak_guess);
	      chisq+=W(j,j)*dE*dE;
	    }
	  }
	  else{
	    // No improvement from adding the new peak. Restore the old
	    // list
	    peaks=saved_peaks;
	    chisq=chisq_old;
	    ndf=ndf_old;
	  }
	} // check threshold
      } // loop over peak candidates

      // Debug histograms for the single-peak case
      if (DEBUG_HISTS&&num_hits>3&&peaks.size()==1){
	HistProb->Fill(TMath::Prob(chisq,num_hits-3));

	PeakInfo myPeakInfo=peaks[0];
	for (unsigned int k=0;k<clusterHits.size();k++){
	  double Ecalc=0.,E=clusterHits[k]->E;
	  Ecalc+=myPeakInfo.E*CalcClusterEDeriv(b,clusterHits[k],myPeakInfo);
	  HistdE->Fill(E,E-Ecalc);
	}
      }
    }

    // Look for additional peaks in the residual (peak-subtracted) energy,
    // as long as enough hits remain to constrain another 3-parameter peak.
    while (num_hits>3*(peaks.size()+1)){
      // Subtract the energy due to the fitted peaks from the energy of each
      // hit to see if we have excess energy that has not been accounted for
      vector<double>Elist(clusterHits.size());
      for (unsigned int m=0;m<clusterHits.size();m++){
	Elist[m]=clusterHits[m]->E;
	for (unsigned int k=0;k<peaks.size();k++){
	  Elist[m]-=peaks[k].E*CalcClusterEDeriv(b,clusterHits[m],peaks[k]);
	}
      }
      double Emax=0.;
      // Find the maximum of the peak-subtracted hit distribution
      unsigned int mmax=0;
      for (unsigned int m=0;m<Elist.size();m++){
	if (Elist[m]>Emax){
	  Emax=Elist[m];
	  mmax=m;
	}
      }
      if (Emax>MIN_EXCESS_SEED_ENERGY){
	// Make a peak candidate out of the excess energy in the cluster of hits
	int ic=clusterHits[mmax]->column-min_col+1;
	int ir=clusterHits[mmax]->row-min_row+1;
	int lo_col=ic-1;
	int hi_col=ic+1;
	int lo_row=ir-1;
	int hi_row=ir+1;
	int num_peak_hits=0;
	double excessE=0.;
	for (int j=lo_col;j<=hi_col;j++){
	  for (int k=lo_row;k<=hi_row;k++){
	    int index=imap[j][k];
	    if (Elist[index]>0){
	      excessE+=Elist[index];
	      num_peak_hits++;
	    }
	  }
	}
	double x=clusterHits[mmax]->x;
	double y=clusterHits[mmax]->y;
	PeakInfo myPeak(excessE,x,y,ic,ir,num_peak_hits);

	// Save the current list of peaks
	vector<PeakInfo>saved_peaks=peaks;

	// Add the new peak to the fit to see if the fit quality improves
	chisq_old=chisq;
	ndf_old=ndf;
	bool good_fit=FitPeaks(W,b,clusterHits,peaks,myPeak,chisq,ndf);
	if (good_fit && chisq/ndf+CHISQ_MARGIN<chisq_old/ndf_old){
	  peaks.push_back(myPeak);
	}
	else {
	  // Chisq did not improve. Restore the old list of peaks.
	  peaks=saved_peaks;
	  chisq=chisq_old;
	  ndf=ndf_old;
	  break;
	}
      }
      else break;

    }
    if (SPLIT_PEAKS){
      // Try to split the peaks further using moments of the hit distribution
      SplitPeaks(W,b,clusterHits,peaks,chisq,ndf);
    }

    // Estimate fraction of "seen" energy for each peak
    size_t npeaks=peaks.size();
    vector<double>peak_fractions(npeaks);
    double Ecalc_sum=0.;
    for (unsigned int k=0;k<npeaks;k++){
      Ecalc_sum+=peaks[k].E;
    }
    for (unsigned int k=0;k<npeaks;k++){
      peak_fractions[k]=peaks[k].E/Ecalc_sum;
    }

    // Add the clusters to the output of the factory
    for (unsigned int k=0;k<npeaks;k++){
      // using the measured energy gives slightly better energy resolution
      double E=peak_fractions[k]*Esum;
      if (E>SHOWER_ENERGY_THRESHOLD){
	DFCALCluster *myCluster= new DFCALCluster(0);
	myCluster->setEnergy(E);

	// Compute energy-weighted time and find channel corresponding to
	// peak position and add hits to the cluster object
	unsigned int jmax=0.;
	double fsum=0.,fmax=0.,t=0.;
	for (unsigned int j=0;j<clusterHits.size();j++){
	  double f=CalcClusterEDeriv(b,clusterHits[j],peaks[k]);
	  t+=f*clusterHits[j]->t;
	  fsum+=f;
	  if (f>fmax){
	    fmax=f;
	    jmax=j;
	  }
	}

	myCluster->setTimeEWeight(t/fsum);
	int channel=dFCALGeom->channel(clusterHits[jmax]->row,
				       clusterHits[jmax]->column);
	myCluster->setChannelEmax(channel);

	// S-curve correction of the fitted peak position
	double xc=peaks[k].x,yc=peaks[k].y;
	if (APPLY_S_CURVE_CORRECTION){
	  DVector2 blockPos=dFCALGeom->positionOnFace(channel);
	  double dx=blockPos.X()-xc;
	  double dy=blockPos.Y()-yc;
	  if (dFCALGeom->inInsert(channel)==false){
	    xc+=S_CURVE_PAR1*dx*(4.0157*4.0157/4-dx*dx)+S_CURVE_PAR2;
	    yc+=S_CURVE_PAR1*dy*(4.0157*4.0157/4-dy*dy)+S_CURVE_PAR2;
	  }
	  else{
	    xc+=INSERT_S_CURVE_PAR1*dx*(2.05*2.05/4-dx*dx)+INSERT_S_CURVE_PAR2;
	    yc+=INSERT_S_CURVE_PAR1*dy*(2.05*2.05/4-dy*dy)+INSERT_S_CURVE_PAR2;
	  }
	}
	myCluster->setCentroid(xc,yc);

	// Add hits to the cluster object as associated objects
	for (unsigned int j=0;j<clusterHits.size();j++){
	  if (npeaks==1){
	    myCluster->AddAssociatedObject(clusterHits[j]);
	  }
	  else{
	    // Output hits surrounding peak position (within 2.5 blocks)
	    double dx=clusterHits[j]->x-clusterHits[jmax]->x;
	    double dy=clusterHits[j]->y-clusterHits[jmax]->y;
	    double d=dFCALGeom->blockSize();
	    if (dFCALGeom->inInsert(channel)){
	      d=dFCALGeom->insertBlockSize();
	    }
	    double dcut=2.5*d;
	    if (fabs(dx)<dcut && fabs(dy)<dcut){
	      myCluster->AddAssociatedObject(clusterHits[j]);
	    }
	  }
	}

	_data.push_back(myCluster);
      }
    }
  }

  return NOERROR;
}
579
//------------------
// erun
//------------------
// Called at the end of each run; no per-run cleanup is needed here.
jerror_t DFCALCluster_factory_Island::erun(void)
{
  return NOERROR;
}
587
//------------------
// fini
//------------------
// Called once at program end; no final cleanup is needed here.
jerror_t DFCALCluster_factory_Island::fini(void)
{
  return NOERROR;
}
595
// Make a list of potential clusters, each consisting of a "central" block
// and all adjacent blocks, starting with the block with the highest energy.
// Each successive cluster candidate starts with the block with the next
// highest energy that has not already been included in another cluster.
// Precondition: fcal_hits is non-empty and sorted by decreasing energy.
void DFCALCluster_factory_Island::FindClusterCandidates(vector<const DFCALHit*>&fcal_hits,
							vector<vector<const DFCALHit*>>&clusterCandidates) const {
  vector<const DFCALHit*>clusterCandidate;
  clusterCandidate.push_back(fcal_hits[0]);

  vector<bool>used_hits(fcal_hits.size());
  used_hits[0]=true;

  int hits_left=fcal_hits.size()-1;
  int begin_i=1;
  while (hits_left>0){
    // Attach to the current seed every unused hit that is adjacent
    // (within one row AND one column) and in time with it
    for (size_t i=begin_i;i<fcal_hits.size();i++){
      if (used_hits[i]==true) continue;

      double dt=clusterCandidate[0]->t-fcal_hits[i]->t;
      if (fabs(dt)>TIME_CUT) continue;

      int drow=clusterCandidate[0]->row-fcal_hits[i]->row;
      int dcol=clusterCandidate[0]->column-fcal_hits[i]->column;
      if (abs(drow)<=1 && abs(dcol)<=1){
	clusterCandidate.push_back(fcal_hits[i]);

	used_hits[i]=true;
	hits_left--;
      }
    }

    clusterCandidates.push_back(clusterCandidate);
    clusterCandidate.clear();
    // Seed the next candidate with the highest-energy unused hit
    for (size_t i=begin_i;i<fcal_hits.size();i++){
      if (used_hits[i]==false){
	begin_i=i+1;
	clusterCandidate.push_back(fcal_hits[i]);

	used_hits[i]=true;
	hits_left--;

	break;
      }
    }
  }
  if (clusterCandidate.size()>0) clusterCandidates.push_back(clusterCandidate);

  // At this point we do not necessarily have complete clusters.
  // Merge cluster candidates that appear belong to a single cluster, again
  // looking for adjacent hits.
  double borderCut=0.5*dFCALGeom->blockSize()+dFCALGeom->insertBlockSize();
  vector<vector<const DFCALHit*>>::iterator iter=clusterCandidates.begin();
  while (iter!=clusterCandidates.end()){
    bool matched=false;
    vector<vector<const DFCALHit*>>::iterator iter2=iter+1;
    for (;iter2!=clusterCandidates.end();iter2++){
      for (unsigned int i=0;i<(*iter).size();i++){
	for (unsigned int j=0;j<(*iter2).size();j++){
	  double dt=(*iter)[i]->t-(*iter2)[j]->t;
	  if (fabs(dt)>TIME_CUT) continue;

	  int row1=(*iter)[i]->row;
	  int col1=(*iter)[i]->column;
	  int row2=(*iter2)[j]->row;
	  int col2=(*iter2)[j]->column;

	  if (abs(row1-row2)<=1 && abs(col1-col2)<=1){
	    matched=true;
	    break;
	  }

	  // look for adjacent clusters between the lead glass and the insert,
	  // if present; there adjacency must be judged by distance, not
	  // by row/column indices, since the grids differ
	  if (MERGE_HITS_AT_BOUNDARY
	      && dFCALGeom->hitPairHasInsertHit(row1,row2)){
	    double dx=(*iter)[i]->x-(*iter2)[j]->x;
	    double dy=(*iter)[i]->y-(*iter2)[j]->y;
	    if (fabs(dx)<borderCut && fabs(dy)<borderCut){
	      matched=true;
	      break;
	    }
	  }
	}
	if (matched){
	  // Add the hits from this cluster to the cluster to which it is
	  // matched and erase it from the list.
	  for (unsigned int j=0;j<(*iter2).size();j++){
	    (*iter).push_back((*iter2)[j]);
	  }
	  clusterCandidates.erase(iter2);
	  break;
	}
      }
      if (matched==true){
	// A merge invalidates the iterators: restart the scan from the top
	iter=clusterCandidates.begin();
	break;
      }
    }
    if (matched==false) iter++;
  }
}
698
// Fit peaks within a cluster containing a list of hits contained in hitList.
// Performs an iterative weighted least-squares fit of (E,x,y) for all peaks
// in "peaks" plus the trial peak "myNewPeak", updating chisq and ndf.
// Returns false if the fit wanders out of the fiducial region, produces a
// negative energy, or ends with a peak position not on any hit block; in
// that case the caller should discard myNewPeak.
bool DFCALCluster_factory_Island::FitPeaks(const TMatrixD &W,double b,
					   vector<const DFCALHit*>&hitList,
					   vector<PeakInfo>&peaks,
					   PeakInfo &myNewPeak,double &chisq,
					   unsigned int &ndf
					   ) const {
  size_t nhits=hitList.size();
  size_t npeaks=peaks.size();
  // Save the current peak info
  vector<PeakInfo>saved_peaks=peaks;
  PeakInfo saved_new_peak=myNewPeak;

  // Iterate to find best shower energy and position
  chisq=1e6;
  unsigned int min_iter=10,max_iter=200;
  double min_frac=MIN_CUTDOWN_FRACTION;
  double cutdown_scale=(1.-min_frac)/double(min_iter); //for cut-down for parameter adjustment
  for (unsigned int k=0;k<max_iter;k++){
    // Make sure the energy is positive!
    if (myNewPeak.E<0){
      return false;
    }
    //Check that the new peak positions are still within the fiducial area of
    // the detector
    if (dFCALGeom->isFiducial(myNewPeak.x,myNewPeak.y)==false){
      return false;
    }

    // Matrix of per-block differences between measured and calculated energies
    TMatrixD dE(nhits,1);
    // Matrix containing partial derivatives of the shower profile function
    // with respect to the three parameters (E, xc, yc): Jacobian matrix
    TMatrixD A(nhits,3*npeaks+3);
    // Loop over all the hits to fill dE and A
    for (unsigned int i=0;i<nhits;i++){
      double Ecalc=0.,df_dE=0;
      for (unsigned int j=0;j<npeaks;j++){
	PeakInfo myPeakInfo=peaks[j];

	// Make sure the energy is positive!
	if (myPeakInfo.E<0){
	  return false;
	}

	//Check that the new peak positions are still within the fiducial
	//area of the detector
	if (dFCALGeom->isFiducial(myPeakInfo.x,myPeakInfo.y)==false){
	  return false;
	}

	// Compute the Jacobian matrix elements
	df_dE=CalcClusterEDeriv(b,hitList[i],myPeakInfo);
	A(i,3*j+0)=df_dE;
	A(i,3*j+1)=CalcClusterXYDeriv(true,b,hitList[i],myPeakInfo);
	A(i,3*j+2)=CalcClusterXYDeriv(false,b,hitList[i],myPeakInfo);

	Ecalc+=myPeakInfo.E*df_dE;
      }
      // Add contributions from the peak we wish to add
      df_dE=CalcClusterEDeriv(b,hitList[i],myNewPeak);
      A(i,3*npeaks+0)=df_dE;
      A(i,3*npeaks+1)=CalcClusterXYDeriv(true,b,hitList[i],myNewPeak);
      A(i,3*npeaks+2)=CalcClusterXYDeriv(false,b,hitList[i],myNewPeak);

      Ecalc+=myNewPeak.E*df_dE;

      double Ediff=hitList[i]->E-Ecalc;
      dE(i,0)=Ediff;
    }
    // Compute chi^2 for this iteration
    double chisq_new=0.;
    for (unsigned int i=0;i<nhits;i++) chisq_new+=W(i,i)*dE(i,0)*dE(i,0);
    // After the minimum number of iterations, stop on convergence
    // (relative change < 1e-4) or when chisq starts increasing
    if (k>=min_iter){
      if (fabs(chisq_new-chisq)/chisq<0.0001) break;
      if (chisq_new>chisq) break;
    }

    // Save the current value of chisq
    chisq=chisq_new;

    // Save the current best peak values
    saved_new_peak=myNewPeak;
    saved_peaks.assign(peaks.begin(),peaks.end());

    // Determine the set of corrections needed to bring the computed shower
    // shape closer to the measurements: dPar = (A^T W A)^-1 A^T W dE
    TMatrixD A_T(TMatrixD::kTransposed,A);
    TMatrixD AT_A=A_T*W*A;
    TMatrixD InvATA(TMatrixD::kInverted,AT_A);
    // The function describing the cluster profile is rather nastily non-linear,
    // so we perform a cut-down on the maximum shift vector, ramping the
    // applied fraction up from MIN_CUTDOWN_FRACTION to 1 over min_iter steps.
    double frac=min_frac+cutdown_scale*double(k);
    TMatrixD dPar=frac*(InvATA*A_T*W*dE);

    // Update the peak parameters
    for (unsigned int i=0;i<npeaks;i++){
      peaks[i].E+=dPar(3*i+0,0);
      peaks[i].x+=dPar(3*i+1,0);
      peaks[i].y+=dPar(3*i+2,0);
    }
    myNewPeak.E+=dPar(3*npeaks+0,0);
    myNewPeak.x+=dPar(3*npeaks+1,0);
    myNewPeak.y+=dPar(3*npeaks+2,0);
  }

  // Number of degrees of freedom
  ndf=nhits-3*(peaks.size()+1);

  // Peak info for output of the routine. At this stage myNewPeak, for example,
  // has been adjusted away from the solution with the best chisq value.
  myNewPeak=saved_new_peak;
  peaks.assign(saved_peaks.begin(),saved_peaks.end());

  // Sanity check for peak position: does it correspond to one of the hit
  // blocks?
  vector<int>peak_channels;
  peak_channels.push_back(dFCALGeom->channel(myNewPeak.x,myNewPeak.y));
  for (unsigned int i=0;i<peaks.size();i++){
    peak_channels.push_back(dFCALGeom->channel(peaks[i].x,peaks[i].y));
  }
  vector<int>hit_channels(nhits);
  for (unsigned int i=0;i<nhits;i++){
    hit_channels[i]=dFCALGeom->channel(hitList[i]->row,hitList[i]->column);
  }
  for (unsigned int j=0;j<peak_channels.size();j++){
    int my_peak_channel=peak_channels[j];
    if (find(hit_channels.begin(),hit_channels.end(),my_peak_channel)==hit_channels.end()){
      return false;
    }
  }

  return true;
}
840
841// Routine to compute the partial derivatives of the shower profile function
842// with respect to the peak position parameters (xpeak,ypeak)
843double DFCALCluster_factory_Island::CalcClusterXYDeriv(bool isXDeriv,double b,
844 const DFCALHit *hit,
845 const PeakInfo &myPeakInfo) const {
846 double sign1[4]={1,1,-1,-1};
847 double sign2[4]={1,-1,-1,1};
848 double half_block=0.5*dFCALGeom->blockSize();
849 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
850 half_block=0.5*dFCALGeom->insertBlockSize();
851 }
852 double b2=b*b;
853 double f=0.;
854 double dxc=hit->x-myPeakInfo.x;
855 double dyc=hit->y-myPeakInfo.y;
856 double Ec=myPeakInfo.E;
857 for (int i=0;i<4;i++){
858 double dx=dxc+sign1[i]*half_block;
859 double dy=dyc+sign2[i]*half_block;
860 double dx2=dx*dx;
861 double dy2=dy*dy;
862 double factor=-sign1[i]*sign2[i]*Ec*b/(2*M_PI3.14159265358979323846)/sqrt(b2*dx2+dy2);
863 if (isXDeriv){
864 f+=factor*dy/(b2+dx2);
865 }
866 else {
867 f+=factor*dx/(b2+dy2);
868 }
869 }
870 return f;
871}
872
873// Shower profile function from Bland, et al., Instruments and Experimental
874// Techniques, 2008, Vol. 51, No. 3, pp. 342-350, eq. 6. Note that the
875// normalization of this function in this equation does not appear to be
876// correct (it's off by 1/sqrt(2pi)).
877double DFCALCluster_factory_Island::CalcClusterEDeriv(double b,
878 const DFCALHit *hit,
879 const PeakInfo &myPeakInfo) const {
880 double sign1[4]={1,1,-1,-1};
881 double sign2[4]={1,-1,-1,1};
882 double half_block=0.5*dFCALGeom->blockSize();
883 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
884 half_block=0.5*dFCALGeom->insertBlockSize();
885 }
886 double f=0.;
887 double dxc=hit->x-myPeakInfo.x;
888 double dyc=hit->y-myPeakInfo.y;
889 for (int i=0;i<4;i++){
890 double dx=dxc+sign1[i]*half_block;
891 double dy=dyc+sign2[i]*half_block;
892 f+=sign1[i]*sign2[i]/(2*M_PI3.14159265358979323846)*atan(dx*dy/(b*sqrt(b*b+dx*dx+dy*dy)));
893 }
894 return f;
895}
896
897// Try to split peaks into two following the algorithm (barely) described in
898// Lednev, IHEP 93-153.
899void DFCALCluster_factory_Island::SplitPeaks(const TMatrixD &W,double b,
900 vector<const DFCALHit*>&hits,
901 vector<PeakInfo>&peaks,
902 double &chisq,unsigned int &ndf) const{
903 unsigned int npeaks=peaks.size(),nhits=hits.size();
904 vector<PeakInfo>saved_peaks=peaks;
905
906 double chisq_old=chisq;
907 unsigned int ndf_old=ndf;
Value stored to 'ndf_old' during its initialization is never read
908 // Find the centroid of the hits for each peak region
909 for (unsigned int i=0;i<npeaks;i++){
910 double E0=0.,x0=0.,y0=0.;
911 vector<double>Elist(nhits);
912 for (unsigned int j=0;j<nhits;j++){
913 double Ecalc=0.;
914 for (unsigned int k=0;k<peaks.size();k++){
915 if (i==k) continue;
916 Ecalc+=peaks[k].E*CalcClusterEDeriv(b,hits[j],peaks[k]);
917 }
918 Elist[j]=hits[j]->E-Ecalc;
919 E0+=Elist[j];
920 x0+=Elist[j]*hits[j]->x;
921 y0+=Elist[j]*hits[j]->y;
922 }
923 x0/=E0;
924 y0/=E0;
925
926 // Find the second moments about the center of the cluster of hits
927 double xx=0.,yy=0.,yx=0.;
928 for (unsigned int j=0;j<nhits;j++){
929 double dx=hits[j]->x-x0;
930 double dy=hits[j]->y-y0;
931 xx+=Elist[j]*dx*dx;
932 yy+=Elist[j]*dy*dy;
933 yx+=Elist[j]*dx*dy;
934 }
935
936 // Determine the parameter alpha=(E1-E2)/E0 specifying the energy sharing
937 // between the two candidate peaks
938 double dxy=xx-yy;
939 double rsq2 = dxy*dxy + 4.*yx*yx;
940 if( rsq2 < 1.e-20 ) rsq2 = 1.e-20;
941 double rsq = sqrt(rsq2);
942 double dxc = -sqrt((rsq+dxy)*2.);
943 double dyc = sqrt((rsq-dxy)*2.);
944 if( yx >= 0. ) dyc = -dyc;
945 double r = sqrt(dxc*dxc + dyc*dyc);
946 double alpha = 0.;
947 for(unsigned int i=0;i<nhits;i++) {
948 double u=(hits[i]->x-x0)*dxc/r + (hits[i]->y-y0)*dyc/r;
949 alpha-=Elist[i]*u*fabs(u);
950 }
951 alpha/=E0*rsq;
952 // Make sure alpha is not too small or too big
953 if (alpha<-ENERGY_SHARING_CUTOFF) alpha=-ENERGY_SHARING_CUTOFF;
954 if (alpha> ENERGY_SHARING_CUTOFF) alpha= ENERGY_SHARING_CUTOFF;
955
956 // Find the first guess for the energies of the two potential peaks
957 double alpha_plus_factor=0.5*(1.+alpha);
958 double alpha_minus_factor=0.5*(1.-alpha);
959 double E1=E0*alpha_plus_factor;
960 double E2=E0*alpha_minus_factor;
961 if (E1<SHOWER_ENERGY_THRESHOLD || E2<SHOWER_ENERGY_THRESHOLD){
962 // Bail if either of the energies is below the shower threshold
963 continue;
964 }
965
966 // Scale dxc and dyc to find estimates for the peak separation in x and y
967 double scale=1./sqrt(1.-alpha*alpha);
968 dxc*=scale;
969 dyc*=scale;
970
971 // Estimates for peak positions
972 double x1=x0+dxc*alpha_plus_factor;
973 double y1=y0+dyc*alpha_plus_factor;
974 double x2=x0+dxc*alpha_minus_factor;
975 double y2=y0+dyc*alpha_minus_factor;
976
977 PeakInfo myNewPeak(E2,x2,y2,0,0,0);
978 peaks[i].E=E1;
979 peaks[i].x=x1;
980 peaks[i].y=y1;
981
982 // Refit with the split peaks
983 chisq_old=chisq;
984 ndf_old=ndf;
985 bool good_fit=FitPeaks(W,b,hits,peaks,myNewPeak,chisq,ndf);
986 if (good_fit && chisq/ndf+CHISQ_MARGIN<chisq_old/ndf_old){
987 //cout << "used splitter" << endl;
988 peaks.push_back(myNewPeak);
989 }
990 else {
991 // No improvement from adding the new peak. Restore the old list.
992 peaks=saved_peaks;
993 chisq=chisq_old;
994 ndf=ndf_old;
995 }
996 }
997}