Bug Summary

File: libraries/FCAL/DFCALCluster_factory_Island.cc
Location: line 835, column 10
Description: Value stored to 'chisq_old' during its initialization is never read

Annotated Source Code

1// $Id$
2//
3// File: DFCALCluster_factory_Island.cc
4// Created: Fri Dec 4 08:25:47 EST 2020
5// Creator: staylor (on Linux ifarm1802.jlab.org 3.10.0-1062.4.1.el7.x86_64 x86_64)
6//
7
8
9#include <iostream>
10#include <iomanip>
11using namespace std;
12
13#include "DFCALCluster_factory_Island.h"
14using namespace jana;
15
16inline bool FCALHit_E_cmp(const DFCALHit *a,const DFCALHit *b){
17 return (a->E>b->E);
18}
19
20inline bool peak_E_cmp(DFCALCluster_factory_Island::PeakInfo a,
21 DFCALCluster_factory_Island::PeakInfo b){
22 return (a.E>b.E);
23}
24
25//------------------
26// init
27//------------------
28jerror_t DFCALCluster_factory_Island::init(void)
29{
30 TIME_CUT=15.;
31 gPARMS->SetDefaultParameter("FCAL:TIME_CUT",TIME_CUT,"time cut for associating FCAL hits together into a cluster");
32
33 MAX_HITS_FOR_CLUSTERING = 250;
34 gPARMS->SetDefaultParameter("FCAL:MAX_HITS_FOR_CLUSTERING", MAX_HITS_FOR_CLUSTERING);
35
36 MIN_CLUSTER_SEED_ENERGY=35.*k_MeV;
37 gPARMS->SetDefaultParameter("FCAL:MIN_CLUSTER_SEED_ENERGY",
38 MIN_CLUSTER_SEED_ENERGY);
39 SHOWER_ENERGY_THRESHOLD = 50*k_MeV;
40 gPARMS->SetDefaultParameter("FCAL:SHOWER_ENERGY_THRESHOLD", SHOWER_ENERGY_THRESHOLD);
41
42 SHOWER_WIDTH_PARAMETER=0.72;
43 gPARMS->SetDefaultParameter("FCAL:SHOWER_WIDTH_PARAMETER",
44 SHOWER_WIDTH_PARAMETER);
45 INSERT_SHOWER_WIDTH_PARAMETER=0.31;
46 gPARMS->SetDefaultParameter("FCAL:INSERT_SHOWER_WIDTH_PARAMETER",
47 INSERT_SHOWER_WIDTH_PARAMETER);
48 MIN_CUTDOWN_FRACTION=0.1;
49 gPARMS->SetDefaultParameter("FCAL:MIN_CUTDOWN_FRACTION",
50 MIN_CUTDOWN_FRACTION);
51
52 DEBUG_HISTS=false;
53 gPARMS->SetDefaultParameter("FCAL:DEBUG_HISTS",DEBUG_HISTS);
54
55 CHISQ_MARGIN=5.;
56 gPARMS->SetDefaultParameter("FCAL:CHISQ_MARGIN",CHISQ_MARGIN);
57
58 MASS_CUT=5e-5;
59 gPARMS->SetDefaultParameter("FCAL:MASS_CUT",MASS_CUT);
60
61 ENERGY_SHARING_CUTOFF=0.9;
62 gPARMS->SetDefaultParameter("FCAL:ENERGY_SHARING_CUTOFF",ENERGY_SHARING_CUTOFF);
63
64 HistdE=new TH2D("HistdE",";E [GeV];#deltaE [GeV]",100,0,10,201,-0.25,0.25);
65 HistProb=new TH1D("HistProb",";CL",100,0,1);
66
67 return NOERROR;
68}
69
70//------------------
71// brun
72//------------------
73jerror_t DFCALCluster_factory_Island::brun(jana::JEventLoop *eventLoop, int32_t runnumber)
74{
75 DApplication *dapp = dynamic_cast<DApplication*>(eventLoop->GetJApplication());
76 const DGeometry *geom = dapp->GetDGeometry(runnumber);
77
78 double targetZ=0.;
79 geom->GetTargetZ(targetZ);
80 eventLoop->GetSingle(dFCALGeom);
81 m_zdiff=dFCALGeom->fcalFrontZ()-targetZ;
82
83 m_insert_Eres[0]=0.0003;
84 m_insert_Eres[1]=0.00025;
85 m_insert_Eres[2]=4.4e-5;
86
87 m_Eres[0]=0.0006;
88 m_Eres[1]=0.0025;
89 m_Eres[2]=0.0009;
90
91 return NOERROR;
92}
93
94//------------------
95// evnt
96//------------------
97jerror_t DFCALCluster_factory_Island::evnt(JEventLoop *loop, uint64_t eventnumber)
98{
99 vector<const DFCALHit*>fcal_hits;
100 loop->Get(fcal_hits);
101 if (fcal_hits.size()==0) return OBJECT_NOT_AVAILABLE;
102
103 // LED events will have hits in nearly every channel. Do NOT
104 // try clusterizing if more than 250 hits in FCAL
105 if(fcal_hits.size() > MAX_HITS_FOR_CLUSTERING) return VALUE_OUT_OF_RANGE;
106
107 // Sort the hits according to energy.
108 stable_sort(fcal_hits.begin(),fcal_hits.end(),FCALHit_E_cmp);
109
110 // Associate groups of adjacent hits into cluster candidates
111 vector<vector<const DFCALHit*>>clusterCandidates;
112 FindClusterCandidates(fcal_hits,clusterCandidates);
113
114 for (unsigned int i=0;i<clusterCandidates.size();i++){
115 // Skip over single hit "clusterCandidates"
116 if (clusterCandidates[i].size()==1) continue;
117
118 // Mininum number of hits to make a shower = 2
119 if (clusterCandidates[i].size()<4){
120 // Create a new DFCALCluster object and add it to the _data list
121 DFCALCluster *myCluster= new DFCALCluster(0);
122 vector<const DFCALHit*>clusterCandidate=clusterCandidates[i];
123
124 double Etot=0.,t=0,x=0,y=0;
125 for (unsigned int k=0;k<clusterCandidate.size();k++){
126 double E=clusterCandidate[k]->E;
127 Etot+=E;
128 t+=E*clusterCandidate[k]->t;
129 x+=E*clusterCandidate[k]->x;
130 y+=E*clusterCandidate[k]->y;
131
132 myCluster->AddAssociatedObject(clusterCandidate[k]);
133 }
134 x/=Etot;
135 y/=Etot;
136 t/=Etot;
137
138 myCluster->setEnergy(Etot);
139 myCluster->setTimeEWeight(t);
140
141 int channel=dFCALGeom->channel(clusterCandidate[0]->row,
142 clusterCandidate[0]->column);
143 myCluster->setChannelEmax(channel);
144 myCluster->setCentroid(x,y);
145
146 _data.push_back(myCluster);
147
148 continue;
149 }
150
151 //------------------------------------------------------------------------
152 // Handle cluster candidates containing more than 3 hits
153 //------------------------------------------------------------------------
154 vector<const DFCALHit*>clusterHits=clusterCandidates[i];
155 vector<PeakInfo>peaks;
156
157 // Create weight matrix for hits
158 unsigned int num_hits=clusterHits.size();
159 TMatrixD W(num_hits,num_hits);
160 double Esum=0.;
161 for (unsigned int i=0;i<num_hits;i++){
162 const DFCALHit *hit=clusterHits[i];
163 double E=hit->E;
164 double varE=0.;
165 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
166 varE=m_insert_Eres[0]+m_insert_Eres[1]*E+m_insert_Eres[2]*E*E;
167 }
168 else{
169 varE=m_Eres[0]+m_Eres[1]*E+m_Eres[2]*E*E;
170 }
171 W(i,i)=1./varE;
172 Esum+=E;
173 }
174
175 double chisq=1e6,chisq_old=1e6;
176 unsigned int ndf=1,ndf_old=1;
177 // Find the minimum and maximum row and column numbers
178 int min_row=1000,min_col=1000,max_row=0,max_col=0;
179 for (unsigned int j=0;j<num_hits;j++){
180 if (clusterHits[j]->row<min_row) min_row=clusterHits[j]->row;
181 if (clusterHits[j]->column<min_col) min_col=clusterHits[j]->column;
182 if (clusterHits[j]->row>max_row) max_row=clusterHits[j]->row;
183 if (clusterHits[j]->column>max_col) max_col=clusterHits[j]->column;
184 }
185 // Create arrays to represent the cluster of hits to aid in peak search
186 int num_rows=max_row-min_row+3;
187 int num_cols=max_col-min_col+3;
188 vector<vector<double>>Emap(num_cols,vector<double>(num_rows));
189 vector<vector<unsigned int>>imap(num_cols,vector<unsigned int>(num_rows));
190 for (unsigned int j=0;j<num_hits;j++){
191 int ic=clusterHits[j]->column-min_col+1;
192 int ir=clusterHits[j]->row-min_row+1;
193 Emap[ic][ir]=clusterHits[j]->E;
194 imap[ic][ir]=j;
195 }
196
197 if (min_row<100 && max_row>=100){
198 // Handle the interface between the insert and the lead glass blocks
199 // Find the block with the maximum energy and make a peak object
200 double Emax=0.,Esum=0.;
201 unsigned int jmax=0;
202 for (unsigned int j=0;j<num_hits;j++){
203 double Ej=clusterHits[j]->E;
204 Esum+=Ej;
205 if (Ej>Emax){
206 Emax=Ej;
207 jmax=j;
208 }
209 }
210 if (Emax>MIN_CLUSTER_SEED_ENERGY){
211 PeakInfo myPeak(Esum,clusterHits[jmax]->x,clusterHits[jmax]->y,0,0,
212 num_hits);
213 bool good_fit=FitPeaks(W,clusterHits,peaks,myPeak,chisq,ndf);
214 if (good_fit){
215 peaks.push_back(myPeak);
216 }
217 else{
218 // Use energy-weighted average for position and put myPeak in peak
219 // list
220 myPeak.x=0.;
221 myPeak.y=0.;
222 for (unsigned int j=0;j<num_hits;j++){
223 double Ej=clusterHits[j]->E;
224 myPeak.x+=Ej*clusterHits[j]->x;
225 myPeak.y+=Ej*clusterHits[j]->y;
226 }
227 myPeak.x/=Esum;
228 myPeak.y/=Esum;
229 peaks.push_back(myPeak);
230
231 // Compute chisq estimate just in case we need to make a split
232 chisq=0.;
233 ndf=num_hits-3;
234 for (unsigned int j=0;j<num_hits;j++){
235 double dE=clusterHits[j]->E
236 -Esum*CalcClusterEDeriv(clusterHits[j],myPeak);
237 chisq+=W(j,j)*dE*dE;
238 }
239 }
240 }
241 }
242 else {
243 // Handle clusters fully in insert or in lead glass region
244 // Loop over hits looking for peaks.
245 vector<PeakInfo>peak_candidates;
246 for (int ic=1;ic<num_cols-1;ic++){
247 for (int ir=1;ir<num_rows-1;ir++){
248 double E=Emap[ic][ir];
249 double Esum=E;
250 if (E>MIN_CLUSTER_SEED_ENERGY
251 && num_hits>3*(peak_candidates.size()+1)
252 ){
253 int nhits_in_peak=1;
254
255 bool got_peak=true;
256 int lo_col=ic-1;
257 int hi_col=ic+1;
258 int lo_row=ir-1;
259 int hi_row=ir+1;
260 for (int j=lo_col;j<=hi_col;j++){
261 for (int k=lo_row;k<=hi_row;k++){
262 if (j==ic && k==ir) continue;
263
264 double Ejk=Emap[j][k];
265 if (Ejk<0.001) continue;
266
267 got_peak=(E-Ejk>0)&&got_peak;
268 if (got_peak){
269 // Accumulate energy
270 Esum+=Ejk;
271 nhits_in_peak++;
272 }
273 }
274 }
275 if (got_peak){
276 double x=clusterHits[imap[ic][ir]]->x;
277 double y=clusterHits[imap[ic][ir]]->y;
278 peak_candidates.push_back(PeakInfo(Esum,x,y,ic,ir,nhits_in_peak));
279 }
280 }// cut on minimum energy of central block
281 } // loop over rows
282 } // loop over columns
283
284 // Sort peak candidates by energy
285 if (peak_candidates.size()>0){
286 sort(peak_candidates.begin(),peak_candidates.end(),peak_E_cmp);
287 }
288
289 // Loop over peak candidates to perform the peak fit and add good
290 // candidates to the output list.
291 for (size_t i=0;i<peak_candidates.size();i++){
292 PeakInfo myPeak=peak_candidates[i];
293 if (peaks.size()>0){
294 int ir=myPeak.ir;
295 int ic=myPeak.ic;
296 int lo_col=ic-1;
297 int hi_col=ic+1;
298 int lo_row=ir-1;
299 int hi_row=ir+1;
300
301 // Subtract background due to the set of peaks already in the
302 // list from the current peak
303 for (unsigned int m=0;m<peaks.size();m++){
304 double frac=0.;
305 for (int j=lo_col;j<=hi_col;j++){
306 for (int k=lo_row;k<=hi_row;k++){
307 if (Emap[j][k]>0.){
308 frac+=CalcClusterEDeriv(clusterHits[imap[j][k]],peaks[m]);
309 }
310 }
311 }
312 myPeak.E-=peaks[m].E*frac;
313 }
314 }
315
316 if (myPeak.E>MIN_CLUSTER_SEED_ENERGY){
317 // Save the current peak list
318 vector<PeakInfo>saved_peaks=peaks;
319 PeakInfo peak_guess=myPeak;
320
321 // Fit the data to find the best current guesses for the shower
322 // parameters for each peak within this group of FCAL hits.
323 chisq_old=chisq;
324 ndf_old=ndf;
325 bool good_fit=FitPeaks(W,clusterHits,peaks,myPeak,chisq,ndf);
326 if (good_fit && chisq/ndf<chisq_old/ndf_old){
327 // Check that the difference between this candidate and the peaks
328 // in the list is physical (i.e., could be compatible with a pi0)
329 bool keep_photon_candidate=CheckPeak(peaks,myPeak);
330 if (keep_photon_candidate){
331 peaks.push_back(myPeak);
332 }
333 else {
334 // Difference between this photon and the others in the list is
335 // not physical. Restore the old list.
336 peaks=saved_peaks;
337 chisq=chisq_old;
338 ndf=ndf_old;
339 }
340 }
341 else if (peaks.size()==0){
342 // Always add the first peak, even if the fit failed.
343 // Use the initial guess for the peak info.
344 peaks.push_back(peak_guess);
345
346 // Compute chisq estimate just in case we need to make a split
347 chisq=0.;
348 ndf=num_hits-3;
349 for (unsigned int j=0;j<num_hits;j++){
350 double dE=clusterHits[j]->E
351 -Esum*CalcClusterEDeriv(clusterHits[j],peak_guess);
352 chisq+=W(j,j)*dE*dE;
353 }
354 }
355 else{
356 // No improvement from adding the new peak. Restore the old
357 // list
358 peaks=saved_peaks;
359 chisq=chisq_old;
360 ndf=ndf_old;
361 }
362 } // check threshold
363 } // loop over peak candidates
364
365 if (DEBUG_HISTS&&num_hits>3&&peaks.size()==1){
366 HistProb->Fill(TMath::Prob(chisq,num_hits-3));
367
368 PeakInfo myPeakInfo=peaks[0];
369 for (unsigned int k=0;k<clusterHits.size();k++){
370 double Ecalc=0.,E=clusterHits[k]->E;
371 Ecalc+=myPeakInfo.E*CalcClusterEDeriv(clusterHits[k],myPeakInfo);
372 HistdE->Fill(E,E-Ecalc);
373 }
374 }
375 }
376
377 if (num_hits>3*(peaks.size()+1)){
378 // Subtract the energy due to the fitted peaks from the energy of each
379 // hit to see if we have excess energy that has not been accounted for
380 vector<double>Elist(clusterHits.size());
381 for (unsigned int m=0;m<clusterHits.size();m++){
382 Elist[m]=clusterHits[m]->E;
383 for (unsigned int k=0;k<peaks.size();k++){
384 Elist[m]-=peaks[k].E*CalcClusterEDeriv(clusterHits[m],peaks[k]);
385 }
386 }
387 double Emax=0.;
388 // Find the maximum of the peak-subtracted hit distribution
389 unsigned int mmax=0;
390 for (unsigned int m=0;m<Elist.size();m++){
391 if (Elist[m]>Emax){
392 Emax=Elist[m];
393 mmax=m;
394 }
395 }
396 if (Emax>MIN_CLUSTER_SEED_ENERGY){
397 // Make a peak candidate out of the excess energy in the cluster of hits
398 int ic=clusterHits[mmax]->column-min_col+1;
399 int ir=clusterHits[mmax]->row-min_row+1;
400 int lo_col=ic-1;
401 int hi_col=ic+1;
402 int lo_row=ir-1;
403 int hi_row=ir+1;
404 int num_hits=0;
405 double Esum=0.;
406 for (int j=lo_col;j<=hi_col;j++){
407 for (int k=lo_row;k<=hi_row;k++){
408 int index=imap[j][k];
409 if (Elist[index]>0){
410 Esum+=Elist[index];
411 num_hits++;
412 }
413 }
414 }
415 double x=clusterHits[mmax]->x;
416 double y=clusterHits[mmax]->y;
417 PeakInfo myPeak(Esum,x,y,ic,ir,num_hits);
418
419 // Save the current list of peaks
420 vector<PeakInfo>saved_peaks=peaks;
421
422 // Add the new peak to the fit to see if the fit quality improves
423 chisq_old=chisq;
424 ndf_old=ndf;
425 bool good_fit=FitPeaks(W,clusterHits,peaks,myPeak,chisq,ndf);
426 if (good_fit && chisq/ndf+CHISQ_MARGIN<chisq_old/ndf_old
427 && CheckPeak(peaks,myPeak)
428 ){
429 peaks.push_back(myPeak);
430 }
431 else {
432 // Chisq did not improve. Restore the old list of peaks.
433 peaks=saved_peaks;
434 chisq=chisq_old;
435 ndf=ndf_old;
436 }
437 }
438 // Try to split the peaks further using moments of the hit distribution
439 SplitPeaks(W,clusterHits,peaks,chisq,ndf);
440 }
441
442 // Estimate fraction of "seen" energy for each peak
443 size_t npeaks=peaks.size();
444 vector<double>peak_fractions(npeaks);
445 double Ecalc_sum=0.;
446 for (unsigned int k=0;k<npeaks;k++){
447 Ecalc_sum+=peaks[k].E;
448 }
449 for (unsigned int k=0;k<npeaks;k++){
450 peak_fractions[k]=peaks[k].E/Ecalc_sum;
451 }
452
453 // Add the clusters to the output of the factory
454 for (unsigned int k=0;k<npeaks;k++){
455 // using the measured energy gives slightly better energy resolution
456 double E=peak_fractions[k]*Esum;
457 //cout << "E " << E << " " << peaks[k].E << endl;
458 if (E>SHOWER_ENERGY_THRESHOLD){
459 DFCALCluster *myCluster= new DFCALCluster(0);
460 myCluster->setEnergy(E);
461
462 // Compute energy-weighted time and find channel corresponding to
463 // peak position and add hits to the cluster object
464 unsigned int jmax=0.;
465 double fsum=0.,fmax=0.,t=0.;
466 for (unsigned int j=0;j<clusterHits.size();j++){
467 double f=CalcClusterEDeriv(clusterHits[j],peaks[k]);
468 t+=f*clusterHits[j]->t;
469 fsum+=f;
470 if (f>fmax){
471 fmax=f;
472 jmax=j;
473 }
474 }
475
476 myCluster->setTimeEWeight(t/fsum);
477 int channel=dFCALGeom->channel(clusterHits[jmax]->row,
478 clusterHits[jmax]->column);
479 myCluster->setChannelEmax(channel);
480 myCluster->setCentroid(peaks[k].x,peaks[k].y);
481
482 // Add hits to the cluster object as associated objects
483 for (unsigned int j=0;j<clusterHits.size();j++){
484 if (npeaks==1){
485 myCluster->AddAssociatedObject(clusterHits[j]);
486 }
487 else{
488 // Output hits surrounding peak position
489 double dx=clusterHits[j]->x-clusterHits[jmax]->x;
490 double dy=clusterHits[j]->y-clusterHits[jmax]->y;
491 double d=dFCALGeom->blockSize();
492 if (dFCALGeom->inInsert(channel)){
493 d=dFCALGeom->insertBlockSize();
494 }
495 double dcut=2.5*d;
496 if (fabs(dx)<dcut && fabs(dy)<dcut){
497 myCluster->AddAssociatedObject(clusterHits[j]);
498 }
499 }
500 }
501
502 _data.push_back(myCluster);
503 }
504 }
505 }
506
507 return NOERROR;
508}
509
510//------------------
511// erun
512//------------------
513jerror_t DFCALCluster_factory_Island::erun(void)
514{
515 return NOERROR;
516}
517
518//------------------
519// fini
520//------------------
521jerror_t DFCALCluster_factory_Island::fini(void)
522{
523 return NOERROR;
524}
525
526// Make a list of potential clusters, each consisting of a "central" block
527// and all adjacent blocks, starting with the block with the highest energy.
528// Each successive cluster candidate starts with the block with the next
529// highest energy that has not already been included in another cluster.
530void DFCALCluster_factory_Island::FindClusterCandidates(vector<const DFCALHit*>&fcal_hits,
531 vector<vector<const DFCALHit*>>&clusterCandidates) const {
532 vector<const DFCALHit*>clusterCandidate;
533 clusterCandidate.push_back(fcal_hits[0]);
534
535 vector<bool>used_hits(fcal_hits.size());
536 used_hits[0]=true;
537
538 int hits_left=fcal_hits.size()-1;
539 int begin_i=1;
540 while (hits_left>0){
541 for (size_t i=begin_i;i<fcal_hits.size();i++){
542 if (used_hits[i]==true) continue;
543
544 double dt=clusterCandidate[0]->t-fcal_hits[i]->t;
545 if (fabs(dt)>TIME_CUT) continue;
546
547 int drow=clusterCandidate[0]->row-fcal_hits[i]->row;
548 int dcol=clusterCandidate[0]->column-fcal_hits[i]->column;
549 if (abs(drow)<=1 && abs(dcol)<=1){
550 clusterCandidate.push_back(fcal_hits[i]);
551
552 used_hits[i]=true;
553 hits_left--;
554 }
555 }
556
557 clusterCandidates.push_back(clusterCandidate);
558 clusterCandidate.clear();
559 for (size_t i=begin_i;i<fcal_hits.size();i++){
560 if (used_hits[i]==false){
561 begin_i=i+1;
562 clusterCandidate.push_back(fcal_hits[i]);
563
564 used_hits[i]=true;
565 hits_left--;
566
567 break;
568 }
569 }
570 }
571 if (clusterCandidate.size()>0) clusterCandidates.push_back(clusterCandidate);
572
573 // At this point we do not necessarily have complete clusters.
574 // Merge cluster candidates that appear belong to a single cluster, again
575 // looking for adjacent hits.
576 double borderCut=0.5*dFCALGeom->blockSize()+dFCALGeom->insertBlockSize();
577 vector<vector<const DFCALHit*>>::iterator iter=clusterCandidates.begin();
578 while (iter!=clusterCandidates.end()){
579 bool matched=false;
580 vector<vector<const DFCALHit*>>::iterator iter2=iter+1;
581 for (;iter2!=clusterCandidates.end();iter2++){
582 for (unsigned int i=0;i<(*iter).size();i++){
583 for (unsigned int j=0;j<(*iter2).size();j++){
584 double dt=(*iter)[i]->t-(*iter2)[j]->t;
585 if (fabs(dt)>TIME_CUT) continue;
586
587 int row1=(*iter)[i]->row;
588 int col1=(*iter)[i]->column;
589 int row2=(*iter2)[j]->row;
590 int col2=(*iter2)[j]->column;
591
592 if (abs(row1-row2)<=1 && abs(col1-col2)<=1){
593 matched=true;
594 break;
595 }
596
597 // look for adjacent clusters between the lead glass and the insert,
598 // if present
599 if (dFCALGeom->hitPairHasInsertHit(row1,row2)){
600 double dx=(*iter)[i]->x-(*iter2)[j]->x;
601 double dy=(*iter)[i]->y-(*iter2)[j]->y;
602 if (fabs(dx)<borderCut && fabs(dy)<borderCut){
603 matched=true;
604 break;
605 }
606 }
607 }
608 if (matched){
609 // Add the hits from this cluster to the cluster to which it is
610 // matched and erase it from the list.
611 for (unsigned int j=0;j<(*iter2).size();j++){
612 (*iter).push_back((*iter2)[j]);
613 }
614 clusterCandidates.erase(iter2);
615 break;
616 }
617 }
618 if (matched==true){
619 iter=clusterCandidates.begin();
620 break;
621 }
622 }
623 if (matched==false) iter++;
624 }
625}
626
627// Fit peaks within a cluster containing a list of hits contained in hitList
628bool DFCALCluster_factory_Island::FitPeaks(const TMatrixD &W,
629 vector<const DFCALHit*>&hitList,
630 vector<PeakInfo>&peaks,
631 PeakInfo &myNewPeak,double &chisq,
632 unsigned int &ndf
633 ) const {
634 size_t nhits=hitList.size();
635 size_t npeaks=peaks.size();
636 // Save the current peak info
637 vector<PeakInfo>saved_peaks=peaks;
638 PeakInfo saved_new_peak=myNewPeak;
639
640 // Iterate to find best shower energy and position
641 chisq=1e6;
642 unsigned int max_iter=200;
643 double min_frac=MIN_CUTDOWN_FRACTION;
644 double cutdown_scale=(1.-min_frac)/double(max_iter); //for cut-down for parameter adjustment
645 for (unsigned int k=0;k<max_iter;k++){
646 // Make sure the energy is positive!
647 if (myNewPeak.E<0){
648 return false;
649 }
650 //Check that the new peak positions are still within the fiducial area of
651 // the detector
652 if (dFCALGeom->isFiducial(myNewPeak.x,myNewPeak.y)==false){
653 return false;
654 }
655
656 // Matrix of per-block differences between measured and calculated energies
657 TMatrixD dE(nhits,1);
658 // Matrix containing partical derivatives of the shower profile function
659 // with respect to the three parameters (E, xc, yc): Jacobian matrix
660 TMatrixD A(nhits,3*npeaks+3);
661 // Loop over all the hits to fill dE and A
662 for (unsigned int i=0;i<nhits;i++){
663 double Ecalc=0.,df_dE=0;
664 for (unsigned int j=0;j<npeaks;j++){
665 PeakInfo myPeakInfo=peaks[j];
666
667 // Make sure the energy is positive!
668 if (myPeakInfo.E<0){
669 //_DBG_ <<endl;
670 return false;
671 }
672
673 //Check that the new peak positions are still within the fiducial
674 //area of the detector
675 if (dFCALGeom->isFiducial(myPeakInfo.x,myPeakInfo.y)==false){
676 //_DBG_ << myPeakInfo.x << " " << myPeakInfo.y << endl;
677 return false;
678 }
679
680 // Compute the Jacobian matrix elements
681 df_dE=CalcClusterEDeriv(hitList[i],myPeakInfo);
682 A(i,3*j+0)=df_dE;
683 A(i,3*j+1)=CalcClusterXYDeriv(true,hitList[i],myPeakInfo);
684 A(i,3*j+2)=CalcClusterXYDeriv(false,hitList[i],myPeakInfo);
685
686 Ecalc+=myPeakInfo.E*df_dE;
687 }
688 // Add contributions from the peak we wish to add
689 df_dE=CalcClusterEDeriv(hitList[i],myNewPeak);
690 A(i,3*npeaks+0)=df_dE;
691 A(i,3*npeaks+1)=CalcClusterXYDeriv(true,hitList[i],myNewPeak);
692 A(i,3*npeaks+2)=CalcClusterXYDeriv(false,hitList[i],myNewPeak);
693
694 Ecalc+=myNewPeak.E*df_dE;
695
696 double Ediff=hitList[i]->E-Ecalc;
697 dE(i,0)=Ediff;
698 //cout << " Ediff " << Ediff << endl;
699 }
700 // Compute chi^2 for this iteration
701 double chisq_new=0.;
702 for (unsigned int i=0;i<nhits;i++) chisq_new+=W(i,i)*dE(i,0)*dE(i,0);
703 // cout << endl;
704 // cout << k << " chisq "<< chisq_new << " " << (chisq_new-chisq_old)/chisq_old <<endl;
705 //cout << endl;
706 if (fabs(chisq_new-chisq)/chisq<0.0001) break;
707 if (chisq_new>chisq) break;
708
709 // Save the current value of chisq
710 chisq=chisq_new;
711
712 // Save the current best peak values
713 saved_new_peak=myNewPeak;
714 saved_peaks.assign(peaks.begin(),peaks.end());
715
716 // Determine the set of corrections needed to bring the computed shower
717 // shape closer to the measurements
718 TMatrixD A_T(TMatrixD::kTransposed,A);
719 TMatrixD AT_A=A_T*W*A;
720 TMatrixD InvATA(TMatrixD::kInverted,AT_A);
721 // The function describing the cluster profile is rather nastily non-linear,
722 // so we perform a cut-down on the maximum shift vector.
723 double frac=min_frac+cutdown_scale*double(k);
724 TMatrixD dPar=frac*(InvATA*A_T*W*dE);
725
726 // Update the peak parameters
727 for (unsigned int i=0;i<npeaks;i++){
728 peaks[i].E+=dPar(3*i+0,0);
729 peaks[i].x+=dPar(3*i+1,0);
730 peaks[i].y+=dPar(3*i+2,0);
731 }
732 myNewPeak.E+=dPar(3*npeaks+0,0);
733 myNewPeak.x+=dPar(3*npeaks+1,0);
734 myNewPeak.y+=dPar(3*npeaks+2,0);
735 }
736
737 // Number of degrees of freedom
738 ndf=nhits-3*(peaks.size()+1);
739
740 // Peak info for output of the routine. At this stage myNewPeak, for example,
741 // has been adjusted away from the solution with the best chisq value.
742 myNewPeak=saved_new_peak;
743 peaks.assign(saved_peaks.begin(),saved_peaks.end());
744
745 // Sanity check for peak position: does it correspond to one of the hit
746 // blocks?
747 vector<int>peak_channels;
748 peak_channels.push_back(dFCALGeom->channel(myNewPeak.x,myNewPeak.y));
749 for (unsigned int i=0;i<peaks.size();i++){
750 peak_channels.push_back(dFCALGeom->channel(peaks[i].x,peaks[i].y));
751 }
752 vector<int>hit_channels(nhits);
753 for (unsigned int i=0;i<nhits;i++){
754 hit_channels[i]=dFCALGeom->channel(hitList[i]->row,hitList[i]->column);
755 }
756 for (unsigned int j=0;j<peak_channels.size();j++){
757 int my_peak_channel=peak_channels[j];
758 if (find(hit_channels.begin(),hit_channels.end(),my_peak_channel)==hit_channels.end()){
759 //_DBG_ << endl;
760 return false;
761 }
762 }
763
764 return true;
765}
766
767// Routine to compute the partial derivatives of the shower profile function
768// with respect to the peak position parameters (xpeak,ypeak)
769double DFCALCluster_factory_Island::CalcClusterXYDeriv(bool isXDeriv,
770 const DFCALHit *hit,
771 const PeakInfo &myPeakInfo) const {
772 double sign1[4]={1,1,-1,-1};
773 double sign2[4]={1,-1,-1,1};
774 double half_block=0.5*dFCALGeom->blockSize();
775 double b=SHOWER_WIDTH_PARAMETER; // cm
776 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
777 half_block=0.5*dFCALGeom->insertBlockSize();
778 b=INSERT_SHOWER_WIDTH_PARAMETER;
779 }
780 double b2=b*b;
781 double f=0.;
782 double dxc=hit->x-myPeakInfo.x;
783 double dyc=hit->y-myPeakInfo.y;
784 double Ec=myPeakInfo.E;
785 for (int i=0;i<4;i++){
786 double dx=dxc+sign1[i]*half_block;
787 double dy=dyc+sign2[i]*half_block;
788 double dx2=dx*dx;
789 double dy2=dy*dy;
790 double factor=-sign1[i]*sign2[i]*Ec*b/(2*M_PI3.14159265358979323846)/sqrt(b2*dx2+dy2);
791 if (isXDeriv){
792 f+=factor*dy/(b2+dx2);
793 }
794 else {
795 f+=factor*dx/(b2+dy2);
796 }
797 }
798 return f;
799}
800
801// Shower profile function from Bland, et al., Instruments and Experimental
802// Techniques, 2008, Vol. 51, No. 3, pp. 342-350, eq. 6. Note that the
803// normalization of this function in this equation does not appear to be
804// correct (it's off by 1/sqrt(2pi)).
805double DFCALCluster_factory_Island::CalcClusterEDeriv(const DFCALHit *hit,
806 const PeakInfo &myPeakInfo) const {
807 double sign1[4]={1,1,-1,-1};
808 double sign2[4]={1,-1,-1,1};
809 double half_block=0.5*dFCALGeom->blockSize();
810 double b=SHOWER_WIDTH_PARAMETER; // cm
811 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
812 half_block=0.5*dFCALGeom->insertBlockSize();
813 b=INSERT_SHOWER_WIDTH_PARAMETER;
814 }
815 double f=0.;
816 double dxc=hit->x-myPeakInfo.x;
817 double dyc=hit->y-myPeakInfo.y;
818 for (int i=0;i<4;i++){
819 double dx=dxc+sign1[i]*half_block;
820 double dy=dyc+sign2[i]*half_block;
821 f+=sign1[i]*sign2[i]/(2*M_PI3.14159265358979323846)*atan(dx*dy/(b*sqrt(b*b+dx*dx+dy*dy)));
822 }
823 return f;
824}
825
826// Try to split peaks into two following the algorithm (barely) described in
827// Lednev, IHEP 93-153.
828void DFCALCluster_factory_Island::SplitPeaks(const TMatrixD &W,
829 vector<const DFCALHit*>&hits,
830 vector<PeakInfo>&peaks,
831 double &chisq,unsigned int &ndf) const{
832 unsigned int npeaks=peaks.size(),nhits=hits.size();
833 vector<PeakInfo>saved_peaks=peaks;
834
835 double chisq_old=chisq;
Value stored to 'chisq_old' during its initialization is never read
836 unsigned int ndf_old=ndf;
837 // Find the centroid of the hits for each peak region
838 for (unsigned int i=0;i<npeaks;i++){
839 double E0=0.,x0=0.,y0=0.;
840 vector<double>Elist(nhits);
841 for (unsigned int j=0;j<nhits;j++){
842 double Ecalc=0.;
843 for (unsigned int k=0;k<peaks.size();k++){
844 if (i==k) continue;
845 Ecalc+=peaks[k].E*CalcClusterEDeriv(hits[j],peaks[k]);
846 }
847 Elist[j]=hits[j]->E-Ecalc;
848 E0+=Elist[j];
849 x0+=Elist[j]*hits[j]->x;
850 y0+=Elist[j]*hits[j]->y;
851 }
852 x0/=E0;
853 y0/=E0;
854
855 // Find the second moments about the center of the cluster of hits
856 double xx=0.,yy=0.,yx=0.;
857 for (unsigned int j=0;j<nhits;j++){
858 double dx=hits[j]->x-x0;
859 double dy=hits[j]->y-y0;
860 xx+=Elist[j]*dx*dx;
861 yy+=Elist[j]*dy*dy;
862 yx+=Elist[j]*dx*dy;
863 }
864
865 // Determine the parameter alpha=(E1-E2)/E0 specifying the energy sharing
866 // between the two candidate peaks
867 double dxy=xx-yy;
868 double rsq2 = dxy*dxy + 4.*yx*yx;
869 if( rsq2 < 1.e-20 ) rsq2 = 1.e-20;
870 double rsq = sqrt(rsq2);
871 double dxc = -sqrt((rsq+dxy)*2.);
872 double dyc = sqrt((rsq-dxy)*2.);
873 if( yx >= 0. ) dyc = -dyc;
874 double r = sqrt(dxc*dxc + dyc*dyc);
875 double alpha = 0.;
876 for(unsigned int i=0;i<nhits;i++) {
877 double u=(hits[i]->x-x0)*dxc/r + (hits[i]->y-y0)*dyc/r;
878 alpha-=Elist[i]*u*fabs(u);
879 }
880 alpha/=E0*rsq;
881 // Make sure alpha is not too small or too big
882 if (alpha<-ENERGY_SHARING_CUTOFF) alpha=-ENERGY_SHARING_CUTOFF;
883 if (alpha> ENERGY_SHARING_CUTOFF) alpha= ENERGY_SHARING_CUTOFF;
884
885 // Find the first guess for the energies of the two potential peaks
886 double alpha_plus_factor=0.5*(1.+alpha);
887 double alpha_minus_factor=0.5*(1.-alpha);
888 double E1=E0*alpha_plus_factor;
889 double E2=E0*alpha_minus_factor;
890 if (E1<SHOWER_ENERGY_THRESHOLD || E2<SHOWER_ENERGY_THRESHOLD){
891 // Bail if either of the energies is below the shower threshold
892 continue;
893 }
894
895 // Scale dxc and dyc to find estimates for the peak separation in x and y
896 double scale=1./sqrt(1.-alpha*alpha);
897 dxc*=scale;
898 dyc*=scale;
899
900 // Estimates for peak positions
901 double x1=x0+dxc*alpha_plus_factor;
902 double y1=y0+dyc*alpha_plus_factor;
903 double x2=x0+dxc*alpha_minus_factor;
904 double y2=y0+dyc*alpha_minus_factor;
905
906 PeakInfo myNewPeak(E2,x2,y2,0,0,0);
907 peaks[i].E=E1;
908 peaks[i].x=x1;
909 peaks[i].y=y1;
910
911 // Refit with the split peaks
912 chisq_old=chisq;
913 ndf_old=ndf;
914 bool good_fit=FitPeaks(W,hits,peaks,myNewPeak,chisq,ndf);
915 if (good_fit && chisq/ndf+CHISQ_MARGIN<chisq_old/ndf_old){
916 // Check that the difference between this candidate and the peaks in the
917 // list is physical (i.e., could be compatible with a pi0)
918 bool keep_photon_candidate=CheckPeak(peaks,myNewPeak);
919 if (keep_photon_candidate){
920 peaks.push_back(myNewPeak);
921 }
922 else {
923 // Difference between this photon and the others in the list is not
924 // physical. Restore the old list.
925 peaks=saved_peaks;
926 chisq=chisq_old;
927 ndf=ndf_old;
928 }
929 }
930 else {
931 // No improvement from adding the new peak. Restore the old list.
932 peaks=saved_peaks;
933 chisq=chisq_old;
934 ndf=ndf_old;
935 }
936 }
937}
938
939// Check that the difference between this candidate and the peaks in the list
940//is physical (i.e., could be compatible with a pi0)
941bool DFCALCluster_factory_Island::CheckPeak(const vector<PeakInfo>&peaks,const PeakInfo &myNewPeak) const{
942 DVector3 dir1(myNewPeak.x,myNewPeak.y,m_zdiff);
943 dir1.SetMag(1.);
944 for (unsigned int m=0;m<peaks.size();m++){
945 DVector3 dir2(peaks[m].x,peaks[m].y,m_zdiff);
946 dir2.SetMag(1.);
947 double m2=peaks[m].E*myNewPeak.E*(1.-dir1.Dot(dir2));
948 if (m2<MASS_CUT){
949 return false;
950 }
951 }
952
953 return true;
954}