Bug Summary

File: libraries/FCAL/DFCALCluster_factory_Island.cc
Location: line 700, column 10
Description: Value stored to 'chisq_old' during its initialization is never read
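
Suggested fix (a minimal sketch, not part of the analyzer output): within SplitPeaks, chisq_old is unconditionally reassigned from chisq at line 776 before its first read at line 778, so the initializer at line 700 is a dead store. One option is to drop that initializer and declare the variable at its point of first use; behavior is unchanged because every read of chisq_old follows the assignment at line 776 within the same loop iteration.

  // Hypothetical rework of SplitPeaks (sketch only): the declaration at
  // line 700 is removed and chisq_old is introduced just before the refit.

  // Refit with the split peaks
  double chisq_old=chisq;
  bool good_fit=FitPeaks(W,hits,peaks,myNewPeak,chisq);
  if (good_fit && chisq+CHISQ_MARGIN<chisq_old){
    peaks.push_back(myNewPeak);
  }
  else {
    // No improvement from adding the new peak. Restore the old list
    peaks=saved_peaks;
    chisq=chisq_old;
  }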

Annotated Source Code

1// $Id$
2//
3// File: DFCALCluster_factory_Island.cc
4// Created: Fri Dec 4 08:25:47 EST 2020
5// Creator: staylor (on Linux ifarm1802.jlab.org 3.10.0-1062.4.1.el7.x86_64 x86_64)
6//
7
8
9#include <iostream>
10#include <iomanip>
11using namespace std;
12
13#include "DFCALCluster_factory_Island.h"
14using namespace jana;
15
16inline bool FCALHit_E_cmp(const DFCALHit *a,const DFCALHit *b){
17 return (a->E>b->E);
18}
19
20
21//------------------
22// init
23//------------------
24jerror_t DFCALCluster_factory_Island::init(void)
25{
26 TIME_CUT=15.;
27 gPARMS->SetDefaultParameter("FCAL:TIME_CUT",TIME_CUT,"time cut for associating FCAL hits together into a cluster");
28
29 MIN_CLUSTER_SEED_ENERGY=35.*k_MeV;
30 gPARMS->SetDefaultParameter("FCAL:MIN_CLUSTER_SEED_ENERGY",
31 MIN_CLUSTER_SEED_ENERGY);
32 SHOWER_ENERGY_THRESHOLD = 50*k_MeV;
33 gPARMS->SetDefaultParameter("FCAL:SHOWER_ENERGY_THRESHOLD", SHOWER_ENERGY_THRESHOLD);
34
35 SHOWER_WIDTH_PARAMETER=0.625;
36 gPARMS->SetDefaultParameter("FCAL:SHOWER_WIDTH_PARAMETER",
37 SHOWER_WIDTH_PARAMETER);
38 INSERT_SHOWER_WIDTH_PARAMETER=0.3;
39 gPARMS->SetDefaultParameter("FCAL:INSERT_SHOWER_WIDTH_PARAMETER",
40 INSERT_SHOWER_WIDTH_PARAMETER);
41 MIN_CUTDOWN_FRACTION=0.25;
42 gPARMS->SetDefaultParameter("FCAL:MIN_CUTDOWN_FRACTION",
43 MIN_CUTDOWN_FRACTION);
44
45 DEBUG_HISTS=false;
46 gPARMS->SetDefaultParameter("FCAL:DEBUG_HISTS",DEBUG_HISTS);
47
48 CHISQ_MARGIN=5.;
49 gPARMS->SetDefaultParameter("FCAL:CHISQ_MARGIN",CHISQ_MARGIN);
50
51 HistdE=new TH2D("HistdE",";E [GeV];#deltaE [GeV]",100,0,10,200,-0.5,0.5);
52 HistProb=new TH1D("HistProb",";CL",100,0,1);
53
54 return NOERROR;
55}
56
57//------------------
58// brun
59//------------------
60jerror_t DFCALCluster_factory_Island::brun(jana::JEventLoop *eventLoop, int32_t runnumber)
61{
62 eventLoop->GetSingle(dFCALGeom);
63
64 insert_Eres[0]=0.0003;
65 insert_Eres[1]=0.00025;
66 insert_Eres[2]=4.4e-5;
67 Eres[0]=0.0005;
68 Eres[1]=0.001225;
69 Eres[2]=9.0e-4;
70
71 posConst1=0.0147;
72 posConst2=6.587e-4;
73 posConst3=0.017265;
74 gPARMS->SetDefaultParameter("FCAL:POS_CORR_P1", posConst1);
75 gPARMS->SetDefaultParameter("FCAL:POS_CORR_P2", posConst2);
76 gPARMS->SetDefaultParameter("FCAL:POS_CORR_P3", posConst3);
77 insertPosConst1=0.08699;
78 insertPosConst2=8.23e-3;
79 insertPosConst3=0.1146;
80 gPARMS->SetDefaultParameter("FCAL:INSERT_POS_CORR_P1", insertPosConst1);
81 gPARMS->SetDefaultParameter("FCAL:INSERT_POS_CORR_P2", insertPosConst2);
82 gPARMS->SetDefaultParameter("FCAL:INSERT_POS_CORR_P3", insertPosConst3);
83
84 return NOERROR;
85}
86
87//------------------
88// evnt
89//------------------
90jerror_t DFCALCluster_factory_Island::evnt(JEventLoop *loop, uint64_t eventnumber)
91{
92 vector<const DFCALHit*>fcal_hits;
93 loop->Get(fcal_hits);
94 if (fcal_hits.size()==0) return OBJECT_NOT_AVAILABLE;
95
96 // Sort the hits according to energy.
97 stable_sort(fcal_hits.begin(),fcal_hits.end(),FCALHit_E_cmp);
98
99 // Associate groups of adjacent hits into cluster candidates
100 vector<vector<const DFCALHit*>>clusterCandidates;
101 FindClusterCandidates(fcal_hits,clusterCandidates);
102
103 for (unsigned int i=0;i<clusterCandidates.size();i++){
104 // Skip over single hit "clusterCandidates"
105 if (clusterCandidates[i].size()==1) continue;
106
107 // Minimum number of hits to make a shower = 2
108 if (clusterCandidates[i].size()<4){
109 // Create a new DFCALCluster object and add it to the _data list
110 DFCALCluster *myCluster= new DFCALCluster(0);
111 vector<const DFCALHit*>clusterCandidate=clusterCandidates[i];
112
113 double Etot=0.,t=0,x=0,y=0;
114 for (unsigned int k=0;k<clusterCandidate.size();k++){
115 double E=clusterCandidate[k]->E;
116 Etot+=E;
117 t+=E*clusterCandidate[k]->t;
118 x+=E*clusterCandidate[k]->x;
119 y+=E*clusterCandidate[k]->y;
120
121 myCluster->AddAssociatedObject(clusterCandidate[k]);
122 }
123 x/=Etot;
124 y/=Etot;
125 t/=Etot;
126
127 myCluster->setEnergy(Etot);
128 myCluster->setTimeEWeight(t);
129
130 int channel=dFCALGeom->channel(clusterCandidate[0]->row,
131 clusterCandidate[0]->column);
132 myCluster->setChannelEmax(channel);
133
134 // apply S-curve correction
135 double d=dFCALGeom->blockSize();
136 if (dFCALGeom->inInsert(channel)){
137 d=dFCALGeom->insertBlockSize();
138 }
139 CorrectPosition(channel,d,x,y);
140 myCluster->setCentroid(x,y);
141
142 _data.push_back(myCluster);
143
144 continue;
145 }
146
147 //------------------------------------------------------------------------
148 // Handle cluster candidates containing more than 3 hits
149 //------------------------------------------------------------------------
150 vector<const DFCALHit*>clusterHits=clusterCandidates[i];
151 vector<PeakInfo>peaks;
152
153 // Create weight matrix for hits
154 unsigned int num_hits=clusterHits.size();
155 TMatrixD W(num_hits,num_hits);
156 double Esum=0.;
157 for (unsigned int i=0;i<num_hits;i++){
158 const DFCALHit *hit=clusterHits[i];
159 double E=hit->E;
160 double varE=0.;
161 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
162 varE=insert_Eres[0]+insert_Eres[1]*E+insert_Eres[2]*E*E;
163 }
164 else{
165 varE=Eres[0]+Eres[1]*E+Eres[2]*E*E;
166 }
167 W(i,i)=1./varE;
168 Esum+=E;
169 }
170
171 int min_row=1000,min_col=1000,max_row=0,max_col=0;
172 for (unsigned int j=0;j<num_hits;j++){
173 if (clusterHits[j]->row<min_row) min_row=clusterHits[j]->row;
174 if (clusterHits[j]->column<min_col) min_col=clusterHits[j]->column;
175 if (clusterHits[j]->row>max_row) max_row=clusterHits[j]->row;
176 if (clusterHits[j]->column>max_col) max_col=clusterHits[j]->column;
177 }
178 // Create arrays to represent the cluster of hits to aid in peak search
179 int num_rows=max_row-min_row+3;
180 int num_cols=max_col-min_col+3;
181 vector<vector<double>>Emap(num_cols,vector<double>(num_rows));
182 vector<vector<unsigned int>>imap(num_cols,vector<unsigned int>(num_rows));
183 for (unsigned int j=0;j<num_hits;j++){
184 int ic=clusterHits[j]->column-min_col+1;
185 int ir=clusterHits[j]->row-min_row+1;
186 Emap[ic][ir]=clusterHits[j]->E;
187 imap[ic][ir]=j;
188 }
189
190 double chisq=1e6,chisq_old=1e6;
191
192 // Loop over hits looking for peaks.
193 for (int ic=1;ic<num_cols-1;ic++){
194 for (int ir=1;ir<num_rows-1;ir++){
195 double E=Emap[ic][ir];
196 if (E>MIN_CLUSTER_SEED_ENERGY){
197 double x=E*clusterHits[imap[ic][ir]]->x;
198 double y=E*clusterHits[imap[ic][ir]]->y;
199 int nhits_in_peak=1;
200
201 bool got_peak=true;
202 int lo_col=ic-1;
203 int hi_col=ic+1;
204 int lo_row=ir-1;
205 int hi_row=ir+1;
206 for (int j=lo_col;j<=hi_col;j++){
207 for (int k=lo_row;k<=hi_row;k++){
208 if (j==ic && k==ir) continue;
209
210 double Ejk=Emap[j][k];
211 if (Ejk<0.001) continue;
212
213 got_peak=(E-Ejk>0)&&got_peak;
214 if (got_peak){
215 // Accumulate energy and energy weighted position variables
216 E+=Ejk;
217 x+=Ejk*clusterHits[imap[j][k]]->x;
218 y+=Ejk*clusterHits[imap[j][k]]->y;
219
220 nhits_in_peak++;
221 }
222 }
223 }
224 if (got_peak){
225 double Ecalc=0.;
226 bool below_threshold=false;
227 if (peaks.size()>0){
228 // Find the expected amount of energy in the block with the most
229 // energy for this peak candidate
230 unsigned int ihit=imap[ic][ir];
231 for (unsigned int m=0;m<peaks.size();m++){
232 Ecalc+=peaks[m].E*CalcClusterEDeriv(clusterHits[ihit],
233 peaks[m]);
234 }
235 // Check that the excess energy is above threshold
236 if (Emap[ic][ir]-Ecalc<MIN_CLUSTER_SEED_ENERGY){
237 below_threshold=true;
238 }
239 }
240 if (below_threshold==false){
241 x/=E;
242 y/=E;
243 PeakInfo myPeak(E,x,y);
244
245 // Subtract background due to the set of peaks already in the
246 // list from the current peak
247 myPeak.E-=Ecalc;
248 for (unsigned int m=0;m<peaks.size();m++){
249 double frac=0.;
250 for (int j=lo_col;j<=hi_col;j++){
251 for (int k=lo_row;k<=hi_row;k++){
252 if (k==ir&&j==ic) continue;
253 if (Emap[j][k]>0.) {
254 frac+=CalcClusterEDeriv(clusterHits[imap[j][k]],
255 peaks[m]);
256 }
257 }
258 }
259 myPeak.E-=peaks[m].E*frac;
260 }
261
262 // Fit the data to find the best current guesses for the shower
263 // parameters for each peak within this group of FCAL hits.
264 if (nhits_in_peak>2){
265 // Save the current peak list
266 vector<PeakInfo>saved_peaks=peaks;
267 PeakInfo peak_guess=myPeak;
268
269 chisq_old=chisq;
270 bool good_fit=FitPeaks(W,clusterHits,peaks,myPeak,chisq);
271 if (good_fit && chisq<chisq_old){
272 peaks.push_back(myPeak);
273 }
274 else if (peaks.size()==0){
275 // Always add the first peak, even if the fit failed.
276 // Use the initial guess for the peak info.
277 peaks.push_back(peak_guess);
278 }
279 else{
280 // No improvement from adding the new peak. Restore the old
281 // list
282 peaks=saved_peaks;
283 chisq=chisq_old;
284 }
285 }
286 else {
287 // Don't try to fit with 2 hit peak candidates -- just add
288 // them to the list.
289 peaks.push_back(myPeak);
290 }
291 } // check threshold
292 } // found a peak?
293 } // cut on minimum energy of central block
294 } // loop over rows
295 } // loop over columns
296
297 if (DEBUG_HISTS&&num_hits>3&&peaks.size()==1){
298 HistProb->Fill(TMath::Prob(chisq,num_hits-3));
299
300 PeakInfo myPeakInfo=peaks[0];
301 for (unsigned int k=0;k<clusterHits.size();k++){
302 double Ecalc=0.,E=clusterHits[k]->E;
303 Ecalc+=myPeakInfo.E*CalcClusterEDeriv(clusterHits[k],myPeakInfo);
304 HistdE->Fill(E,E-Ecalc);
305 }
306 }
307
308 if (num_hits>3){
309 SplitPeaks(W,clusterHits,peaks,chisq);
310 }
311
312 // Estimate fraction of "seen" energy for each peak
313 size_t npeaks=peaks.size();
314 vector<double>peak_fractions(npeaks);
315 double Ecalc_sum=0.;
316 for (unsigned int k=0;k<npeaks;k++){
317 Ecalc_sum+=peaks[k].E;
318 }
319 for (unsigned int k=0;k<npeaks;k++){
320 peak_fractions[k]=peaks[k].E/Ecalc_sum;
321 }
322
323 // Add the clusters to the output of the factory
324 for (unsigned int k=0;k<npeaks;k++){
325 // using the measured energy gives slightly better energy resolution
326 double E=peak_fractions[k]*Esum;
327 if (E>SHOWER_ENERGY_THRESHOLD){
328 DFCALCluster *myCluster= new DFCALCluster(0);
329 myCluster->setEnergy(E);
330
331 // Compute energy-weighted time and find channel corresponding to
332 // peak position and add hits to the cluster object
333 unsigned int jmax=0.;
334 double fsum=0.,fmax=0.,t=0.;
335 for (unsigned int j=0;j<clusterHits.size();j++){
336 double f=CalcClusterEDeriv(clusterHits[j],peaks[k]);
337 t+=f*clusterHits[j]->t;
338 fsum+=f;
339
340 if (f>fmax){
341 fmax=f;
342 jmax=j;
343 }
344 }
345
346 myCluster->setTimeEWeight(t/fsum);
347 int channel=dFCALGeom->channel(clusterHits[jmax]->row,
348 clusterHits[jmax]->column);
349 myCluster->setChannelEmax(channel);
350
351 // apply S-curve correction
352 double d=dFCALGeom->blockSize();
353 if (dFCALGeom->inInsert(channel)){
354 d=dFCALGeom->insertBlockSize();
355 }
356 CorrectPosition(channel,d,peaks[k].x,peaks[k].y);
357 myCluster->setCentroid(peaks[k].x,peaks[k].y);
358
359 // Add hits to the cluster object as associated objects
360 for (unsigned int j=0;j<clusterHits.size();j++){
361 if (npeaks==1){
362 myCluster->AddAssociatedObject(clusterHits[j]);
363 }
364 else{
365 // Output hits surrounding peak position
366 double dx=clusterHits[j]->x-clusterHits[jmax]->x;
367 double dy=clusterHits[j]->y-clusterHits[jmax]->y;
368 double dcut=2.5*d;
369 if (fabs(dx)<dcut && fabs(dy)<dcut){
370 myCluster->AddAssociatedObject(clusterHits[j]);
371 }
372 }
373 }
374
375 _data.push_back(myCluster);
376 }
377 }
378 }
379
380 return NOERROR;
381}
382
383//------------------
384// erun
385//------------------
386jerror_t DFCALCluster_factory_Island::erun(void)
387{
388 return NOERROR;
389}
390
391//------------------
392// fini
393//------------------
394jerror_t DFCALCluster_factory_Island::fini(void)
395{
396 return NOERROR;
397}
398
399// Make a list of potential clusters, each consisting of a "central" block
400// and all adjacent blocks, starting with the block with the highest energy.
401// Each successive cluster candidate starts with the block with the next
402// highest energy that has not already been included in another cluster.
403void DFCALCluster_factory_Island::FindClusterCandidates(vector<const DFCALHit*>&fcal_hits,
404 vector<vector<const DFCALHit*>>&clusterCandidates) const {
405 vector<const DFCALHit*>clusterCandidate;
406 clusterCandidate.push_back(fcal_hits[0]);
407
408 vector<bool>used_hits(fcal_hits.size());
409 used_hits[0]=true;
410
411 int hits_left=fcal_hits.size()-1;
412 int begin_i=1;
413 while (hits_left>0){
414 for (size_t i=begin_i;i<fcal_hits.size();i++){
415 if (used_hits[i]==true) continue;
416
417 double dt=clusterCandidate[0]->t-fcal_hits[i]->t;
418 if (fabs(dt)>TIME_CUT) continue;
419
420 int drow=clusterCandidate[0]->row-fcal_hits[i]->row;
421 int dcol=clusterCandidate[0]->column-fcal_hits[i]->column;
422 if (abs(drow)<=1 && abs(dcol)<=1){
423 clusterCandidate.push_back(fcal_hits[i]);
424
425 used_hits[i]=true;
426 hits_left--;
427 }
428 }
429
430 clusterCandidates.push_back(clusterCandidate);
431 clusterCandidate.clear();
432 for (size_t i=begin_i;i<fcal_hits.size();i++){
433 if (used_hits[i]==false){
434 begin_i=i+1;
435 clusterCandidate.push_back(fcal_hits[i]);
436
437 used_hits[i]=true;
438 hits_left--;
439
440 break;
441 }
442 }
443 }
444 if (clusterCandidate.size()>0) clusterCandidates.push_back(clusterCandidate);
445
446 // At this point we do not necessarily have complete clusters.
447 // Merge cluster candidates that appear to belong to a single cluster, again
448 // looking for adjacent hits.
449 double borderCut=0.5*dFCALGeom->blockSize()+dFCALGeom->insertBlockSize();
450 vector<vector<const DFCALHit*>>::iterator iter=clusterCandidates.begin();
451 while (iter!=clusterCandidates.end()){
452 bool matched=false;
453 vector<vector<const DFCALHit*>>::iterator iter2=iter+1;
454 for (;iter2!=clusterCandidates.end();iter2++){
455 for (unsigned int i=0;i<(*iter).size();i++){
456 for (unsigned int j=0;j<(*iter2).size();j++){
457 double dt=(*iter)[i]->t-(*iter2)[j]->t;
458 if (fabs(dt)>TIME_CUT) continue;
459
460 int row1=(*iter)[i]->row;
461 int col1=(*iter)[i]->column;
462 int row2=(*iter2)[j]->row;
463 int col2=(*iter2)[j]->column;
464
465 if (abs(row1-row2)<=1 && abs(col1-col2)<=1){
466 matched=true;
467 break;
468 }
469
470 // look for adjacent clusters between the lead glass and the insert,
471 // if present
472 bool isInsert1=dFCALGeom->isInsertBlock(row1,col1);
473 bool isInsert2=dFCALGeom->isInsertBlock(row2,col2);
474 if ((isInsert1&&!isInsert2) || (isInsert2&&!isInsert1)){
475 double dx=(*iter)[i]->x-(*iter2)[j]->x;
476 double dy=(*iter)[i]->y-(*iter2)[j]->y;
477 if (fabs(dx)<borderCut && fabs(dy)<borderCut){
478 matched=true;
479 break;
480 }
481 }
482 }
483 if (matched){
484 // Add the hits from this cluster to the cluster to which it is
485 // matched and erase it from the list.
486 for (unsigned int j=0;j<(*iter2).size();j++){
487 (*iter).push_back((*iter2)[j]);
488 }
489 clusterCandidates.erase(iter2);
490 break;
491 }
492 }
493 if (matched==true){
494 iter=clusterCandidates.begin();
495 break;
496 }
497 }
498 if (matched==false) iter++;
499 }
500}
501
502// Fit peaks within a cluster containing a list of hits contained in hitList
503bool DFCALCluster_factory_Island::FitPeaks(const TMatrixD &W,
504 vector<const DFCALHit*>&hitList,
505 vector<PeakInfo>&peaks,
506 PeakInfo &myNewPeak,double &chisq
507 ) const {
508 size_t nhits=hitList.size();
509 size_t npeaks=peaks.size();
510 // Save the current info for the new peak
511 PeakInfo saved_peak=myNewPeak;
512
513 // Iterate to find best shower energy and position
514 double chisq_old=1e6,chisq_new=0.;
515 unsigned int max_iter=100;
516 double min_frac=MIN_CUTDOWN_FRACTION;
517 double cutdown_scale=(1.-min_frac)/double(max_iter); // cut-down scale for parameter adjustment
518 for (unsigned int k=0;k<max_iter;k++){
519 // Make sure the energy is positive!
520 if (myNewPeak.E<0){
521 return false;
522 }
523 //Check that the new peak positions are still within the fiducial area of
524 // the detector
525 if (dFCALGeom->isFiducial(myNewPeak.x,myNewPeak.y)==false){
526 return false;
527 }
528
529 // Matrix of per-block differences between measured and calculated energies
530 TMatrixD dE(nhits,1);
531 // Matrix containing partial derivatives of the shower profile function
532 // with respect to the three parameters (E, xc, yc): Jacobian matrix
533 TMatrixD A(nhits,3*npeaks+3);
534 // Loop over all the hits to fill dE and A
535 for (unsigned int i=0;i<nhits;i++){
536 double Ecalc=0.,df_dE=0;
537 for (unsigned int j=0;j<npeaks;j++){
538 PeakInfo myPeakInfo=peaks[j];
539
540 // Make sure the energy is positive!
541 if (myPeakInfo.E<0){
542 return false;
543 }
544
545 //Check that the new peak positions are still within the fiducial
546 //area of the detector
547 if (dFCALGeom->isFiducial(myPeakInfo.x,myPeakInfo.y)==false){
548 return false;
549 }
550
551 // Compute the Jacobian matrix elements
552 df_dE=CalcClusterEDeriv(hitList[i],myPeakInfo);
553 A(i,3*j+0)=df_dE;
554 A(i,3*j+1)=CalcClusterXYDeriv(true,hitList[i],myPeakInfo);
555 A(i,3*j+2)=CalcClusterXYDeriv(false,hitList[i],myPeakInfo);
556
557 Ecalc+=myPeakInfo.E*df_dE;
558 }
559 // Add contributions from the peak we wish to add
560 df_dE=CalcClusterEDeriv(hitList[i],myNewPeak);
561 A(i,3*npeaks+0)=df_dE;
562 A(i,3*npeaks+1)=CalcClusterXYDeriv(true,hitList[i],myNewPeak);
563 A(i,3*npeaks+2)=CalcClusterXYDeriv(false,hitList[i],myNewPeak);
564
565 Ecalc+=myNewPeak.E*df_dE;
566
567 double Ediff=hitList[i]->E-Ecalc;
568 dE(i,0)=Ediff;
569 //cout << " Ediff " << Ediff << endl;
570 }
571 // Compute chi^2 for this iteration
572 chisq_new=0.;
573 for (unsigned int i=0;i<nhits;i++) chisq_new+=W(i,i)*dE(i,0)*dE(i,0);
574 // cout << endl;
575 // cout << k << " chisq "<< chisq_new << " " << (chisq_new-chisq_old)/chisq_old <<endl;
576 //cout << endl;
577 if (chisq_new>chisq_old){
578 // Restore the "best" version before the convergence failure
579 chisq_new=chisq_old;
580 myNewPeak=saved_peak;
581 break;
582 }
583 if (fabs(chisq_new-chisq_old)/chisq_old<0.0001) break;
584
585 // Determine the set of corrections needed to bring the computed shower
586 // shape closer to the measurements
587 TMatrixD A_T(TMatrixD::kTransposed,A);
588 TMatrixD AT_A=A_T*W*A;
589 TMatrixD InvATA(TMatrixD::kInverted,AT_A);
590 // The function describing the cluster profile is rather nastily non-linear,
591 // so we perform a cut-down on the maximum shift vector.
592 double frac=min_frac+cutdown_scale*double(k);
593 TMatrixD dPar=frac*(InvATA*A_T*W*dE);
594
595 // Update the peak parameters
596 for (unsigned int i=0;i<npeaks;i++){
597 peaks[i].E+=dPar(3*i+0,0);
598 peaks[i].x+=dPar(3*i+1,0);
599 peaks[i].y+=dPar(3*i+2,0);
600 }
601 saved_peak=myNewPeak;
602
603 myNewPeak.E+=dPar(3*npeaks+0,0);
604 myNewPeak.x+=dPar(3*npeaks+1,0);
605 myNewPeak.y+=dPar(3*npeaks+2,0);
606
607 chisq_old=chisq_new;
608 }
609 chisq=chisq_new;
610
611 // Sanity check for peak position: does it correspond to one of the hit
612 // blocks?
613 vector<int>peak_channels;
614 peak_channels.push_back(dFCALGeom->channel(myNewPeak.x,myNewPeak.y));
615 for (unsigned int i=0;i<peaks.size();i++){
616 peak_channels.push_back(dFCALGeom->channel(peaks[i].x,peaks[i].y));
617 }
618 vector<int>hit_channels(nhits);
619 for (unsigned int i=0;i<nhits;i++){
620 hit_channels[i]=dFCALGeom->channel(hitList[i]->row,hitList[i]->column);
621 }
622 for (unsigned int j=0;j<peak_channels.size();j++){
623 int my_peak_channel=peak_channels[j];
624 if (find(hit_channels.begin(),hit_channels.end(),my_peak_channel)==hit_channels.end()){
625 return false;
626 }
627 }
628
629 return true;
630}
631
632// Routine to compute the partial derivatives of the shower profile function
633// with respect to the peak position parameters (xpeak,ypeak)
634double DFCALCluster_factory_Island::CalcClusterXYDeriv(bool isXDeriv,
635 const DFCALHit *hit,
636 const PeakInfo &myPeakInfo) const {
637 double sign1[4]={1,1,-1,-1};
638 double sign2[4]={1,-1,-1,1};
639 double half_block=0.5*dFCALGeom->blockSize();
640 double b=SHOWER_WIDTH_PARAMETER; // cm
641 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
642 half_block=0.5*dFCALGeom->insertBlockSize();
643 b=INSERT_SHOWER_WIDTH_PARAMETER;
644 }
645 double b2=b*b;
646 double f=0.;
647 double dxc=hit->x-myPeakInfo.x;
648 double dyc=hit->y-myPeakInfo.y;
649 double Ec=myPeakInfo.E;
650 for (int i=0;i<4;i++){
651 double dx=dxc+sign1[i]*half_block;
652 double dy=dyc+sign2[i]*half_block;
653 double dx2=dx*dx;
654 double dy2=dy*dy;
655 double factor=-sign1[i]*sign2[i]*Ec*b/(2*M_PI)/sqrt(b2*dx2+dy2);
656 if (isXDeriv){
657 f+=factor*dy/(b2+dx2);
658 }
659 else {
660 f+=factor*dx/(b2+dy2);
661 }
662 }
663 return f;
664}
665
666// Shower profile function from Bland, et al., Instruments and Experimental
667// Techniques, 2008, Vol. 51, No. 3, pp. 342-350, eq. 6. Note that the
668// normalization of this function in this equation does not appear to be
669// correct (it's off by 1/sqrt(2pi)).
670double DFCALCluster_factory_Island::CalcClusterEDeriv(const DFCALHit *hit,
671 const PeakInfo &myPeakInfo) const {
672 double sign1[4]={1,1,-1,-1};
673 double sign2[4]={1,-1,-1,1};
674 double half_block=0.5*dFCALGeom->blockSize();
675 double b=SHOWER_WIDTH_PARAMETER; // cm
676 if (dFCALGeom->isInsertBlock(hit->row,hit->column)){
677 half_block=0.5*dFCALGeom->insertBlockSize();
678 b=INSERT_SHOWER_WIDTH_PARAMETER;
679 }
680 double f=0.;
681 double dxc=hit->x-myPeakInfo.x;
682 double dyc=hit->y-myPeakInfo.y;
683 for (int i=0;i<4;i++){
684 double dx=dxc+sign1[i]*half_block;
685 double dy=dyc+sign2[i]*half_block;
686 f+=sign1[i]*sign2[i]/(2*M_PI)*atan(dx*dy/(b*sqrt(b*b+dx*dx+dy*dy)));
687 }
688 return f;
689}
690
691// Try to split peaks into two following the algorithm (barely) described in
692// Lednev, IHEP 93-153.
693void DFCALCluster_factory_Island::SplitPeaks(const TMatrixD &W,
694 vector<const DFCALHit*>&hits,
695 vector<PeakInfo>&peaks,
696 double &chisq) const{
697 unsigned int npeaks=peaks.size(),nhits=hits.size();
698 vector<PeakInfo>saved_peaks=peaks;
699
700 double chisq_old=chisq;
Value stored to 'chisq_old' during its initialization is never read
701 // Find the centroid of the hits for each peak region
702 for (unsigned int i=0;i<npeaks;i++){
703 double E0=0.,x0=0.,y0=0.;
704 vector<double>Elist(nhits);
705 for (unsigned int j=0;j<nhits;j++){
706 double Ecalc=0.;
707 for (unsigned int k=0;k<peaks.size();k++){
708 if (i==k) continue;
709 Ecalc+=peaks[k].E*CalcClusterEDeriv(hits[j],peaks[k]);
710 }
711 Elist[j]=hits[j]->E-Ecalc;
712 E0+=Elist[j];
713 x0+=Elist[j]*hits[j]->x;
714 y0+=Elist[j]*hits[j]->y;
715 }
716 x0/=E0;
717 y0/=E0;
718
719 // Find the second moments about the center of the cluster of hits
720 double xx=0.,yy=0.,yx=0.;
721 for (unsigned int j=0;j<nhits;j++){
722 double dx=hits[j]->x-x0;
723 double dy=hits[j]->y-y0;
724 xx+=Elist[j]*dx*dx;
725 yy+=Elist[j]*dy*dy;
726 yx+=Elist[j]*dx*dy;
727 }
728
729 // Determine the parameter alpha=(E1-E2)/E0 specifying the energy sharing
730 // between the two candidate peaks
731 double dxy=xx-yy;
732 double rsq2 = dxy*dxy + 4.*yx*yx;
733 if( rsq2 < 1.e-20 ) rsq2 = 1.e-20;
734 double rsq = sqrt(rsq2);
735 double dxc = -sqrt((rsq+dxy)*2.);
736 double dyc = sqrt((rsq-dxy)*2.);
737 if( yx >= 0. ) dyc = -dyc;
738 double r = sqrt(dxc*dxc + dyc*dyc);
739 double alpha = 0.;
740 for(unsigned int i=0;i<nhits;i++) {
741 double u=(hits[i]->x-x0)*dxc/r + (hits[i]->y-y0)*dyc/r;
742 alpha-=Elist[i]*u*fabs(u);
743 }
744 alpha/=E0*rsq;
745 // Make sure alpha is not too small or too big
746 if (alpha<-0.8) alpha=-0.8;
747 if (alpha> 0.8) alpha= 0.8;
748
749 // Find the first guess for the energies of the two potential peaks
750 double alpha_plus_factor=0.5*(1.+alpha);
751 double alpha_minus_factor=0.5*(1.-alpha);
752 double E1=E0*alpha_plus_factor;
753 double E2=E0*alpha_minus_factor;
754 if (E1<SHOWER_ENERGY_THRESHOLD || E2<SHOWER_ENERGY_THRESHOLD){
755 // Bail if either of the energies is below the shower threshold
756 continue;
757 }
758
759 // Scale dxc and dyc to find estimates for the peak separation in x and y
760 double scale=1./sqrt(1.-alpha*alpha);
761 dxc*=scale;
762 dyc*=scale;
763
764 // Estimates for peak positions
765 double x1=x0+dxc*alpha_plus_factor;
766 double y1=y0+dyc*alpha_plus_factor;
767 double x2=x0+dxc*alpha_minus_factor;
768 double y2=y0+dyc*alpha_minus_factor;
769
770 PeakInfo myNewPeak(E2,x2,y2);
771 peaks[i].E=E1;
772 peaks[i].x=x1;
773 peaks[i].y=y1;
774
775 // Refit with the split peaks
776 chisq_old=chisq;
777 bool good_fit=FitPeaks(W,hits,peaks,myNewPeak,chisq);
778 if (good_fit && chisq+CHISQ_MARGIN<chisq_old){
779 peaks.push_back(myNewPeak);
780 }
781 else {
782 // No improvement from adding the new peak. Restore the old list
783 peaks=saved_peaks;
784 chisq=chisq_old;
785 }
786 }
787}
788
789// Apply "S-curve" correction to the peak position
790void DFCALCluster_factory_Island::CorrectPosition(int channel,double d,
791 double &x,double &y) const{
792 DVector2 xyblock=dFCALGeom->positionOnFace(channel);
793 double d2=d*d;
794 double dx=x-xyblock.X();
795 double dx2=dx*dx;
796 double dy=y-xyblock.Y();
797 double dy2=dy*dy;
798 if (dFCALGeom->inInsert(channel)){
799 x-=dx*(dx2-d2/4.)*(insertPosConst1+insertPosConst2*dx+insertPosConst3*dx2);
800 y-=dy*(dy2-d2/4.)*(insertPosConst1+insertPosConst2*dy+insertPosConst3*dy2);
801 }
802 else{
803 x-=dx*(dx2-d2/4.)*(posConst1+posConst2*dx+posConst3*dx2);
804 y-=dy*(dy2-d2/4.)*(posConst1+posConst2*dy+posConst3*dy2);
805 }
806}