Commit c14cdb6c authored by dino's avatar dino
Browse files

Calculation of the energy partition between the hits in PSAFilterGridSearch...

Calculation of the energy partition between the hits in PSAFilterGridSearch when using AdaptiveTwoHits is
now done analytically, no longer with a loop over possible values.
Some adjustments in TrackingFilter::SetOutput so that the adf tracked frame structure contains position and energy in the gamma-related part.

git-svn-id: svn://gal-serv.lnl.infn.it/agata/trunk/narval_emulator@1077 170316e4-aea8-4b27-aad4-0380ec0519c9
parent 395dca73
......@@ -101,7 +101,7 @@ DISPATCHER EventBuilder pseudo actor to connect the output of the ch
#endif
// Replacement of Xavier's variable to ask producers to return from process_block even
// if the buffer is not yet full. This is just to avoid complains from the compiler, not
// if the buffer is not yet full. This is just to avoid complaints from the compiler, not
// an implementation of the time-out mechanism (which is not very meaningful in off-line).
unsigned int library_timeout = 0;
......
......@@ -459,13 +459,14 @@ Int_t PSAFilter::SetOutput(int slot)
CoreT0 = pSlot->CoreT[0];
CoreT1 = pSlot->CoreT[1];
//#define ADDPSATIME
#define ADDPSATIME
#ifdef ADDPSATIME
// Add the T0 correction as determined by the PSA. --> The overall Ge-Ge timing becomes ~10% worse, with larger tails.
// If the T0 shift is subtracted the overall ge-ge timing becomes ~2 worse (but very symmetric, meaning added noise).
// The above seems not to be true if the number of samples used to determine Tzero in PsaFilterGridSearch::FitT0AfterPSA
// is smal: e.g. at nsamps=10 we now get an improvement by 10%. To be tested with a wider dinamycal range.
// Therefore, it is better not to correct. MOreover, one should also question the T0 realignment done in the PSA!
// Add the T0 correction as determined by the PSA.
// Using a large number of samples (nsamples>20) in PsaFilterGridSearch::FitT0AfterPSA to determine Tzero,
// Tgamma-gamma becomes ~10% worse than using only the timing done in PreprocessingFilterPSA.
// With (nsamples=10) Tgamma-gamma improves by ~10% but the peaks start getting tails.
// A reasonable compromise seems to be (nsamples=15), but the Tgamma-gamma improvement is rather modest.
// This matter is not completely clarified and one can decide to ignore the correction by commenting the #define ADDPSATIME.
CoreT0 += pSlot->shiftT0;
CoreT1 += pSlot->shiftT0;
#endif
......
......@@ -259,7 +259,7 @@ Int_t PSAFilterGridSearch::AlgoSpecificInitialise()
#if 0 && defined(PSA_MULTIHIST)
{
// Write the complete basis of a segment a <float> spectrum. Normalization is 1000.
// Write the complete basis of a segment as a <float> spectrum. Normalization is 1000.
// write only NSEGS+CC as they are going to be used (i.e. with differentiated netChargeSeg and CC)
// To get the non-differentiated version uncomment //#define USE_NETCHARGES in GridSearchParameters.h
// To get the whole internal basis replace NCHAN with TCHAN in the definition of numTraces
......@@ -340,7 +340,7 @@ Int_t PSAFilterGridSearch::AlgoSpecificInitialise()
//WriteBaseAverPt(1000.f/MAXNORM, "smoo");
// prepare the metric vectors for the chi2 loops
// prepare the metric vectors for the chi2 loops (even if not used)
if(gDistMetric > 0) {
//// abs(d)^gDistMetric
float largest = 0;
......@@ -405,6 +405,7 @@ Int_t PSAFilterGridSearch::AlgoSpecificInitialise()
#endif
fDistRescale = pow(RESCALE, gDistMetric);
#if 1 && defined(PSA_MULTIHIST)/* && !defined(USE_SSE_VERSION)*/
#if STANDARD_METRIC != STANDARD_METRIC_SQUARE // don't write if ^2
{
// The precalculated metric
MultiHist<float> *vMetric = new MultiHist<float>(NMETRIC);
......@@ -416,6 +417,7 @@ Int_t PSAFilterGridSearch::AlgoSpecificInitialise()
vMetric->write();
delete vMetric;
}
#endif // STANDARD_METRIC != STANDARD_METRIC_SQUARE
#endif
} // else { // if(gPsaSegCenter) {
......@@ -780,8 +782,15 @@ Int_t PSAFilterGridSearch::Process(int slot, int count)
}
pSlot->retValue = 0; // could be used as error message
if(extraLoop)
if(extraLoop) {
//// Perform a final time adjustment ??
//// Better not as it produces a slight worsening of Tgg. Not yet verified for AdaptiveTwoHits
//if(gFitTZero) {
// float DT = FitT0AfterPSA(PF);
// pSlot->shiftT0 += DT;
//}
break;
}
// 4444444444444444444444444444 adjust T0
......@@ -1274,12 +1283,13 @@ float PSAFilterGridSearch::CalcChi2GridPts(int segNum, int ptNum, pointFull &PF)
pointPsa *pPtSeg = fBasis.Pts[segNum]; // the points of this segment
SignalBasis::cfgrid &sgrid = fBasis.cflist[segNum]; // the coarse-fine structure of this segment
float baseScale = PF.baseScale*RESCALE;
float chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace = pPtSeg[ptNum].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace, PF.baseScale*RESCALE);
chi2 += Chi2InnerLoop(realTrace, baseTrace, baseScale);
}
} // end loop over the segments
return chi2;
......@@ -1709,15 +1719,20 @@ int PSAFilterGridSearch::SearchFullGrid(pointFull &PF, int netChSeg)
float chi2 = 0;
int chiDone = 0;
// the base points of this segment
pointPsa *pPtSeg = fBasis.Pts[netChSeg];
int nPts = fBasis.numPts[netChSeg];
for(int iPts = 0; iPts < nPts; iPts++) {
chiDone++;
float baseScale = PF.baseScale*RESCALE;
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace = fBasis.Pts[netChSeg][iPts].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace, PF.baseScale*RESCALE);
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace = pPtSeg[iPts].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace, baseScale);
#ifndef MATRCHI2 // don't interrupt the loop over the segments while producing MatChi2
if(chi2 > chi2min)
break;
......@@ -1725,7 +1740,7 @@ int PSAFilterGridSearch::SearchFullGrid(pointFull &PF, int netChSeg)
}
} // end loop over the segments
#ifdef MATRCHI2
MatChi2(chi2, &fBasis.Pts[netChSeg][iPts]);
MatChi2(chi2, &pPtSeg[iPts]);
#endif
if(chi2 < chi2min) {
bestPt = iPts;
......@@ -1738,42 +1753,100 @@ int PSAFilterGridSearch::SearchFullGrid(pointFull &PF, int netChSeg)
PF.bestFac = 0;
PF.chi2min = chi2min;
// this part has not been checked after replacement of the loop on the energy partition with the optimum partition
if(twoHits) {
// Lower limit on the energy of the partial hits
// (this is also a 2*ener2Min threshold on the total energy of the segment)
const float ener2Min = 50.f;
if(PF.enerSeg < 2*ener2Min) {
return 0; // --> don't proceed with the fine search
}
// Constraints on the distance between the two points
const float f2Min = 7.f; // mm
const float fine2Min = f2Min*f2Min;
int bestPt1 = -1;
int bestPt2 = -1;
float bestFac = 0;
for(int iPts1 = 0; iPts1 < nPts; iPts1++) {
for(int iPts2 = 0; iPts2 < nPts; iPts2++) {
for(float ePart = 0.5f; ePart <= 0.9f; ePart += 0.1f) { // first point always larger than second
float bScale1 = PF.baseScale*ePart;
float bScale2 = PF.baseScale*(1.f - ePart);
chiDone++;
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = fBasis.Pts[netChSeg][iPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = fBasis.Pts[netChSeg][iPts2].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1*RESCALE, baseTrace2, bScale2*RESCALE);
if(chi2 > chi2min)
break;
// avoid points too close to each other
float dist2 = pPtSeg[iPts2].distance2(&pPtSeg[iPts1]);
if(dist2 < fine2Min)
continue;
// find optimum energy partition for the two points
// Sum{(baseTrace1-baseTrace2)(realTrace-baseTrace2)}/sum{(baseTrace1-baseTrace2)*(baseTrace1-baseTrace2)}
float Sum1212 = 0, Sum12y2 = 0, Sumy2y2 = 0;
float baseScale = PF.baseScale*RESCALE;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[iPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[iPts2].amplitude[iSegm] + time_first;
for(int ss = 0; ss < LOOP_SAMP; ss++) {
float d12 = baseTrace1[ss] - baseTrace2[ss]; // just the difference
float dy2 = realTrace[ss] - baseTrace2[ss]*baseScale; // base scaled to data
Sum1212 += d12*d12;
Sum12y2 += d12*dy2;
#if STANDARD_METRIC == STANDARD_METRIC_SQUARE
Sumy2y2 += dy2*dy2;
#endif
}
} // end loop over the segments
if(chi2 < chi2min) {
bestPt1 = iPts1;
bestPt2 = iPts2;
bestFac = ePart;
chi2min = chi2;
}
} // end loop over the energy partition
} // end loop over the active segments
if(Sum12y2 < 0)
continue;
Sum12y2 *= baseScale;
Sum1212 *= baseScale*baseScale;
if(Sum1212 < Sum12y2)
continue;
float ePartOpt = Sum12y2/Sum1212;
// skip solutions where one of the two points has too small fractional energy
if(ePartOpt < 0.05f || ePartOpt > 0.95f)
continue;
// skip solutions with too small energy
if(PF.enerSeg*ePartOpt < ener2Min || PF.enerSeg*(1.f-ePartOpt) < ener2Min) // keV
continue;
#if STANDARD_METRIC == STANDARD_METRIC_SQUARE
// if the metric is 2, chiSquare can be calculated as
float chi2M2 = Sumy2y2 - ePartOpt*Sum12y2;
chi2 = chi2M2;
#else
float bScale1 = PF.baseScale*RESCALE*ePartOpt;
float bScale2 = PF.baseScale*RESCALE*(1.f - ePartOpt);
chiDone++;
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[iPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[iPts2].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1, baseTrace2, bScale2);
if(chi2 > chi2min)
break;
}
} // end loop over the segments
#endif
if(chi2 < chi2min) {
bestPt1 = iPts1;
bestPt2 = iPts2;
bestFac = ePartOpt;
chi2min = chi2;
}
} // end loop over the base points iPts2
} // end loop over the base points iPts1
if(bestPt1>=0 && bestPt2>= 0) {
PF.bestPt1 = bestPt1;
PF.bestPt2 = bestPt2;
PF.bestFac = bestFac;
PF.chi2min = chi2min;
if(PF.bestFac < 0.5f) {
// enforce energy ordering
PF.bestFac = 1.f - PF.bestFac;
int tmp1 = PF.bestPt1; PF.bestPt1 = PF.bestPt2; PF.bestPt2 = tmp1;
}
}
}
......@@ -1786,10 +1859,10 @@ int PSAFilterGridSearch::SearchFullGrid(pointFull &PF, int netChSeg)
int PSAFilterGridSearch::SearchAdaptive1(pointFull &PF, int netChSeg, bool bCoarseOnly, bool bFirstOnly)
{
int time_first = TIME0; // start of basis
int samp_first = PF.samp_first; // start of signal
float baseScale = PF.baseScale; // scaling signals to data
char *lMask = PF.localMask; // selection of segments
int time_first = TIME0; // start of basis
int samp_first = PF.samp_first; // start of signal
float baseScale = PF.baseScale*RESCALE; // scaling signals to data, including expansion factor for mapped metric
char *lMask = PF.localMask; // selection of segments
// the basis points of this segment
pointPsa *pPtSeg = fBasis.Pts[netChSeg];
......@@ -1820,7 +1893,7 @@ int PSAFilterGridSearch::SearchAdaptive1(pointFull &PF, int netChSeg, bool bCoar
nInner++;
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace = pPtSeg[kPts].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace, baseScale*RESCALE);
chi2 += Chi2InnerLoop(realTrace, baseTrace, baseScale);
if(chi2 > chi2min)
break;
}
......@@ -1866,7 +1939,7 @@ int PSAFilterGridSearch::SearchAdaptive1(pointFull &PF, int netChSeg, bool bCoar
nInner++;
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace = pPtSeg[jPts].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace, baseScale*RESCALE);
chi2 += Chi2InnerLoop(realTrace, baseTrace, baseScale);
if(chi2 > chi2min)
break;
}
......@@ -1887,7 +1960,7 @@ int PSAFilterGridSearch::SearchAdaptive1(pointFull &PF, int netChSeg, bool bCoar
return -chiDone;
}
// Coarse search performed looping over all ORDERED pairs of points, with the the two hits scanning all energy combinations.
// Coarse search performed looping over all ORDERED pairs of points, with the the two hits sharing the energy in the optimum way.
// As the bases are loaded less times the memory catching mechanism is more efficient, resulting in ~10% faster execution
int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoarseOnly, bool bFirstOnly)
{
......@@ -1897,7 +1970,6 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
if(PF.enerSeg < 2*ener2Min) {
return 0; // --> don't proceed with the fine search
}
// Constraints on the distance between the two points
const float d2Min = 13.f; // mm
const float f2Min = 7.f; // mm
......@@ -1937,12 +2009,6 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
// This is the most CPU consuming block of the 2-hits search and it is important to find ways to prune the loops
//#define SEARCHADAPTIVE2_MORECHECKS
#ifdef SEARCHADAPTIVE2_MORECHECKS
float aa = 0;
#endif
int bestPt1 = -1;
int bestPt2 = -1;
float bestFac = 0;
......@@ -1950,48 +2016,72 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
int kPts1 = sgrid.indFine[sgrid.posCoarse[kloop1]];
for(int kloop2 = kloop1+1; kloop2 < kCoarseN; kloop2++) { // loop on the 2nd hit
int kPts2 = sgrid.indFine[sgrid.posCoarse[kloop2]];
// avoid points too near to each other
// avoid points too close to each other
float dist2 = pPtSeg[kPts2].distance2(&pPtSeg[kPts1]);
if(dist2 < dist2Min)
continue;
#ifdef SEARCHADAPTIVE2_MORECHECKS
// energy partition that minimizes the distance of the baricentre to the 1-hit solution (check for a possible 1 <--> 2 exchange)
float aab = ( (pPtSeg[h1_bestPt].x-pPtSeg[kPts1].x)*(pPtSeg[kPts2].x-pPtSeg[kPts1].x)+
(pPtSeg[h1_bestPt].y-pPtSeg[kPts1].y)*(pPtSeg[kPts2].y-pPtSeg[kPts1].y)+
(pPtSeg[h1_bestPt].z-pPtSeg[kPts1].z)*(pPtSeg[kPts2].z-pPtSeg[kPts1].z) ) / dist2;
// find optimum energy partition for the two points (However: is this valid if the metric is not 2 ??)
// Sum{(baseTrace1-baseTrace2)(realTrace-baseTrace2)}/sum{(baseTrace1-baseTrace2)*(baseTrace1-baseTrace2)}
float Sum1212 = 0, Sum12y2 = 0, Sumy2y2 = 0;
float baseScale = PF.baseScale*RESCALE;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[kPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[kPts2].amplitude[iSegm] + time_first;
for(int ss = 0; ss < LOOP_SAMP; ss++) {
float d12 = baseTrace1[ss] - baseTrace2[ss]; // just the difference
float dy2 = realTrace[ss] - baseTrace2[ss]*baseScale; // base scaled to data
Sum1212 += d12*d12;
Sum12y2 += d12*dy2;
#if STANDARD_METRIC == STANDARD_METRIC_SQUARE
Sumy2y2 += dy2*dy2;
#endif
// loop on the energy partition
for(float ePart = 0.1f; ePart < 0.91f; ePart += 0.1f) {
// skip solutions with too small energy
if(PF.enerSeg*ePart < ener2Min || PF.enerSeg*(1.f-ePart) < ener2Min) // keV
continue;
float bScale1 = PF.baseScale*ePart;
float bScale2 = PF.baseScale*(1.f - ePart);
chiDone++;
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
nInner++;
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[kPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[kPts2].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1*RESCALE, baseTrace2, bScale2*RESCALE);
if(chi2 > chi2min)
break;
}
} // end loop over the segments
if(chi2 < chi2min) {
#ifdef SEARCHADAPTIVE2_MORECHECKS
aa = aab; // recorded for inspection at the end of the function
#endif
bestPt1 = kPts1;
bestPt2 = kPts2;
bestFac = ePart;
chi2min = chi2;
kCoarse1 = kloop1;
kCoarse2 = kloop2;
}
} // end loop over the energy partition
} // end loop over the active segments
if(Sum12y2 < 0)
continue;
Sum12y2 *= baseScale;
Sum1212 *= baseScale*baseScale;
if(Sum1212 < Sum12y2)
continue;
float ePartOpt = Sum12y2/Sum1212;
// skip solutions where one of the two points has too small fractional energy
if(ePartOpt < 0.05f || ePartOpt > 0.95f)
continue;
// skip solutions with too small energy
if(PF.enerSeg*ePartOpt < ener2Min || PF.enerSeg*(1.f-ePartOpt) < ener2Min) // keV
continue;
#if STANDARD_METRIC == STANDARD_METRIC_SQUARE
// if the metric is 2, chiSquare can be calculated as
float chi2M2 = Sumy2y2 - ePartOpt*Sum12y2;
chi2 = chi2M2;
#else
float bScale1 = PF.baseScale*RESCALE*ePartOpt;
float bScale2 = PF.baseScale*RESCALE*(1.f - ePartOpt);
chiDone++;
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
nInner++;
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[kPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[kPts2].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1, baseTrace2, bScale2);
if(chi2 > chi2min)
break;
}
} // end loop over the active segments
#endif
if(chi2 < chi2min) {
bestPt1 = kPts1;
bestPt2 = kPts2;
bestFac = ePartOpt;
chi2min = chi2;
kCoarse1 = kloop1;
kCoarse2 = kloop2;
}
} // end loop over the base points iPts2
} // end loop over the base points iPts1
......@@ -2005,8 +2095,9 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
PF.bestPt1 = bestPt1;
PF.bestPt2 = bestPt2;
PF.chi2min = chi2min;
if(bestFac < 0.5f) {
// ensure the energy ordering
if(PF.bestFac < 0.5f) {
// enforce energy ordering
PF.bestFac = 1.f - PF.bestFac;
int tmp1 = PF.bestPt1; PF.bestPt1 = PF.bestPt2; PF.bestPt2 = tmp1;
int tmp2 = kCoarse1; kCoarse1 = kCoarse2; kCoarse2 = tmp2;
......@@ -2019,11 +2110,10 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
// refinements //
//////////////////
// 1) optimize the energy partition in the two coarse-grid points
// 2) scan the fine grid around its coarse solution for the heavy hit
// 3) scan the fine grid around its coarse solution for the light hit
// 2) scan the fine grid around the coarse solution for the heavy hit
// 3) scan the fine grid around the coarse solution for the light hit
// 4) Reoptimize the energy partition if 2) or 3) moved the hits
//
// This produces irregularities in the distribution of points when the position of
// the first (or second) is not modified by the two independent searches.
......@@ -2031,52 +2121,11 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
int mDone = 0;
// 1) optimize the energy partition with smaller steps
chi2min = PF.chi2min;
int kPts1 = PF.bestPt1;
int kPts2 = PF.bestPt2;
// we should start from the previous partition and zig-zag around it
for(float ePart = PF.bestFac-0.1f; ePart < PF.bestFac+0.1f; ePart += 0.02f) {
// skip solutions with too small energy in the points;
if(PF.enerSeg*ePart < ener2Min || PF.enerSeg*(1.f-ePart) < ener2Min) // keV
continue;
float bScale1 = PF.baseScale*ePart;
float bScale2 = PF.baseScale*(1.f - ePart);
chiDone++;
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
nInner++;
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[kPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[kPts2].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1*RESCALE, baseTrace2, bScale2*RESCALE);
if(chi2 > chi2min)
break;
}
} // end loop over the segments
if(chi2 < chi2min) {
bestFac = ePart;
chi2min = chi2;
}
} // end loop over the energy partition
if(chi2min < PF.chi2min) {
PF.chi2min = chi2min;
PF.bestFac = bestFac;
if(bestFac < 0.5f) {
// ensure the energy ordering
PF.bestFac = 1.f - PF.bestFac;
int tmp1 = PF.bestPt1; PF.bestPt1 = PF.bestPt2; PF.bestPt2 = tmp1;
int tmp2 = kCoarse1; kCoarse1 = kCoarse2; kCoarse2 = tmp2;
}
mDone |= 1;
}
// 2) optimize the fine-position of the heavy point while the light point is fixed
float coarseDist2 = pPtSeg[PF.bestPt2].distance2(&pPtSeg[PF.bestPt1]);
bool isClose = (coarseDist2 < near2Min);
float bScale1 = PF.baseScale*PF.bestFac;
float bScale2 = PF.baseScale*(1.f - PF.bestFac);
float bScale1 = PF.baseScale*RESCALE*PF.bestFac;
float bScale2 = PF.baseScale*RESCALE*(1.f - PF.bestFac);
chi2min = PF.chi2min;
int bestPt = -1;
int fxPt = PF.bestPt2;
......@@ -2098,7 +2147,7 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[jPts].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[fxPt].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1*RESCALE, baseTrace2, bScale2*RESCALE);
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1, baseTrace2, bScale2);
if(chi2 > chi2min)
break;
}
......@@ -2116,8 +2165,8 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
}
// 3) optimize the light point while the heavy point is fixed
bScale1 = PF.baseScale*PF.bestFac;
bScale2 = PF.baseScale*(1.f - PF.bestFac);
bScale1 = PF.baseScale*RESCALE*PF.bestFac;
bScale2 = PF.baseScale*RESCALE*(1.f - PF.bestFac);
chi2min = PF.chi2min;
bestPt = -1;
fxPt = PF.bestPt1;
......@@ -2139,7 +2188,7 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[fxPt].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[jPts].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1*RESCALE, baseTrace2, bScale2*RESCALE);
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1, baseTrace2, bScale2);
if(chi2 > chi2min)
break;
}
......@@ -2174,50 +2223,83 @@ int PSAFilterGridSearch::SearchAdaptive2(pointFull &PF, int netChSeg, bool bCoar
return -chiDone;
}
// 4) if there have been changes in position, ri-optimize the energy
// 4) re-optimize energy partition if positions have changed
if(mDone >= 2) {
chi2min = PF.chi2min;
int kPts1 = PF.bestPt1;
int kPts2 = PF.bestPt2;
// we should start from the previous partition and zig-zag around it
for(float ePart = PF.bestFac-0.1f; ePart < PF.bestFac+0.1f; ePart += 0.02f) {
// skip solutions with too small energy in the points;
if(PF.enerSeg*ePart < ener2Min || PF.enerSeg*(1.f-ePart) < ener2Min) // keV
continue;
float bScale1 = PF.baseScale*ePart;
float bScale2 = PF.baseScale*(1.f - ePart);
chi2 = 0;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
nInner++;
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[kPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[kPts2].amplitude[iSegm] + time_first;
chi2 += Chi2InnerLoop(realTrace, baseTrace1, bScale1*RESCALE, baseTrace2, bScale2*RESCALE);
if(chi2 > chi2min)
break;
// find optimum energy partition for the two points
// Sum{(baseTrace1-baseTrace2)(realTrace-baseTrace2)}/sum{(baseTrace1-baseTrace2)*(baseTrace1-baseTrace2)}
float Sum1212 = 0, Sum12y2 = 0, Sumy2y2 = 0;
float baseScale = PF.baseScale*RESCALE;
for(int iSegm=0; iSegm<NCHAN; iSegm++) {
if(lMask[iSegm] != '0') {
float * realTrace = PF.sAmplitude[iSegm] + samp_first;
float * baseTrace1 = pPtSeg[kPts1].amplitude[iSegm] + time_first;
float * baseTrace2 = pPtSeg[kPts2].amplitude[iSegm] + time_first;
for(int ss = 0; ss < LOOP_SAMP; ss++) {
float d12 = baseTrace1[ss] - baseTrace2[ss]; // just the difference
float dy2 = realTrace[ss] - baseTrace2[ss]*baseScale; // base scaled to data
Sum1212 += d12*d12;
Sum12y2 += d12*dy2;
#if STANDARD_METRIC == STANDARD_METRIC_SQUARE
Sumy2y2 += dy2*dy2;
#endif
}
} // end loop over the segments
if(chi2 < chi2min) {
bestFac = ePart;
chi2min = chi2;
}
} // end loop over the energy partition
} // end loop over the active segments
Sum12y2 *= baseScale;
Sum1212 *= baseScale*baseScale;
if(Sum12y2 < 0 || Sum1212 < Sum12y2) {
PF.bestFac = 0; // the 2 hits solution will be discarded