fit_eventrates.py
1 |
#! /usr/bin/env python
|
---|---|
2 |
# ==========================================================================
|
3 |
# Fit event rates
|
4 |
# --------------------------------------------------------------------------
|
5 |
# Copyright (C) 2023 Juergen Knoedlseder
|
6 |
#
|
7 |
# This program is free software: you can redistribute it and/or modify
|
8 |
# it under the terms of the GNU General Public License as published by
|
9 |
# the Free Software Foundation, either version 3 of the License, or
|
10 |
# (at your option) any later version.
|
11 |
#
|
12 |
# This program is distributed in the hope that it will be useful,
|
13 |
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14 |
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
15 |
# GNU General Public License for more details.
|
16 |
#
|
17 |
# You should have received a copy of the GNU General Public License
|
18 |
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
19 |
#
|
20 |
# ==========================================================================
|
21 |
import gammalib |
22 |
import glob |
23 |
import math |
24 |
import matplotlib.pyplot as plt |
25 |
|
26 |
|
27 |
# ================ #
|
28 |
# Rate model class #
|
29 |
# ================ #
|
30 |
class rate_model(gammalib.GPythonOptimizerFunction):
    """
    Optimizer function for fitting the prompt fraction of the event rate.

    The model for each (OAD, Phibar) bin is a linear combination of a
    veto-rate-shaped component (weight ``fprompt``) and a constant-rate
    component (weight ``1 - fprompt``), both pre-multiplied by the EHA cut
    correction. The single fitted parameter is ``fprompt``; ``eval`` sets
    the negative Poisson log-likelihood and fills the parameter gradient
    and curvature expected by gammalib's Levenberg-Marquardt optimizer.
    """

    # Constructor
    def __init__(self, arrays, ieng, norm):
        """
        Initialise the rate model.

        Parameters
        ----------
        arrays : dict
            Working arrays with keys 'counts' (OAD x Phibar x energy),
            'ehacutcorr' (OAD x Phibar), 'vetorate' (OAD) and
            'constrate' (OAD), as produced by load_working_arrays()
        ieng : int
            Energy bin index into the counts cube
        norm : float
            Global normalisation applied on top of the per-Phibar
            event-number normalisation
        """
        # Call base class constructor
        gammalib.GPythonOptimizerFunction.__init__(self)

        # Register eval() as the function evaluated by the optimizer
        self._set_eval(self.eval)

        # Set data
        self._counts = arrays['counts']
        self._ieng   = ieng
        self._norm   = norm

        # Extract dimensions from the counts cube (axis 0: OADs,
        # axis 1: Phibar layers)
        self._noads   = self._counts.shape()[0]
        self._nphibar = self._counts.shape()[1]

        # Multiply veto and constant rate by EHA cut correction. Rates
        # are zero in case that the vetorate is zero (bins with zero
        # vetorate are skipped, leaving the freshly allocated arrays
        # at their initial values).
        self._vetorate  = gammalib.GNdarray(self._noads, self._nphibar)
        self._constrate = gammalib.GNdarray(self._noads, self._nphibar)
        self._diffrate  = gammalib.GNdarray(self._noads, self._nphibar)
        for ioad in range(self._noads):
            vetorate = arrays['vetorate'][ioad]
            if vetorate > 0.0:
                constrate = arrays['constrate'][ioad]
                for iphibar in range(self._nphibar):
                    self._vetorate[ioad, iphibar]  = vetorate  * arrays['ehacutcorr'][ioad,iphibar]
                    self._constrate[ioad, iphibar] = constrate * arrays['ehacutcorr'][ioad,iphibar]
                    # Difference drives the fprompt gradient in eval()
                    self._diffrate[ioad, iphibar]  = self._vetorate[ioad, iphibar] - self._constrate[ioad, iphibar]

        # Compute per-Phibar rate sums (used for the normalisation
        # gradient in eval())
        self._vetorate_sum  = gammalib.GNdarray(self._nphibar)
        self._constrate_sum = gammalib.GNdarray(self._nphibar)
        self._diffrate_sum  = gammalib.GNdarray(self._nphibar)
        for iphibar in range(self._nphibar):
            for ioad in range(self._noads):
                self._vetorate_sum[iphibar]  += self._vetorate[ioad, iphibar]
                self._constrate_sum[iphibar] += self._constrate[ioad, iphibar]
            self._diffrate_sum[iphibar] = self._vetorate_sum[iphibar] - self._constrate_sum[iphibar]

        # Return
        return

    # Methods
    def eval(self):
        """
        Evaluate function

        Computes the negative Poisson log-likelihood of the Phibar-
        normalised model and accumulates the gradient and curvature of
        the 'fprompt' parameter.

        NOTE(review): gradient() and curvature() are incremented without
        an explicit reset here — presumably the gammalib optimizer zeros
        them before each call; confirm against GPythonOptimizerFunction.
        """
        # Recover parameters: fprompt weights the veto-rate template,
        # fconst the constant template (they sum to one)
        fprompt = self._pars()[0].value()
        fconst  = 1.0 - fprompt

        # Allocate phibar normalisation arrays
        nevents = gammalib.GNdarray(self._nphibar)
        nmodel  = gammalib.GNdarray(self._nphibar)
        norm    = gammalib.GNdarray(self._nphibar)

        # Compute model
        model = fprompt * self._vetorate + fconst * self._constrate

        # Phibar normalise model: scale each Phibar layer so that the
        # model sum matches self._norm times the observed event sum
        model_norm = model.copy()
        for iphibar in range(self._nphibar):
            for ioad in range(self._noads):
                if model_norm[ioad, iphibar] > 0.0:
                    nevents[iphibar] += self._counts[ioad, iphibar, self._ieng]
                    nmodel[iphibar]  += model_norm[ioad, iphibar]
            if nmodel[iphibar] > 0.0:
                norm[iphibar] = self._norm * nevents[iphibar] / nmodel[iphibar]
                for ioad in range(self._noads):
                    model_norm[ioad, iphibar] *= norm[iphibar]

        # Compute LogL. Note the -=: logL accumulates the NEGATIVE
        # Poisson log-likelihood sum(n*log(m) - m), which the LM
        # optimizer minimises.
        logL = 0.0
        for ioad in range(self._noads):
            for iphibar in range(self._nphibar):
                if model_norm[ioad, iphibar] > 0.0:
                    logL -= self._counts[ioad, iphibar, self._ieng] * \
                            math.log(model_norm[ioad, iphibar]) - \
                            model_norm[ioad, iphibar]

        # Evaluate gradient and curvature
        grad = 0.0
        for iphibar in range(self._nphibar):

            # Precompute normalisation gradient (chain-rule term from
            # the per-Phibar normalisation depending on fprompt).
            # NOTE(review): nmodel2 is zero for an empty Phibar layer;
            # this division is unguarded — confirm such layers cannot
            # occur, otherwise this raises ZeroDivisionError.
            nmodel2 = nmodel[iphibar] * nmodel[iphibar]
            g_norm  = -self._norm * nevents[iphibar] / nmodel2 * self._diffrate_sum[iphibar]

            # Loop over superpackets
            for ioad in range(self._noads):

                # Continue only if model is positive
                if model_norm[ioad, iphibar] > 0.0:

                    # Pre computation
                    fb = self._counts[ioad, iphibar, self._ieng] / model_norm[ioad, iphibar]
                    fc = (1.0 - fb)
                    fa = fb / model_norm[ioad, iphibar]

                    # Compute parameter gradient: direct term plus the
                    # normalisation chain-rule term
                    g = norm[iphibar] * self._diffrate[ioad, iphibar] + g_norm * model[ioad, iphibar]

                    # Add to factor gradient
                    # NOTE(review): grad is accumulated but never used
                    # afterwards — dead accumulator kept for reference
                    grad += g

                    # Update gradient
                    self.gradient()[0] += fc * g

                    # Update curvature
                    self.curvature()[0,0] += fa * g * g

        # Set value
        self._set_value(logL)

        # Return
        return
|
151 |
|
152 |
|
153 |
# =================== #
|
154 |
# Load working arrays #
|
155 |
# =================== #
|
156 |
def load_working_arrays(filename):
    """
    Load working arrays

    Parameters
    ----------
    filename : str
        Working arrays file name

    Returns
    -------
    arrays : dict
        Working arrays
    """
    # Open the FITS file holding the working arrays
    fits = gammalib.GFits(filename)

    # Access the image extensions
    img_counts = fits.image('COUNTS')
    img_corr   = fits.image('EHACUTCORR')
    img_veto   = fits.image('VETORATE')

    # Read the axis dimensions off the counts cube
    noads   = img_counts.naxes(0)
    nphibar = img_counts.naxes(1)
    neng    = img_counts.naxes(2)

    # Allocate the working arrays
    counts     = gammalib.GNdarray(noads, nphibar, neng)
    ehacutcorr = gammalib.GNdarray(noads, nphibar)
    vetorate   = gammalib.GNdarray(noads)
    constrate  = gammalib.GNdarray(noads)

    # Copy the pixel values into the working arrays; the constant rate
    # starts out flat at unity
    for ioad in range(noads):
        vetorate[ioad]  = img_veto[ioad]
        constrate[ioad] = 1.0
        for iphibar in range(nphibar):
            ehacutcorr[ioad,iphibar] = img_corr[ioad,iphibar]
            for ieng in range(neng):
                counts[ioad,iphibar,ieng] = img_counts[ioad,iphibar,ieng]

    # Accumulate the rate totals, zeroing the constant rate wherever
    # the veto rate vanishes so both rates share the same support
    nvetorate  = 0.0
    nconstrate = 0.0
    for ioad in range(noads):
        if vetorate[ioad] > 0.0:
            nvetorate  += vetorate[ioad]
            nconstrate += constrate[ioad]
        else:
            constrate[ioad] = 0.0

    # Normalise each rate to unit sum (skip if the total is zero)
    if nvetorate > 0.0:
        for ioad in range(noads):
            vetorate[ioad] /= nvetorate
    if nconstrate > 0.0:
        for ioad in range(noads):
            constrate[ioad] /= nconstrate

    # Bundle the arrays into a dictionary and hand them back
    return {'counts'    : counts,
            'ehacutcorr': ehacutcorr,
            'vetorate'  : vetorate,
            'constrate' : constrate}
|
222 |
|
223 |
|
224 |
# =========================== #
|
225 |
# Compute logL for energy bin #
|
226 |
# =========================== #
|
227 |
def compute_logL(arrays, ieng, norm, fprompt):
    """
    Compute logL for energy bin

    Parameters
    ----------
    arrays : dict
        Working arrays
    ieng : int
        Energy bin
    norm : float
        Normalisation
    fprompt : float
        Prompt fraction [0-1]

    Returns
    -------
    logL : float
        Log-likelihood value
    """
    # Pull the working arrays into locals
    counts     = arrays['counts']
    ehacutcorr = arrays['ehacutcorr']
    vetorate   = arrays['vetorate']
    constrate  = arrays['constrate']

    # Dimensions of the counts cube (axis 0: OADs, axis 1: Phibar)
    noads   = counts.shape()[0]
    nphibar = counts.shape()[1]

    # Build the model as the fprompt-weighted mix of veto and constant
    # rates, corrected by the EHA cut factor
    model = gammalib.GNdarray(noads, nphibar)
    for ioad in range(noads):
        rate = fprompt * vetorate[ioad] + \
               (1.0-fprompt) * constrate[ioad]
        for iphibar in range(nphibar):
            model[ioad,iphibar] = rate * ehacutcorr[ioad,iphibar]

    # Rescale every Phibar layer so its model sum matches norm times
    # the observed number of events in that layer
    for iphibar in range(nphibar):
        nevents = 0.0
        nmodel  = 0.0
        for ioad in range(noads):
            if model[ioad, iphibar] > 0.0:
                nevents += counts[ioad, iphibar, ieng]
                nmodel  += model[ioad, iphibar]
        if nmodel > 0.0:
            scale = norm * (nevents / nmodel)
            for ioad in range(noads):
                model[ioad, iphibar] *= scale

    # Sum the Poisson log-likelihood over all positive model bins
    logL = 0.0
    for ioad in range(noads):
        for iphibar in range(nphibar):
            if model[ioad, iphibar] > 0.0:
                logL += counts[ioad, iphibar, ieng] * math.log(model[ioad, iphibar]) - \
                        model[ioad, iphibar]

    # Report progress on the console
    print(ieng, norm, fprompt, logL)

    # Return logL
    return logL
|
280 |
|
281 |
|
282 |
# ============ #
|
283 |
# Show results #
|
284 |
# ============ #
|
285 |
def show_results(results):
    """
    Show results

    Plot the log-likelihood profiles contained in the results
    dictionary, one colour per energy bin and one line style per
    normalisation (solid: 1.0, dotted: <1.0, dashed: >1.0).
    """
    # Set up the figure
    fig = plt.figure(figsize=(6,4))
    fig.subplots_adjust(left=0.15, right=0.99, top=0.93, bottom=0.12)
    plt.title('log-likelihood')

    # Energy-bin specific legend labels and colours; bins beyond the
    # known range fall back to an unlabelled ocker curve
    settings = {0: ('0.75-1 MeV', '#1f77b4'),   # blue
                1: ('1-3 MeV',    '#ff7f0e'),   # orange
                2: ('3-10 MeV',   '#279e68'),   # green
                3: ('10-30 MeV',  '#d62728')}   # red

    # Number of normalisations per energy bin
    nnorms = len(results['norms'])

    # Loop over energies
    for index, ieng in enumerate(results['iengs']):

        # Pick label and colour for this energy bin
        label, color = settings.get(ieng, ('', '#b5bd61'))  # ocker fallback

        # Collect one curve per normalisation
        xs, ys, ymaxs, styles, labels = [], [], [], [], []
        for inorm, norm in enumerate(results['norms']):

            # Assemble the profile for this normalisation
            x = list(results['fprompts'])
            y = list(results['logLs'][index*nnorms+inorm])
            xs.append(x)
            ys.append(y)
            ymaxs.append(max(y))

            # Only the nominal normalisation carries a legend entry
            if norm == 1.0:
                styles.append('o-')
                labels.append(label)
            elif norm < 1.0:
                styles.append('o:')
                labels.append(None)
            else:
                styles.append('o--')
                labels.append(None)

        # Shift the curves so the best value sits at zero
        ymax = max(ymaxs)
        ys = [[value - ymax for value in y] for y in ys]

        # Draw all curves of this energy bin
        for x, y, style, lab in zip(xs, ys, styles, labels):
            plt.plot(x, y, style, label=lab, color=color)

    # Decorate the axes
    plt.xlabel(r'f$_{\rm prompt}$')
    plt.ylabel(r'$\Delta$logL')
    plt.legend(loc='lower left')

    # Show plot
    plt.show()

    # Return
    return
|
365 |
|
366 |
|
367 |
# ========================== #
|
368 |
# Create log-likelihood plot #
|
369 |
# ========================== #
|
370 |
def create_logL_plot(arrays):
    """
    Create log-likelihood plot

    Scan the log-likelihood over a grid of energy bins, normalisations
    and prompt fractions and plot the resulting profiles.
    """
    # Grid of energy bins, normalisations and prompt fractions
    iengs    = [0,1,2,3]
    norms    = [0.99,1.0,1.01]
    fprompts = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]

    # Evaluate the log-likelihood on the grid, storing one profile per
    # (energy bin, normalisation) combination
    results = {'iengs'   : iengs,
               'norms'   : norms,
               'fprompts': fprompts,
               'logLs'   : []}
    for ieng in iengs:
        for norm in norms:
            results['logLs'].append([compute_logL(arrays, ieng, norm, fprompt)
                                     for fprompt in fprompts])

    # Plot the outcome
    show_results(results)

    # Return
    return
|
397 |
|
398 |
|
399 |
# ======== #
|
400 |
# Fit logL #
|
401 |
# ======== #
|
402 |
def fit_logL(arrays, ieng, norm, fprompt):
    """
    Fit logL

    Parameters
    ----------
    arrays : dict
        Working arrays
    ieng : int
        Energy bin
    norm : float
        Normalisation
    fprompt : float
        Initial prompt fraction

    Returns
    -------
    logL : float
        Log-likelihood value
    """
    # Build the parameter container holding the bounded prompt fraction
    par = gammalib.GOptimizerPar('fprompt', fprompt)
    par.range(0.0,1.0)
    par.factor_gradient(1.0) # To avoid zero gradient log message
    pars = gammalib.GOptimizerPars()
    pars.append(par)

    # Instantiate the fit function for this energy bin
    fct = rate_model(arrays, ieng, norm)

    # Run the Levenberg-Marquardt optimisation, logging to the console,
    # and follow up with the error computation
    log = gammalib.GLog()
    log.cout(True)
    opt = gammalib.GOptimizerLM(log)
    opt.optimize(fct, pars)
    opt.errors(fct, pars)

    # Fetch fit statistic and the fitted parameter with its error
    logL      = opt.value()
    fprompt   = pars[0].value()
    e_fprompt = pars[0].error()

    # Report the fit result
    print('%.3f (%7.5f +/- %7.5f)' % (logL, fprompt, e_fprompt))

    # Return logL
    return logL
|
451 |
|
452 |
|
453 |
# =========================== #
|
454 |
# Test log-likelihood fitting #
|
455 |
# =========================== #
|
456 |
def test_logL_fit(arrays):
    """
    Test log-likelihood fitting

    Fit the prompt fraction in every energy bin at nominal
    normalisation and plot the resulting log-likelihood values.
    """
    # Fit configuration: all energy bins, nominal norm, one start value
    iengs    = [0,1,2,3]
    norms    = [1.0]
    fprompts = [0.5]

    # Perform the fits and collect the log-likelihood values, one
    # profile per (energy bin, normalisation) combination
    results = {'iengs'   : iengs,
               'norms'   : norms,
               'fprompts': fprompts,
               'logLs'   : []}
    for ieng in iengs:
        for norm in norms:
            results['logLs'].append([fit_logL(arrays, ieng, norm, fprompt)
                                     for fprompt in fprompts])

    # Plot the outcome
    show_results(results)

    # Return
    return
|
483 |
|
484 |
|
485 |
# ======================== #
|
486 |
# Main routine entry point #
|
487 |
# ======================== #
|
488 |
if __name__ == '__main__':

    # Dump header
    print('')
    print('*******************')
    print('* Fit event rates *')
    print('*******************')

    # Load working arrays from the debug FITS file
    # (hard-coded file name, expected in the current working directory)
    arrays = load_working_arrays('debug_gcomdris_compute_drws_vetorate.fits')

    # Create logL plot (disabled; enable to scan the logL grid)
    #create_logL_plot(arrays)

    # Fit fprompt parameter
    # NOTE(review): the commented-out call below is missing the fprompt
    # argument — fit_logL takes (arrays, ieng, norm, fprompt)
    #fit_logL(arrays, 0, 1.0)
    test_logL_fit(arrays)