#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 12:09:47 2015
Convert individual isochrone sets downloaded from PARSEC
into a single binary (FITS) file, adding metallicity, age
and IMF weights along the way.
@author: mints
"""
from __future__ import print_function
import numpy as np
from glob import glob
from scipy.interpolate import interp1d
from astropy.io import fits
import os
import sys
from parsec_isochrones.kroupaimf import KroupaIMF
from parsec_isochrones.constants import PARAMS, LOGG_SUN
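
# Metals.dat is assumed to map metallicity Z (column 0) to [Fe/H]
# (column 1); the interpolator is used to fill the feh column of the
# output and, optionally, to build a flat-in-[Fe/H] metallicity prior.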
metals = np.loadtxt('Metals.dat')
feh = interp1d(metals[:, 0], metals[:, 1])
IMF = KroupaIMF()
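
# Command-line interface: argv[1] selects the photometric system (a key
# of PARAMS), argv[2] is the name of the output FITS file (see below).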
BANDS = sys.argv[1]
PAR = PARAMS[BANDS]
MIN_MASS_STEP = 0.0025
Av = 0.0
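# MIN_MASS_STEP sets the largest allowed gap in initial mass before
# intermediate points are interpolated (see process_single_age below);
# Av is the extinction value recorded in the meta dictionary at the end.
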
def get_weights(sample, normalize=True):
    """
    Calculate weights as bin widths when bin centers
    are given as input.
    """
    w_sample = np.zeros(len(sample))
    w_sample[1:-1] = 0.5 * (sample[2:] - sample[:-2])
    w_sample[0] = 0.5 * (sample[1] - sample[0])
    w_sample[-1] = 0.5 * (sample[-1] - sample[-2])
    if normalize:
        w_sample = w_sample / np.mean(w_sample)
    return w_sample
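

# process_single_age cleans up the evolutionary-stage column of one
# (metallicity, age) block, refines the sampling in initial mass where the
# isochrone is too sparse, and appends a weight column equal to age_weight
# times the integral of the Kroupa IMF over each initial-mass bin
# (IMF.integrate is assumed to return that integral between two masses).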
def process_single_age(data, age_weight):
    # Merge evolutionary stages: collapse the stage column (last column)
    # to 1 (stages 0-3), 2 (stages 4-6) and 3 (stages 7-9).
    for stage in [0, 1, 2, 3]:
        data[data[:, -1] == stage, -1] = 1
    for stage in [4, 5, 6]:
        data[data[:, -1] == stage, -1] = 2
    for stage in [7, 8, 9]:
        data[data[:, -1] == stage, -1] = 3
    # Detect stage variations and repair stages that go backwards:
    # e.g. the sequence 4-7-4 is replaced with 4-4-4.
    diff = np.concatenate(([1], np.diff(data[:, -1])))
    swaps = np.where(diff != 0)[0]
    for iswap, swap in enumerate(swaps):
        if diff[swap] < 0:
            data[swaps[iswap-1]:swap, -1] = data[swap, -1]
        elif swap > 0 and swap < len(data)-1:
            # Extra check for misassigned stage values at change points
            if np.abs(data[swap, 6] - data[swap+1, 6]) > 0.5:
                data[swap, -1] = data[swap-1, -1]
    # Check whether intermediate mass points are needed
    mass_diff = np.concatenate(([1], np.diff(data[:, 2])))
    add_mass_steps = np.where((mass_diff > MIN_MASS_STEP) * (data[:, 2] < 4.) *
                              (data[:, -1] == 4))[0]
    extra_rows = []
    for extra_point in add_mass_steps:
        # If the mass step exceeds MIN_MASS_STEP, insert intermediate points:
        dm = int((data[extra_point, 2] - data[extra_point-1, 2])/MIN_MASS_STEP)
        for istep in range(dm):
            extra_rows.append(data[extra_point-1] +
                              (data[extra_point] -
                               data[extra_point-1]) * (istep + 0.5) / float(dm))
    if len(extra_rows) > 0:
        # Add new rows and re-sort by initial mass
        data = np.vstack((data, np.array(extra_rows)))
        data = data[data[:, 2].argsort()]
    # Add age and IMF weights
    inf_imf = np.zeros(len(data))
    inf_imf[0] = IMF.integrate(data[0, 2], data[1, 2])
    for i in range(1, len(data)):
        inf_imf[i] = IMF.integrate(data[i - 1, 2], data[i, 2])
    data = np.hstack((data, np.array([age_weight * inf_imf]).T))
    return data
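

# process_single_file splits one metallicity's table into individual ages,
# weights each log-age slice by the width of its bin in linear age
# (normalized to sum to one), and passes every slice to process_single_age
# with the combined weight wages * w_Z.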
def process_single_file(data, w_Z):
    ages = np.unique(data[:, 1])
    ages.sort()
    age_step = ages[1] - ages[0]
    wages = np.power(10, np.concatenate(([ages[0] - age_step*0.5],
                                         ages + age_step*0.5)))
    wages = wages[1:] - wages[:-1]
    wages = wages / np.sum(wages)
    age_split = np.concatenate(([1], np.diff(data[:, 1])))
    old_split = 0
    for ii, isplit in enumerate(np.where(age_split != 0)[0][1:]):
        age_data = data[old_split:isplit]
        age_data = process_single_age(age_data, wages[ii]*w_Z)
        old_split = isplit
        yield age_data
    # Do not forget the last age slice...
    yield process_single_age(data[old_split:], wages[-1]*w_Z)
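

# Main driver: load every pre-converted isochrone file for the selected
# photometric system, attach metallicity weights, derive extra physical
# columns, and write everything into a single FITS file.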
if __name__ == '__main__':
    data = []
    # Load all isochrones...
    file_list = glob('isochrones/%s/%s_iso_*.dat.npy' % (BANDS, PAR.prefix))
    file_list.sort()
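    # Metallicity Z is parsed from the file name (the digits after '0.').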
    met = np.array([float('0.%s' % z.split('.')[-3]) for z in file_list])
    # This gives a flat prior in Z
    w_metals = get_weights(met)
    # This would give a flat prior in [Fe/H]:
    # w_metals = get_weights(feh(met))
    for ifile, xfile in enumerate(file_list):
        temp_data = np.load(xfile)
        # Reorder columns and keep only the magnitudes of the selected
        # photometric system.
        if PAR.old:
            temp_data = temp_data[:, [0, 1, 2, 3, 4, 5, 6,
                                      0, 0, 0]  # no CO
                                  + PAR.mag_columns +
                                  [-1]]
            temp_data[:, 7] = 0.  # no CO
        else:
            temp_data = temp_data[:, [0, 1, 2, 3, 4, 5, 6,
                                      9, 0, 0]  # CO
                                  + PAR.mag_columns +
                                  [7]]
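        # Unit conversions and derived quantities: the age column is
        # converted to log10(age), column 0 is set to [Fe/H] for this Z,
        # and the temperature column is converted from log(T) to T.
        # The radius (column 8) follows from the surface gravity,
        #     R / Rsun = sqrt((M / Msun) * 10**(LOGG_SUN - logg)),
        # and column 9 holds the corresponding mean density,
        # 3 * M / (4 * pi * R**3), in solar units.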
        temp_data[:, 1] = np.log10(temp_data[:, 1])
        temp_data[:, 0] = feh(met[ifile])
        temp_data[:, 5] = np.power(10, temp_data[:, 5])
        temp_data[:, 8] = np.power(10., 0.5 * (np.log10(temp_data[:, 3]) -
                                               temp_data[:, 6] + LOGG_SUN))
        temp_data[:, 9] = 3 * temp_data[:, 3] / (4. * np.pi *
                                                 temp_data[:, 8]**3)
        print(xfile, met[ifile], w_metals[ifile])
        data.extend([td for td in process_single_file(temp_data,
                                                      w_metals[ifile])])
    data = np.vstack(data)
    # Keep only rows with positive weight
    data = data[data[:, -1] > 0.]
    s = """#0 feh
    #1 age
    #2 M_ini
    #3 mass
    #4 logL/Lo
    #5 T
    #6 logg
    #7 CO
    #8 radius
    #9 density""".split()
    # ... followed by the magnitude columns and:
    # -2 stage
    # -1 Weight (added by this script)
    colnames = s[1::2] + PAR.mag_names + ['stage', 'Weight']
    print(colnames)
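    # PAR.old distinguishes the older PARSEC 1.2S tables (which lack the
    # CO column, zeroed above) from the v1.2S + COLIBRI PR16 ones.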
    if PAR.old:
        pversion = '1.2S'
    else:
        pversion = 'v1.2S + COLIBRI PR16'
    meta = {'Creator': 'Mints',
            'Ages': 'log',
            'Z': 'custom',
            'Parsec version': pversion,
            'Av': Av}
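    # NOTE: meta is assembled but never written into the FITS header in
    # this script; it presumably documents the assumptions behind the table
    # (log-spaced ages, custom Z grid, PARSEC version, extinction).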
    columns = []
    for icolumn, column in enumerate(colnames):
        if column == 'stage':
            columns.append(fits.Column(name=column, format='I',
                                       array=data[:, icolumn]))
        else:
            columns.append(fits.Column(name=column, format='E',
                                       array=data[:, icolumn]))
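    # Output FITS layout: an empty primary HDU, the full isochrone table,
    # and a third HDU listing the unique log10(age) values.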
    hdu = fits.PrimaryHDU()
    agehdu = fits.BinTableHDU.from_columns(
        [fits.Column(name='age', format='E', array=np.unique(data[:, 1]))])
    hdulist = fits.HDUList([hdu, fits.BinTableHDU.from_columns(columns),
                            agehdu])
    out = sys.argv[2]
    # Remove a pre-existing output file: writeto() refuses to overwrite
    # by default.
    if os.path.exists(out):
        os.remove(out)
    hdulist.writeto(out)
    print('Done.')