diff --git a/_scripts/photometry.py b/_scripts/photometry.py
new file mode 100644
index 0000000..1b2f9f3
--- /dev/null
+++ b/_scripts/photometry.py
@@ -0,0 +1,259 @@
+import numpy as np
+import sep
+from astropy.io import fits
+from astropy import units as u
+from astropy.coordinates import SkyCoord
+from photutils.aperture import CircularAperture
+from photutils.aperture import aperture_photometry
+from glob import glob
+from photutils.segmentation import make_source_mask
+from astropy.time import Time
+from collate import hdultocluster, cluster_search_radec
+import csv
+import warnings
+warnings.filterwarnings(action='ignore')
+
+
+def _in_cone(coord: SkyCoord, cone_center: SkyCoord, cone_radius: u.degree):
+    """
+    Checks whether SkyCoord coord is in the cone described by cone_center and
+    cone_radius (flat-sky approximation; no cos(dec) correction).
+    """
+    d = (coord.ra - cone_center.ra) ** 2 + (coord.dec - cone_center.dec) ** 2
+    return d < (cone_radius ** 2)
+
+
+def norm(ref_table):
+    ref_mag = ref_table['phot_g_mean_mag']
+    g_rp_g = ref_table["phot_rp_mean_mag"]
+    g_bp_g = ref_table["phot_bp_mean_mag"]
+
+    # Make sure all three Gaia magnitudes are present before computing the
+    # transformed magnitudes
+    gaia_mag_transformed = []
+    r_transformed = []
+    diffs = []
+    for i in range(len(ref_mag)):
+        if 25 > ref_mag[i] > 0.5 and 25 > g_rp_g[i] > 0.5 and 25 > g_bp_g[i] > 0.5:
+            # Transform the Gaia G magnitude to SDSS12 g and r
+            bp_rp = g_bp_g[i] - g_rp_g[i]
+            diffs.append(bp_rp)
+            gaia_mag_transformed.append(ref_mag[i] - 0.13518 + 0.4625 * bp_rp + 0.25171 * bp_rp ** 2 - 0.021349 * bp_rp ** 3)
+            r_transformed.append(ref_mag[i] + 0.12879 - 0.24662 * bp_rp + 0.027464 * bp_rp ** 2 + 0.049465 * bp_rp ** 3)
+        else:
+            gaia_mag_transformed.append(np.nan)
+            r_transformed.append(np.nan)
+    gaia_mag_transformed = np.array(gaia_mag_transformed)
+    r_transformed = np.array(r_transformed)
+
+    return gaia_mag_transformed, r_transformed
+
+
+def photometry(x, y, aperture, im):
+    '''
+    Calculates the instrumental magnitude of the designated sources by
+    performing aperture photometry on a masked, background-subtracted image.
+    '''
+    data = im['SCI'].data
+    gain = im['SCI'].header['GAIN']
+
+    # Masking and background subtraction
+    mask = make_source_mask(data, nsigma=2, npixels=5, dilate_size=11)
+    data = data.byteswap().newbyteorder()
+    bkg = sep.Background(data, mask=mask)
+    imgdata_bkgsub = data - bkg.back()
+    errmap = np.sqrt(np.abs(imgdata_bkgsub)) / gain  # sqrt(sqrt(x**2)) == sqrt(|x|)
+
+    # Photometry
+    source_pos = np.transpose((x, y))
+    source_ap = CircularAperture(source_pos, r=aperture)
+    source_flux = aperture_photometry(imgdata_bkgsub, source_ap, error=errmap)
+    mag = -2.5 * np.log10(source_flux['aperture_sum'] * gain)
+    # Error propagation for m = -2.5*log10(F): dm = (2.5 / ln 10) * dF / F;
+    # the gain factors cancel
+    magerr = 2.5 / np.log(10) * (source_flux['aperture_sum_err'] / source_flux['aperture_sum'])
+
+    return mag, magerr
+
+
+def Error_Finder(bkgd, mag, gain, FWHM):
+    # Noise model: constant sky term over the seeing disk plus source counts
+    cnst_err = gain * bkgd * np.pi * (FWHM / 2) ** 2
+    mag_err = 1.086 / gain ** 2 * np.sqrt(cnst_err + gain * 10 ** (-0.4 * mag)) / 10 ** (-0.4 * mag)
+    return mag_err
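+
+# Editor's sketch (hedged, not called by this script): a numeric check of the
+# magnitude-error propagation used in photometry() above. For m = -2.5*log10(F),
+# dm = (2.5 / ln 10) * (dF / F), so a 1% flux error gives ~0.0109 mag.
+def _check_magerr_propagation():
+    flux, flux_err = 10000.0, 100.0  # assumed example values
+    dm = 2.5 / np.log(10) * (flux_err / flux)
+    assert abs(dm - 0.01086) < 1e-4
+    return dm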
+
+
+def bkgd_fit(bkgd_guess, zp, i_mag, i_g, gain, fwhm):
+    # Grid search: minimizes the median absolute deviation of the error model
+    params = []
+    median_list = []
+    for bkgd in bkgd_guess:
+        params.append(bkgd)  # record this guess
+        model_output = Error_Finder(bkgd, i_mag, gain, fwhm)
+        abs_devs = np.abs(i_g - model_output + zp)
+        med_abs_dev = np.median(abs_devs)
+        median_list.append(med_abs_dev)
+    best_params = params[int(np.argmin(median_list))]
+    return best_params
+
+
+def mad_curve_fit(ZP_guess, c0_guesses, i_mag, g, g_r):
+    # Grid search over (zp, c0): minimizes the median absolute deviation of
+    # the calibration model g = i_mag + zp + c0 * (g - r)
+    params = []
+    median_list = []
+    for zp in ZP_guess:
+        for c0 in c0_guesses:
+            guess_params = [zp, c0]
+            params.append(guess_params)  # record this guess pair
+            abs_devs = np.abs(g - i_mag - zp - c0 * g_r)
+            med_abs_dev = np.median(abs_devs)
+            median_list.append(med_abs_dev)
+    best_params = params[int(np.argmin(median_list))]
+    return best_params
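+
+# Editor's sketch (hedged, not called by this script): recovering a known zero
+# point and color term with mad_curve_fit on synthetic, noiseless data. The
+# "true" values zp=27.3 and c0=0.12 are invented; the grid search should land
+# on the grid nodes nearest those values.
+def _demo_mad_curve_fit():
+    rng = np.random.default_rng(0)
+    g = rng.uniform(14, 18, 50)       # synthetic catalog g magnitudes
+    g_r = rng.uniform(-0.2, 1.0, 50)  # synthetic g-r colors
+    i_mag = g - 27.3 - 0.12 * g_r     # instrumental mags under the model
+    zp, c0 = mad_curve_fit(np.linspace(25, 30, 80), np.linspace(-1.5, 1.5, 20), i_mag, g, g_r)
+    return zp, c0  # expect roughly 27.3 and 0.12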
+
+
+def get_reference_stars(cat_catalog, ref_catalog, variable_catalog, ref_coords):
+    # Remove reference stars that are also variable
+    variable_stars_in_catalog = [cluster_search_radec(variable_catalog, coord.ra.deg, coord.dec.deg) for coord in ref_coords]
+    variable_star_coords = [SkyCoord(v['ra'], v['dec'], frame='icrs', unit='degree') for v in variable_stars_in_catalog][0]
+    # Keep only the coordinates that match none of the variable-star coordinates
+    nonvar_idx = [c for c in range(len(ref_coords)) if all(v != ref_coords[c] for v in variable_star_coords)]
+    ref_coords = np.array(ref_coords)[nonvar_idx]
+
+    # These are the comp stars in the cat table itself, so these stars are in our images
+    sdi_reference_stars = [cluster_search_radec(cat_catalog, coord.ra.deg, coord.dec.deg) for coord in ref_coords]
+    # And these are the same reference stars but from Gaia
+    ref_stars = [cluster_search_radec(ref_catalog, coord.ra.deg, coord.dec.deg) for coord in ref_coords]
+
+    # Now check for zero x, y, and a in the comp stars. If there are zero
+    # values, remove the comp source entirely
+    c_idx = []
+    for i in range(len(sdi_reference_stars)):
+        if sdi_reference_stars[i]['x'].all() != 0 and sdi_reference_stars[i]['y'].all() != 0 and sdi_reference_stars[i]['a'].all() != 0:
+            c_idx.append(i)
+
+    sdi_reference_stars = np.array(sdi_reference_stars)[c_idx]  # the flux of the reference stars in our images
+    ref_stars = np.array(ref_stars)[c_idx]  # the apparent magnitude of the reference stars in Gaia
+    return sdi_reference_stars, ref_stars
+
+
+# -----------Retrieve files---------------
+files = glob(r'C:\Users\Sam Whitebook\Documents\Visual Studio 2010\Projects\Lubin Lab\Light_Curves\sdi_output\*.fits', recursive=True)
+hduls = [fits.open(f) for f in files]
+
+# Science images:
+sci_f = glob(r'C:\Users\Sam Whitebook\Documents\Visual Studio 2010\Projects\Lubin Lab\Light_Curves\GTAnd_SCI\*.fits', recursive=True)
+sci_images = [fits.open(f) for f in sci_f]
+
+del files, sci_f
+
+
+# ----------Collect our sources-----------------
+ref_ra = hduls[0]['REF'].data['ra']
+ref_dec = hduls[0]['REF'].data['dec']
+ref_coords = SkyCoord(ref_ra, ref_dec, frame='icrs', unit='degree')
+
+ref_catalog = hdultocluster(hduls, name="REF", tablename="ROBJ")  # reference stars from Gaia DR2
+cat_catalog = hdultocluster(hduls, name='CAT', tablename='OBJ')  # all sources found in the image
+variable_catalog = hdultocluster(hduls, name='XRT', tablename='XOBJ')  # all variable sources found after subtraction
+
+target_coord = SkyCoord(11.291, 41.508, frame='icrs', unit='degree')
+
+del ref_ra, ref_dec
+
+
+# ----------------Find reference stars close to the target------------------
+sdi_reference_stars, ref_stars = get_reference_stars(cat_catalog, ref_catalog, variable_catalog, ref_coords)
+
+# This is the target star in every image
+target_star = cluster_search_radec(cat_catalog, target_coord.ra.deg, target_coord.dec.deg)
+
+# Remove all the images for which the target star could not be found, i.e.
+# the entries where x is zero
+target_present_idx = np.where(target_star['x'] != 0)
+target = target_star[target_present_idx]
+
+# Next remove those indices in all the comp stars and sources, removing those images
+ref_stars = [ref_stars[i][target_present_idx] for i in range(len(ref_stars))]
+sdi_reference_stars = [sdi_reference_stars[i][target_present_idx] for i in range(len(sdi_reference_stars))]
+hduls_with_target = [i for i in target_present_idx[0]]
+
+# Convert Gaia G magnitudes to SDSS g' and r' magnitudes, since our images are
+# taken in the SDSS g' r' filters
+ref_stars = np.array([norm(i) for i in ref_stars])
+reference_star_g_mag = ref_stars[:, 0, 0]
+reference_star_r_mag = ref_stars[:, 1, 0]
+
+del target_coord, ref_coords, ref_catalog, cat_catalog, variable_catalog, hduls
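+
+# Editor's sketch (hedged, not called by this script): the Gaia->SDSS color
+# transformation used in norm(), applied to one made-up star (G = 15.0,
+# BP-RP = 0.8) to show the size of the correction.
+def _demo_color_transform():
+    G, bp_rp = 15.0, 0.8  # assumed example values
+    g_sdss = G - 0.13518 + 0.4625 * bp_rp + 0.25171 * bp_rp ** 2 - 0.021349 * bp_rp ** 3
+    r_sdss = G + 0.12879 - 0.24662 * bp_rp + 0.027464 * bp_rp ** 2 + 0.049465 * bp_rp ** 3
+    return g_sdss, r_sdss  # roughly 15.39 and 14.97 for these inputs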
+
+# ----------------Estimate Uncertainty and Target Magnitude------------------
+target_mag = []
+target_magerr = []
+times = []
+for im in hduls_with_target:
+    try:
+        t = sci_images[im][0].header['DATE-OBS']
+        time = Time(t)
+        times.append(time.mjd)
+    except (KeyError, IndexError):
+        t = sci_images[im]['SCI'].header['OBSMJD']
+        times.append(t)
+
+    gain = sci_images[im][0].header['GAIN']
+
+    # Calculate the instrumental magnitude (flux) of the star.
+    # 'a' is a sloppy stand-in for the aperture radius; sep.kron_radius or
+    # flux_radius may be better choices.
+    # Index [0] on each mag extracts the numerical value from the column object.
+    target_inst_mag = photometry(target['x'][im], target['y'][im], target['a'][im], sci_images[im])[0][0]
+
+    instrumental_mag = []
+    in_magerr = []
+    for i in range(len(sdi_reference_stars)):
+        arr = photometry(sdi_reference_stars[i]['x'][im], sdi_reference_stars[i]['y'][im], sdi_reference_stars[i]['a'][im], sci_images[im])
+        instrumental_mag.append(arr[0][0])
+        in_magerr.append(arr[1][0])
+
+    # Only keep reference stars brighter than 18th magnitude (20 is roughly
+    # the faintest magnitude the pipeline can detect)
+    bright_stars_idx = []
+    ref_magerr = 0.16497
+    for i in range(len(instrumental_mag)):
+        if reference_star_g_mag[i] < 18:
+            bright_stars_idx.append(i)
+    instrumental_mag = np.array(instrumental_mag)[bright_stars_idx]
+    ref_mag = np.array(reference_star_g_mag)[bright_stars_idx]
+    r_ref_mag = np.array(reference_star_r_mag)[bright_stars_idx]
+
+    # Keep only the entries with non-nan reference magnitudes
+    nan_idx = np.where(~np.isnan(ref_mag))[0]
+    instrumental_mag = np.array([instrumental_mag[i] for i in nan_idx])
+    g = np.array([ref_mag[i] for i in nan_idx])
+    r = np.array([r_ref_mag[i] for i in nan_idx])
+    g_r = g - r
+    inst_minus_g = instrumental_mag - g
+
+    # Assuming Gaia's magnitudes are the true values of the star, we can use
+    # our instrumental g-band mags to estimate the uncertainty.
+    zp_guesses = np.linspace(25, 30, 80)
+    bkgd_guesses = np.linspace(15000, 25000, 100)
+    c0_guesses = np.linspace(-1.5, 1.5, 20)
+
+    best_params = mad_curve_fit(zp_guesses, c0_guesses, instrumental_mag, g, g_r)
+    zp = best_params[0]
+    c0 = best_params[1]
+
+    # gain = 1.6 and FWHM = 12.5 are hard-coded instrument values
+    bkgd = bkgd_fit(bkgd_guesses, zp, instrumental_mag, inst_minus_g, 1.6, 12.5)
+
+    target_mag_err = Error_Finder(bkgd, target_inst_mag, 1.6, 12.5)
+
+    target_mag.append(target_inst_mag + zp)
+    target_magerr.append(target_mag_err)
+
+rows = zip(target_mag, target_magerr, times)
+wfname = 'GTAnd_least_abs_dev_g_i.csv'
+
+with open(wfname, 'w', newline='') as f:
+    writer = csv.writer(f)
+    for row in rows:
+        writer.writerow(row)
\ No newline at end of file
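A minimal sketch (editor's illustration, not part of the patch) of consuming
the CSV written above; the column order matches the zip(target_mag,
target_magerr, times) line:

    import csv
    import matplotlib.pyplot as plt

    with open('GTAnd_least_abs_dev_g_i.csv') as f:
        rows = [(float(m), float(e), float(t)) for m, e, t in csv.reader(f)]
    mags, errs, mjds = zip(*rows)
    plt.errorbar(mjds, mags, yerr=errs, linestyle='none', marker='o')
    plt.gca().invert_yaxis()  # brighter magnitudes plot higher
    plt.xlabel('MJD')
    plt.ylabel("SDSS g' magnitude")
    plt.show()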
diff --git a/sdi/photometry.py b/sdi/photometry.py
new file mode 100644
index 0000000..35d969b
--- /dev/null
+++ b/sdi/photometry.py
@@ -0,0 +1,321 @@
+import glob
+import click
+import sep
+import astropy.units as u
+import astropy.coordinates as coord
+import matplotlib.pyplot as plt
+import numpy as np
+import astroalign as aa
+from astropy.io import fits
+from astroquery.ipac.irsa import Irsa
+from astroquery.sdss import SDSS
+from astropy.wcs import WCS
+from astropy.wcs.utils import pixel_to_skycoord
+from astropy.coordinates import SkyCoord
+from matplotlib.colors import LogNorm, TABLEAU_COLORS
+from photutils.aperture import aperture_photometry, CircularAperture, CircularAnnulus, ApertureStats
+from regions import CirclePixelRegion, PixCoord
+from photutils import centroids
+from . import _cli as cli
+
+
+def photometry(files_dir, out_dir, filter, plot_mode, median_divide):  # barebones, can add more later
+    files = sorted(glob.glob(files_dir + '/*'))  # filepath; this is a directory of directories, one per observing run
+    outstr = out_dir
+    print(outstr)
+    data = []
+    for i, run_dir in enumerate(files):
+        obs_set = dict(images=[], header=None, template=None, refs=None, WCS=None, run=i)  # dictionary used to sort input files
+        images = sorted(glob.glob(run_dir + '/*'))
+        print(run_dir)
+        hdus = [fits.open(f) for f in images]  # open the fits files
+        frames = [h[1].data for h in hdus]  # image data
+        header = hdus[0][1].header  # the header of the first image stands in for the whole run; individual headers are not needed
+        try:  # some datasets won't align; skipping alignment is not ideal, but it lets the run proceed
+            aligned = [aa.register(f, frames[0])[0] for f in frames]  # use astroalign for the time being; a multiprocessed version is planned
+        except Exception:
+            aligned = frames
+            print("DID NOT ALIGN")
+
+        template = np.median(aligned, axis=0)  # median-value template
+        w = WCS(hdus[0][1].header)  # WCS object, used to transform pixel values to RA-Dec coordinates
+
+        # Fill out the set object
+        obs_set['images'] = aligned
+        obs_set['header'] = header
+        obs_set['template'] = template
+        obs_set['WCS'] = w
+        bkg_phot = sep.Background(template)  # background subtraction for source extraction
+        obs_set['refs'] = sep.extract(template - bkg_phot.back(), bkg_phot.globalrms * 3, minarea=25, segmentation_map=False)  # find sources in the template
+        # this ensures that the source list is the same for all sets
+        obs_set['run'] = i
+        print(len(obs_set['refs']))  # check
+        data.append(obs_set)
+
+    sources = []
+    n_ref = 0  # check counter
+    # observation filter
+    for c, src in enumerate(data[0]['refs']):  # use the sources picked out in the first run so that everything is measured against one source list
+        x = src['x']
+        y = src['y']
+        coord = pixel_to_skycoord(x, y, data[0]['WCS']).transform_to('icrs')  # WCS transformation for the pixel coordinates
+        search = SDSS.query_crossid(coord, fields=['ra', 'dec', 'psfMag_{}'.format(filter), 'psfMagErr_{}'.format(filter)], radius=15 * u.arcsec, region=False)  # narrow cone search to find the source by ra, dec
+        radius = (src['xmax'] - src['xmin']) / 2
+        ref = dict(ra_dec=coord, source_id=c, rad=radius, ref_mag=None, ref_mag_err=None, border=False)  # pixel positions are not static across observing runs, so save only ra/dec and convert back later
+        for i in range(len(data)):  # create mag slots for each observing run
+            ref['calibrated_mags_{}'.format(i)] = []  # the calibrated mags should line up, but keeping them separated helps visualize day-to-day changes later on
+            ref['instrumental_mags_{}'.format(i)] = []
+            ref['inst_mag_errs_{}'.format(i)] = []
+        if search:  # if the SDSS query returned results, continue
+            if search['type'] == 'STAR':  # keep only sources of type STAR
+                ref['ref_mag'] = search['psfMag_{}'.format(filter)]  # add reference mag
+                ref['ref_mag_err'] = search['psfMagErr_{}'.format(filter)]  # add SDSS error
+                n_ref += 1
+        sources.append(ref)
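+
+    # Editor's sketch (hedged, never called): the crossmatch pattern above
+    # applied to a single made-up position. psfMag_g / psfMagErr_g are real
+    # SDSS photoObj fields, but the coordinates here are arbitrary.
+    def _demo_crossid():
+        c = SkyCoord(180.0 * u.deg, 0.0 * u.deg, frame='icrs')
+        return SDSS.query_crossid(c, fields=['ra', 'dec', 'psfMag_g', 'psfMagErr_g'], radius=15 * u.arcsec, region=False)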
+
+    for obs_set in data:
+        print(obs_set['run'])
+        for i, image in enumerate(obs_set['images']):
+            print(i)
+            N_r = obs_set['header']['RDNOISE']  # readout noise
+            for source in sources:
+                coords = SkyCoord.to_pixel(source['ra_dec'], wcs=obs_set['WCS'])  # pixel values of the source from RA/Dec
+                # flag sources that exceed the image boundaries in later image sets
+                if (obs_set['header']['NAXIS1'] - coords[0]) < 0 or coords[0] < 0 or (obs_set['header']['NAXIS2'] - coords[1]) < 0 or coords[1] < 0:
+                    source['border'] = True
+                    print(source['source_id'], coords[0], coords[1])
+                    continue
+
+                pcoords = PixCoord(coords[0], coords[1])  # another coordinate object, needed for regions
+                radius_i = source['rad']
+                radius_o_0 = radius_i + 5  # inner annulus radius
+                radius_o_1 = radius_o_0 + 5  # outer annulus radius
+
+                source_circle = CirclePixelRegion(pcoords, radius_i).to_mask()  # mask with the source's shape
+                source_aperture = source_circle.cutout(image)  # data cutout of the source
+
+                background_annulus = CircularAnnulus(coords, radius_o_0, radius_o_1)
+                background_mean = ApertureStats(image, background_annulus).mean
+
+                source_flux_pix = source_aperture - (source_circle * background_mean)  # pixel-wise background subtraction
+                source_flux_total = np.sum(source_flux_pix)  # total flux
+
+                readout_sum_square = np.sum(source_circle * np.float64(N_r ** 2))  # sum of the squared readout noise over the background-subtracted source aperture
+                # SNR error budget
+                delta_n = (readout_sum_square + source_flux_total + (((radius_i ** 2) / ((radius_o_1 ** 2) - (radius_o_0 ** 2))) ** 2) * (readout_sum_square + aperture_photometry(image, background_annulus)['aperture_sum'][0])) ** (1 / 2)
+                if source_flux_total <= 0:
+                    # Oversubtracted LCO backgrounds can drive the net flux
+                    # negative; abs() is a stopgap, probably not the correct treatment
+                    inst_mag = -2.5 * np.log10(abs(source_flux_total))
+                else:
+                    inst_mag = -2.5 * np.log10(source_flux_total)
+                delta_m = 2.5 * np.log10(np.e) * abs(delta_n / source_flux_total)  # magnitude error
+                source['instrumental_mags_{}'.format(obs_set['run'])].append(inst_mag)
+                source['inst_mag_errs_{}'.format(obs_set['run'])].append(delta_m)
+
+    # NEED TO PROPAGATE ERROR FROM THIS SECTION
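+
+    # The delta_n expression above follows the standard CCD aperture-photometry
+    # noise sum,
+    #   delta_n^2 = sum(N_r^2) + F_src
+    #               + (r_i^2 / (r_o1^2 - r_o0^2))^2 * (sum(N_r^2) + F_annulus),
+    # i.e. source shot noise plus readout noise over the aperture, plus the
+    # annulus background terms scaled by the squared aperture-to-annulus area
+    # ratio (note the annulus term currently reuses the aperture readout sum),
+    # and delta_m = 2.5 * log10(e) * |delta_n / F_src| converts the count error
+    # to a magnitude error.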
+
+    for obs_set in data:
+        res = []
+        mag_thresh = 15  # magnitude threshold for picking reference stars
+
+        # reference stars must have an SDSS reference mag, be brighter than
+        # the threshold, and lie inside the image
+        inst_mags = [np.mean(source['instrumental_mags_{}'.format(obs_set['run'])]) for source in sources if source['ref_mag'] is not None and source['ref_mag'] < mag_thresh and source['border'] == False]
+        sky_mags = [source['ref_mag'][0] for source in sources if source['ref_mag'] is not None and source['ref_mag'] < mag_thresh and source['border'] == False]
+        # Linear model for calibration.
+        # This is the first round of modeling, with outliers included.
+        print(len(inst_mags), len(sky_mags))
+        p0 = np.polyfit(inst_mags, sky_mags, deg=1)  # linear fit from instrumental to SDSS mags
+        x = np.arange(-15, 0)  # x values for plotting the fit line
+        y = p0[0] * x + p0[1]  # fit line
+        plt.plot(x, y, color='b', label="Model With Outliers, m = {}, b = {}".format("%.4f" % p0[0], "%.4f" % p0[1]))
+        diffs = [s['ref_mag'][0] - (np.mean(s['instrumental_mags_{}'.format(obs_set['run'])]) * p0[0] + p0[1]) for s in sources if s['ref_mag'] is not None and s['ref_mag'] < mag_thresh and s['border'] == False]
+        stdv = np.std(diffs)
+        inst_mags_final = []
+        sky_mags_final = []
+        outlier_inst = []
+        outlier_sky = []
+
+        # rudimentary sigma clipping to remove outliers from the calibration model
+        for i, diff in enumerate(diffs):
+            if abs(diff) < stdv:
+                inst_mags_final.append(inst_mags[i])
+                sky_mags_final.append(sky_mags[i])
+            else:
+                outlier_inst.append(inst_mags[i])
+                outlier_sky.append(sky_mags[i])
+        p1 = np.polyfit(inst_mags_final, sky_mags_final, deg=1)  # recalculate the calibration model without outliers
+        # p2 = np.polyfit(inst_mags_final, sky_mags_final, deg = 0)
+        # print(p2[0])
+        print("first try: {}".format(p0))  # print the slope of each model; in theory they should come out to around 1
+        print("second try: {}".format(p1))
+
+        plt.scatter(outlier_inst, outlier_sky, color='b', s=4, label="Outliers")
+        plt.scatter(inst_mags_final, sky_mags_final, color='r', s=4, label="Kept")
+        plt.plot(x, [i * p1[0] + p1[1] for i in x], color='r',
+                 label="Model Without Outliers, m = {}, b = {}".format("%.4f" % p1[0], "%.4f" % p1[1]))
+        # plt.plot(x, [i + p1[1] for i in x], color = 'g', label = "unity")
+        plt.xlabel("Instrumental Magnitude SDSS g-band")
+        plt.ylabel("SDSS Reference Magnitude g-band")
+        plt.title("Instrumental vs Reference Magnitude")
+        plt.legend()
+        # add dynamic plot saving back in here
+        plt.show()
+
+        # add calibrated mags to sources, using whichever model's slope is closer to 1
+        best_fit = p1 if abs(1 - p1[0]) < abs(1 - p0[0]) else p0
+        for source in sources:
+            vals = [np.float64(val * best_fit[0] + best_fit[1]) for val in source['instrumental_mags_{}'.format(obs_set['run'])]]
+            source['calibrated_mags_{}'.format(obs_set['run'])] = vals  # assigning the list in one step avoids the earlier issue where calibrated magnitudes were added to the dict as individual arrays
+
+    for obs_set in data:  # this diagnostic section is entirely optional; could be nested in an if statement
+        print(obs_set['run'])
+        differences = [s['ref_mag'] - np.mean(s['calibrated_mags_{}'.format(obs_set['run'])]) for s in sources if s['ref_mag'] is not None and s['border'] == False]
+        mean_diff = np.mean(differences)
+        std_diff = np.std(differences)
+        med_diff = np.median(differences)
+        print(mean_diff, med_diff, std_diff)
+
+        X = [np.mean(s['calibrated_mags_{}'.format(obs_set['run'])]) for s in sources if s['ref_mag'] is not None and s['ref_mag'] < mag_thresh and s['border'] == False]
+        Y = [s['ref_mag'] for s in sources if s['ref_mag'] is not None and s['ref_mag'] < mag_thresh and s['border'] == False]
+        p3 = np.polyfit(X, Y, deg=1)
+        x_vals = np.arange(9.5, 18.5)
+        plt.plot(x_vals, [x * p3[0] + p3[1] for x in x_vals], color='b', label='m = {}, b = {}'.format("%.4f" % p3[0], "%.4f" % p3[1]))
+
+        print(p3)
+
+        for s in sources:
+            if s['ref_mag'] is not None:
+                if abs(s['ref_mag'] - np.mean(s['calibrated_mags_{}'.format(obs_set['run'])])) > 3 * std_diff:
+                    plt.errorbar(np.mean(s['calibrated_mags_{}'.format(obs_set['run'])]), s['ref_mag'], xerr=np.median(s['inst_mag_errs_{}'.format(obs_set['run'])]), yerr=s['ref_mag_err'], linestyle='none', marker='o', markersize=2.5, color='b')
+                    print(s['source_id'])
+                else:
+                    plt.errorbar(np.mean(s['calibrated_mags_{}'.format(obs_set['run'])]), s['ref_mag'], xerr=np.median(s['inst_mag_errs_{}'.format(obs_set['run'])]), yerr=s['ref_mag_err'], linestyle='none', marker='o', markersize=2.5, color='r')
+
+        plt.title("Calibrated vs Reference Magnitude")
+        plt.xlabel("TRIPP Calibrated Magnitude SDSS g-band")
+        plt.ylabel("SDSS Reference Magnitude g-band")
+        plt.legend()
+        # add dynamic plot saving back in
+        plt.show()
+
+    ######
+    runs = np.arange(0, len(data))
+    colors = []
+    for run in runs:  # set up the color scheme for plotting: one color index per image per run
+        colors.extend([run] * len(sources[0]['calibrated_mags_{}'.format(run)]))
+    c = np.array(colors)
+    colors_options = np.array(['#3B5BA5', '#E87A5D', '#F3B941', '#f00a42', '#6F9BA4', '#9D9EA2', "#C5E86C", "#B4B5DF"])  # hexadecimal colors; many more should be added even if they don't all get used. Randomly generated hex colors would work but would be inconsistent between runs.
+    #######
+    mode = plot_mode
+    med_dev = median_divide
+    if med_dev:  # only run this for single-night observing runs. Make it part of each night?
+        median_curves = []
+        for i, run in enumerate(runs):
+            median_mags = []
+            for source in sources:
+                if source['border'] == False:
+                    median_mags.append([source[f'calibrated_mags_{i}']])
+            median_curve = np.median(median_mags, axis=0) / np.median(median_mags)  # normalized median light curve
+            median_curves.append(median_curve)
+        curve = np.concatenate(median_curves)
+        med_curve_final = np.concatenate(curve)
+        p = np.arange(0, len(med_curve_final))
+        plt.scatter(p, med_curve_final)
+        plt.show()
+    #######
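+
+    # Editor's sketch (hedged, never called): how the normalized median curve
+    # computed above could divide out trends shared by all stars (airmass,
+    # thin cloud) from a single source's light curve.
+    def _detrend(mags, med_curve):
+        return np.asarray(mags, dtype=float) / np.asarray(med_curve, dtype=float)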
+    transient_candidates = []
+    # plot light curves
+    for source in sources:
+        if source['border'] == True:
+            continue
+        x, y = SkyCoord.to_pixel(source['ra_dec'], wcs=data[0]['WCS'])
+        tick_locs = []
+        if mode == 0:
+            MAGS = np.concatenate([source['calibrated_mags_{}'.format(i)] for i in runs])
+            ERRS = np.concatenate([source['inst_mag_errs_{}'.format(i)] for i in runs])
+            r = np.arange(0, len(MAGS), 1)
+            set_counter = 0
+            for i, val in enumerate(MAGS):
+                # this only works if all sets have the same number of frames;
+                # it should be made dynamic for nights with different image counts
+                if len(data) > 1 and i % len(data[set_counter]['images']) == 0:
+                    plt.errorbar(i, val, yerr=ERRS[i], linestyle='none', marker='o', markersize=4, c=colors_options[c[i]])
+                    tick_locs.append(i)
+                    set_counter += 1
+                else:
+                    plt.errorbar(i, val, yerr=ERRS[i], linestyle='none', marker='o', markersize=4, c=colors_options[c[i]])
+            if len(data) > 1:
+                plt.xticks(tick_locs, [data[i]['header']['DAY-OBS'] for i in runs], rotation=20)
+
+        if mode == 1:
+            MAGS = [np.median(source['calibrated_mags_{}'.format(i)]) for i in runs]
+            ERRS = [np.median(source['inst_mag_errs_{}'.format(i)]) for i in runs]
+            r = np.arange(0, len(MAGS), 1)
+            for i, mag in enumerate(MAGS):
+                plt.errorbar(i, mag, yerr=ERRS[i], linestyle='none', marker='o', markersize=4, c=colors_options[r[i]])
+                tick_locs.append(i)
+            plt.xticks(tick_locs, [data[i]['header']['DAY-OBS'] for i in runs], rotation=20)
+
+        Chis = []
+        avg_mag = np.mean(MAGS)
+        for i, m in enumerate(MAGS):
+            chi_i = ((m - avg_mag) ** 2) / (ERRS[i] ** 2)
+            Chis.append(chi_i)
+        dof = len(MAGS) - 1
+        chi_dof = np.sum(Chis) / dof
+
+        # transient/variable detection:
+        dev = np.std(MAGS)
+        print(dev)
+
+        plt.title("X, Y: {}, {}; RA_DEC: {}; CHI2: {}, ID: {}".format("%.2f" % x, "%.2f" % y, source['ra_dec'], "%.2f" % chi_dof, source['source_id']))
+        plt.plot(r, np.ones(len(r)) * avg_mag, label="TRIPP AVG MAG", linestyle='--', color='b')
+        if source['ref_mag'] is not None:
+            if source['ref_mag'] < 16:
+                plt.plot(r, np.ones(len(r)) * source['ref_mag'], linestyle='--', color='r', label="SDSS MAG [REF]")
+            if source['ref_mag'] >= 16:
+                plt.plot(r, np.ones(len(r)) * source['ref_mag'], linestyle='--', color='g', label="SDSS MAG [NOT REF]")
+        plt.ylabel("TRIPP Magnitude, SDSS-{}'".format(filter))
+        plt.xlabel("Observation Date YYYY/MM/DD")
+        plt.legend()
+        # dynamic plot saving
+        plt.gca().invert_yaxis()
+        # print("Plotted curve")
+        plt.show()
+
+        plt.title("Source Number: {}, Position: {}, {}".format(source['source_id'], "%.2f" % x, "%.2f" % y))
+        circle0 = plt.Circle((x, y), source['rad'], color='r', fill=False)
+        circle1 = plt.Circle((x, y), source['rad'] + 5, color='b', fill=False)  # inner annulus edge
+        circle2 = plt.Circle((x, y), source['rad'] + 10, color='g', fill=False)  # outer annulus edge
+        ax = plt.gca()
+        ax.add_patch(circle0)
+        ax.add_patch(circle1)
+        ax.add_patch(circle2)
+        plt.imshow(data[0]['template'], cmap='gray', norm=LogNorm(vmin=1, vmax=200), origin='lower')
+        # dynamic plot saving here
+        # print("plotted location")
+        plt.show()
+    ######
+
+
+@cli.cli.command("photometry")
+@click.option('-d', '--files_dir', type=str, help="Specify path to directory of fits files.")
+@click.option('-D', "--out_dir", type=str, help="Specify path to directory for output files", required=False)
+@click.option('-f', "--filter", type=str, default='r', help="Specify imaging filter for data set using SDSS color scheme", required=True)
+@click.option('-p', "--plot_mode", type=int, default=0, help="Specify plotting mode", required=False)
+@click.option('-m', "--median_divide", type=bool, default=True, help="Divides by a median light curve to reduce systematic trends in light curves", required=False)
+# @cli.operator
+def photometry_cmd(files_dir, out_dir="None", filter='r', plot_mode=0, median_divide=True):
+    """
+    Runs aperture photometry on a directory of FITS images and produces
+    calibrated light curves.
+    """
+    return photometry(files_dir, out_dir, filter, plot_mode, median_divide)
\ No newline at end of file
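A hedged sketch (editor's illustration, not part of the patch) of driving the
new command programmatically with click's test runner; the directory path is a
placeholder, and the shell equivalent would be
`sdi photometry -d /path/to/runs -f g -p 1`:

    from click.testing import CliRunner
    from sdi.photometry import photometry_cmd

    runner = CliRunner()
    result = runner.invoke(photometry_cmd, ["-d", "/path/to/runs", "-f", "g", "-p", "1"])
    print(result.output)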
""" + + # Retrieve hduls from virtual memory + print(hduls) + hduls = [i for i in hduls] + # This import is here because the GAIA import slows down `import sdi` # substantially; we don't want to import it unless we need it from astroquery.gaia import Gaia @@ -58,6 +64,7 @@ def ref(hduls, read_ext=-1, write_ext="REF"): initial_empty = 0 # An adaptive method of obtaining the threshold value for hdul in hduls: + print(hdul[read_ext]) threshold = max(hdul[read_ext].data["a"])*hdul['ALGN'].header['PIXSCALE']/3600 threshold = u.Quantity(threshold, u.deg) ra = hdul[read_ext].data["RA"] @@ -75,7 +82,7 @@ def ref(hduls, read_ext=-1, write_ext="REF"): output_format="csv").get_results() data = data.as_array() # add the cache table to the data - if not cached_table: + if len(cached_table): cached_table = np.hstack((cached_table, data.data)) else: cached_table = data.data @@ -91,7 +98,7 @@ def ref(hduls, read_ext=-1, write_ext="REF"): # look through the cache to find a match if _in_cone(cs, coord, threshold): # if we find a match, copy it to the output table - if not output_table: + if len(output_table): output_table = np.hstack((output_table, np.copy(ct))) else: output_table = np.copy(ct) @@ -104,7 +111,7 @@ def ref(hduls, read_ext=-1, write_ext="REF"): ########### Add a blank if we didn't find anything ################# if not appended: - if not output_table: + if len(output_table): # If we do not find one cached, then add a blank blank = np.empty(shape=0, dtype=output_table.dtype) output_table = np.hstack((output_table, blank)) @@ -120,7 +127,7 @@ def ref(hduls, read_ext=-1, write_ext="REF"): if np.isnan(val): elm[j] = 0.0 # only append the hdul if output_table is not empty - if not output_table: + if len(output_table): hdul.append(fits.BinTableHDU(data=output_table, header=header, name=extname)) else: warnings.warn(f"empty reference table created, no stars found in the database corresponding to {hdul}") diff --git a/sdi/subtract.py b/sdi/subtract.py index 5912cc8..a613077 100644 --- a/sdi/subtract.py +++ b/sdi/subtract.py @@ -1,15 +1,9 @@ import click import ois -from astropy.io import fits #sfft specific from astropy.io.fits import CompImageHDU from . 
diff --git a/sdi/subtract.py b/sdi/subtract.py
index 5912cc8..a613077 100644
--- a/sdi/subtract.py
+++ b/sdi/subtract.py
@@ -1,15 +1,9 @@
 import click
 import ois
-from astropy.io import fits #sfft specific
 from astropy.io.fits import CompImageHDU
 from . import _cli as cli
 from .combine import combine
-import os #sfft specific
-from sfft.EasyCrowdedPacket import Easy_CrowdedPacket #sfft specific
-from sfft.CustomizedPacket import Customized_Packet #sfft specific
-from sfft.EasySparsePacket import Easy_SparsePacket #sfft specific
 import time
-import numpy as np #sfft specific
 import sys
 
 def subtract(hduls, name="ALGN", method: ("sfft", "ois", "numpy")="sfft"):
@@ -22,62 +16,7 @@ def subtract(hduls, name="ALGN", method: ("sfft", "ois", "numpy")="sfft"):
     """
     hduls = [h for h in hduls]
     outputs = []
-    if method == "sfft":
-        print(" ")
-        print("Writing Temporary Fits Files")
-        start = time.perf_counter()
-        temp_image_fits_filenames = []
-        venv_file_path = sys.prefix
-        for i, hdu in enumerate(hduls): #Write the science hduls into temporary fits files
-            temp_filename = venv_file_path + "/sdi_pipeline/sdi/temp_image{}.fits".format(i)
-            temp_data = hdu[name].data
-            primary = fits.PrimaryHDU(temp_data)
-            primary.writeto(temp_filename, overwrite = True)
-            temp_image_fits_filenames.append(temp_filename)
-        temp_ref_path = venv_file_path + "/sdi_pipeline/temp_ref.fits"
-        template_fits = combine(hduls, name) # Create the reference image
-        template_fits.writeto(temp_ref_path, overwrite = True) # Write the reference image into temporary fits file
-
-        stop = time.perf_counter()
-        print("Writing Complete")
-        print("Time Elapsed: {} sec".format(round(stop-start, 4)))
-
-        # Check file size of temporary fits files on disc
-        size = 0
-        for i, fits_name in enumerate(temp_image_fits_filenames):
-            size += os.path.getsize(fits_name)
-        size += os.path.getsize(temp_ref_path)
-        print("Total size of temporary files is {} mb".format(size/1024**2))
-        outputs = []
-
-        #Subtract
-        start = time.perf_counter()
-        print("Method = sfft") #TODO Incorperate input masks for when we take real data
-        for i, fits_name in enumerate(temp_image_fits_filenames):
-            sol, diff = Customized_Packet.CP(FITS_REF = temp_ref_path, FITS_SCI = fits_name,
-                                             FITS_mREF = temp_ref_path, FITS_mSCI = fits_name,
-                                             ForceConv = "REF", GKerHW = 4, BGPolyOrder = 2, KerPolyOrder = 2)
-            if np.isnan(np.sum(diff)) == True:
-                raise ValueError("Residual contains NaN")
-            else:
-                hduls[i].insert(1,CompImageHDU(data = diff, header = hduls[i][name].header, name = "SUB"))
-                outputs.append(hduls[i])
-        stop = time.perf_counter()
-        print("Subtraction Complete")
-        print("Time Elapsed: {} sec".format(round(stop-start, 4)))
-        print("Removing Temporary Fits Files")
-        for i, fits_name in enumerate(temp_image_fits_filenames): #Remove Temporary Fits files from disc
-            if os.path.exists(fits_name):
-                os.remove(fits_name)
-            else:
-                print("{} does not exist".format(fits_name))
-        if os.path.exists(temp_ref_path):
-            os.remove(temp_ref_path)
-        else:
-            print("temp_ref.fits does not exist")
-        print("Removal Complete")
-    elif method == "ois":
+    if method == "ois":
         print("Method = OIS")
         template = combine(hduls, name)
         for i,hdu in enumerate(hduls):
@@ -101,11 +40,11 @@ def subtract(hduls, name="ALGN", method: ("sfft", "ois", "numpy")="sfft"):
 
 @cli.cli.command("subtract")
 @click.option("-n", "--name", default="ALGN", help="The HDU to be aligned.")
-@click.option("-m", "--method", default="sfft", help="The subtraction method to use; ois or numpy (straight subtraction) or sfft (GPU accelerated). sfft-sparse and sfft-croweded are in development")
+@click.option("-m", "--method", default="ois", help="The subtraction method to use: ois or numpy (straight subtraction) or sfft (GPU accelerated); sfft-sparse and sfft-crowded are in development")
 @cli.operator
 
 ## subtract function wrapper
-def subtract_cmd(hduls,name="ALGN", method="sfft"):
+def subtract_cmd(hduls, name="ALGN", method="ois"):
     """
     Returns differences of a set of images from a template image\n
     Arguments:\n
diff --git a/setup.py b/setup.py
index c3e40b9..7493180 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@
 py_modules=["sdi"],
 #    packages=find_packages(include=["openfits"]),
 include_package_data=True,
-install_requires=["cupy-cuda11x", "sfft", "click", "astropy", "photutils", "ois", "astroalign", "astroquery", "scikit-learn"],
+install_requires=["click", "astropy", "photutils", "ois", "astroalign", "astroquery", "scikit-learn"],
 entry_points="""
 [console_scripts]
 sdi=sdi._cli:cli