Commit 53f9fc90 authored by David Rafferty

First commit

Showing with 1389 additions and 0 deletions
LSMTool: the LOFAR Sky Model Tool
=================================
Authors:
* David Rafferty, with contributed scripts from:
* Björn Adebahr
* Francesco de Gasperin
* Reinout van Weeren
Contents:
* __doc/__: documentation
* __tests/__: contains test sky models and scripts useful for validation
* __lsmtool/__: contains the main LSMTool scripts
* __lsmtool/operations/__: contains the modules for operations
* __parsets/__: some example parsets
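A minimal usage sketch (assuming the package is importable as `lsmtool`; the file name `sky.model` is a placeholder, and the methods shown follow the docstrings in the modules below):

```python
from lsmtool import skymodel

s = skymodel.SkyModel('sky.model')    # load a sky model file
s.filter('I > 1.5 Jy')                # keep sources brighter than 1.5 Jy
s.group('every')                      # give every source its own patch
s.writeFile('sky_filtered.model', clobber=True)
```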
File added
# -*- coding: utf-8 -*-
#
# Defines the custom logger
import logging
def add_coloring_to_emit_ansi(fn):
    """
    Colorize the logging output
    """
    # Wrap the handler's emit() so that messages are surrounded by ANSI
    # color codes chosen by logging level
    def new(*args):
        levelno = args[1].levelno
        if levelno >= 40:
            color = '\x1b[31m'  # red (error and critical)
        elif levelno >= 30:
            color = '\x1b[33m'  # yellow (warning)
        elif levelno >= 20:
            color = '\x1b[32m'  # green (info)
        elif levelno >= 10:
            color = '\x1b[35m'  # pink (debug)
        else:
            color = '\x1b[0m'  # normal
        args[1].msg = color + args[1].msg + '\x1b[0m'  # reset to normal
        return fn(*args)
    return new
# set the logging colors
logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
# set the logging format and default level (info)
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
def setLevel(level):
"""
Change verbosity
"""
if level == 'warning':
logging.root.setLevel(logging.WARNING)
elif level == 'info':
logging.root.setLevel(logging.INFO)
elif level == 'debug':
logging.root.setLevel(logging.DEBUG)
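# Example usage (a sketch; the import path assumes this file is the
# package's _logging module, which is not shown in this listing):
#
#   import logging
#   from lsmtool import _logging
#   _logging.setLevel('debug')      # also show debug-level messages
#   logging.debug('shown in pink')  # colorized by the patched emit()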
File added
# -*- coding: utf-8 -*-
#
# This module stores the version and changelog
# Version number
__version__ = '1.0.0'
# Change log
def changelog():
"""
LSMTool Changelog.
-----------------------------------------------
2014/05/25 - Version 1.0.0 (initial release)
"""
pass
File added
import os
import glob
# Import all operation modules in this directory (except private ones)
__all__ = [os.path.basename(f)[:-3] for f in glob.glob(os.path.dirname(__file__) + "/*.py")
           if not f.endswith('__init__.py') and not os.path.basename(f).startswith('_')]
for x in __all__:
    __import__(x, locals(), globals())
File added
# -*- coding: utf-8 -*-
#
# Defines cluster functions used by the group operation
#
# Copyright (C) 2013 - Reinout van Weeren
# Copyright (C) 2013 - Francesco de Gasperin
# Modified by David Rafferty as required for integration into LSMTool
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os, sys
import numpy as np
import pylab as pl
import itertools
try:
import pyrap.tables as pt
import lofar.stationresponse as lsr
except ImportError:
pass
class Patch():
def __init__(self, name, ra, dec, flux):
self.name = name
self.ra = ra
self.dec = dec
self.flux = flux
class Cluster():
def __init__(self, name, patch):
self.name = name
self.init_patch = patch
self.patches = []
self.update_cluster_coords()
def add_patch(self, patch):
"""
Add a patch to this cluster
"""
self.patches.append(patch)
def total_cluster_flux(self):
"""
Return total cluster flux
"""
cluster_flux = self.init_patch.flux
for patch in self.patches:
cluster_flux += patch.flux
return cluster_flux
def update_cluster_coords(self):
"""
update the self.centroid_ra, self.centroid_dec, self.mid_ra, self.mid_dec
"""
self.centroid_ra = self.init_patch.ra*self.init_patch.flux
self.centroid_dec = self.init_patch.dec*self.init_patch.flux
min_ra = np.inf
max_ra = -np.inf
min_dec = np.inf
max_dec = -np.inf
for patch in self.patches:
self.centroid_ra += patch.ra*patch.flux
self.centroid_dec += patch.dec*patch.flux
if patch.ra < min_ra: min_ra = patch.ra
if patch.ra > max_ra: max_ra = patch.ra
if patch.dec < min_dec: min_dec = patch.dec
if patch.dec > max_dec: max_dec = patch.dec
self.centroid_ra /= self.total_cluster_flux()
self.centroid_dec /= self.total_cluster_flux()
self.mid_ra = min_ra + (max_ra - min_ra)/2.
self.mid_dec = min_dec + (max_dec - min_dec)/2.
def ratohms_string(ra):
    # Convert RA in degrees to an 'hh:mm:ss.ss' string
    rah = int(ra / 15.0)
    ram = int((ra / 15.0 - rah) * 60.0)
    ras = ((ra / 15.0 - rah) * 60.0 - ram) * 60.0
    return str(rah) + ':' + str(ram) + ':' + str(round(ras, 2))

def dectodms_string(dec):
    # Convert Dec in degrees to a 'dd.mm.ss.ss' string
    decd = int(dec)
    decm = int(abs(dec - decd) * 60.0)
    decs = (abs(dec - decd) * 60.0 - decm) * 60.0
    return str(decd) + '.' + str(decm) + '.' + str(round(decs, 2))
def compute_patch_center(data, beam_ms=None):
    """
    Return a list of Patch objects with each patch's name, (flux-weighted)
    central RA and Dec, and total flux
    """
patch_names = np.unique(data['Name'])
patches = []
# get the average time of the obs, OK for 1st order correction
    if beam_ms is not None:
t = pt.table(beam_ms, ack=False)
tt = t.query('ANTENNA1==0 AND ANTENNA2==1', columns='TIME')
time = tt.getcol("TIME")
time = min(time) + ( max(time) - min(time) ) / 2.
t.close()
sr = lsr.stationresponse(beam_ms, False, True)
for patch_name in patch_names:
comp_ids = np.where(data['Name'] == patch_name)[0]
patch_ra = 0.
patch_dec = 0.
weights_ra = 0.
weights_dec = 0.
patch_flux = 0.
for comp_id in comp_ids:
comp_ra = data['RA'][comp_id]
comp_dec = data['Dec'][comp_id]
comp_flux = np.float(data['I'][comp_id])
# beam correction
            if beam_ms is not None:
                sr.setDirection(comp_ra * np.pi / 180., comp_dec * np.pi / 180.)
                # use station 0 to compute the beam and get mid channel
                beam = sr.evaluateStation(time, 0)
                r = abs(beam[int(len(beam) / 2.)])
                beam = (r[0][0] + r[1][1]) / 2.
                comp_flux *= beam
# calculate the average weighted patch center, and patch flux
patch_flux += comp_flux
patch_ra += comp_ra * comp_flux
patch_dec += comp_dec * comp_flux
weights_ra += comp_flux
weights_dec += comp_flux
patches.append(Patch(patch_name, patch_ra/weights_ra, patch_dec/weights_dec, patch_flux))
return patches
def angsep2(ra1, dec1, ra2, dec2):
"""Returns angular separation between two coordinates (all in degrees)"""
from astropy.coordinates import FK5
import astropy.units as u
coord1 = FK5(ra1, dec1, unit=(u.degree, u.degree))
coord2 = FK5(ra2, dec2, unit=(u.degree, u.degree))
return coord1.separation(coord2)
def create_clusters(patches_orig, Q):
    """
    Cluster the patches of the sky model iteratively around the Q brightest
    patches
    """
# sort the patches by brightest first
idx = np.argsort([patch.flux for patch in patches_orig])[::-1] # -1 to reverse sort
patches = list(np.array(patches_orig)[idx])
# initialize clusters with the brightest patches
clusters = []
for i, patch in enumerate(patches[0:Q]):
clusters.append(Cluster('CLUSTER_'+str(i), patch))
# Iterate until no changes in which patch belongs to which cluster
count = 1
patches_seq_old = []
while True:
print 'Iteration', count
for cluster in clusters:
cluster.update_cluster_coords()
# reset patches
cluster.patches = []
# select patches after the first Q
for patch in patches[Q:]:
# finding closest cluster for each patch
angulardistance = np.inf
for cluster in clusters:
adis = abs(angsep2(cluster.centroid_ra, cluster.centroid_dec, patch.ra, patch.dec))
if adis < angulardistance:
angulardistance = adis
closest_cluster = cluster
# add this patch to the closest cluster
closest_cluster.add_patch(patch)
patches_seq = []
for cluster in clusters:
patches_seq.extend(cluster.patches)
count += 1
if patches_seq == patches_seq_old: break
patches_seq_old = patches_seq
    # Make the output patch column (one cluster name per input patch)
    patchNames = [''] * len(patches)
    patchNames_orig = [p.name for p in patches_orig]
    for c in clusters:
        # Include the initial (brightest) patch of each cluster
        patchNames[patchNames_orig.index(c.init_patch.name)] = c.name
        for p in c.patches:
            patchNames[patchNames_orig.index(p.name)] = c.name
    return np.array(patchNames)
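# Example usage (a sketch; `data` is the sky model table, with 'Name', 'RA',
# 'Dec', and 'I' columns, as assumed by compute_patch_center):
#
#   patches = compute_patch_center(data)       # one Patch per unique Name
#   patchCol = create_clusters(patches, Q=10)  # cluster around the 10 brightest
#   # patchCol[i] is the cluster name of the i-th patch returned by
#   # compute_patch_center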
File added
# -*- coding: utf-8 -*-
#
# Defines filter functions used by the remove and select operations
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import logging
from .. import tableio
def filter(LSM, filterExpression, exclusive=False, aggregate=False, weight=False,
beamMS=None, useRegEx=False):
"""
Filters the sky model, keeping all sources that meet the given expression.
After filtering, the sky model contains only those sources for which the
given filter expression is true.
Parameters
----------
filterExpression : str or dict
A string specifying the filter expression in the form:
'<property> <operator> <value> [<units>]'
(e.g., 'I <= 10.5 Jy'). These elements can also be given as a
dictionary in the form:
{'filterProp':property, 'filterOper':operator,
'filterVal':value, 'filterUnits':units}
        or as a list:
            [property, operator, value, units]
The property to filter on must be a valid column name or the filename
of a mask image.
Supported operators are:
- !=
- <=
- >=
- >
- <
- = (or '==')
Units are optional and must be specified as required by astropy.units.
exclusive : bool, optional
If False, sources that meet the filter expression are kept. If True,
sources that do not meet the filter expression are kept.
    aggregate : bool, optional
        If True, values are aggregated by patch before filtering, so
        filtering is done patch by patch.
weight : bool, optional
If True, aggregated values will be calculated when appropriate using
the Stokes I fluxes of sources in each patch as weights
    beamMS : str, optional
        Measurement set from which the primary beam will be estimated.
        Required when filtering on apparent (beam-attenuated) Stokes I
        flux.
useRegEx : bool, optional
If True, string matching will use regular expression matching. If
False, string matching uses Unix filename matching.
Examples
--------
Filter on column 'I' (Stokes I flux). This filter will select all sources
with Stokes I flux greater than 1.5 Jy::
>>> s = SkyModel('sky.model')
>>> s.filter('I > 1.5 Jy')
INFO: Filtered out 1102 sources.
If the sky model has patches and the filter is desired per patch, use
``aggregate = True``::
>>> s.filter('I > 1.5 Jy', aggregate=True)
Filter on source names, keeping those that match "src*_1?"::
>>> s.filter('Name == src*_1?')
Use a CASA clean mask image to keep sources that lie in masked regions::
>>> s.filter('clean_mask.mask == True')
"""
import numpy as np
import math as m
import os
if filterExpression is None:
logging.error('Please specify a filter expression.')
return 1
if type(filterExpression) is list:
filterProp, filterOperStr, filterVal, filterUnits = filterExpression
filterOper, f = convertOperStr(filterOperStr)
elif type(filterExpression) is dict:
if ('filterProp' in filterExpression.keys() and
'filterOper' in filterExpression.keys() and
'filterVal' in filterExpression.keys()):
filterProp = filterExpression['filterProp']
filterOperStr = filterExpression['filterOper']
filterOper, f = convertOperStr(filterOperStr)
filterVal = filterExpression['filterVal']
else:
logging.error("Please specify filter dictionary as "
"{'filterProp':property, 'filterOper':operator, "
"'filterVal':value, 'filterUnits':units}")
return 1
if 'filterUnits' in filterExpression.keys():
filterUnits = filterExpression['filterUnits']
else:
filterUnits = None
elif type(filterExpression) is str:
# Parse the filter expression
filterProp, filterOper, filterVal, filterUnits = parseFilter(filterExpression)
else:
return 1
# Get the column values to filter on
if LSM._verifyColName(filterProp) in LSM.table.colnames:
filterProp = LSM._verifyColName(filterProp)
colVals = LSM.getColValues(filterProp, units=filterUnits,
aggregate=aggregate, weight=weight)
if beamMS is not None and filterProp == 'I':
RADeg = LSM.getColValues('RA')
DecDeg = LSM.getColValues('Dec')
colVals = applyBeam(beamMS, colVals, RADeg, DecDeg)
    else:
        # Assume filterProp is a mask filename and try to load mask values
        if os.path.exists(filterProp):
            RARad = LSM.getColValues('RA', units='radian')
            DecRad = LSM.getColValues('Dec', units='radian')
            colVals = getMaskValues(filterProp, RARad, DecRad)
            if colVals is None:
                return 1
        else:
            return 1
# Do the filtering
if colVals is None:
return 1
filt = getFilterIndices(colVals, filterOper, filterVal, useRegEx=useRegEx)
if exclusive:
filt = [i for i in range(len(colVals)) if i not in filt]
if len(filt) == 0:
logging.error('Filter would result in an empty sky model.')
return 1
if len(filt) == len(colVals):
logging.info('Filtered out 0 sources.')
return 0
if LSM._hasPatches and aggregate:
sourcesToKeep = LSM.getColValues('Patch', aggregate=True)[filt]
        def filterByName(tab, key_colnames):
            return tab['Patch'][0] in sourcesToKeep
nPatchesOrig = len(LSM.table.groups)
LSM.table = LSM.table.groups.filter(filterByName) # filter
LSM.table = LSM.table.group_by('Patch') # regroup
nPatchesNew = len(LSM.table.groups)
if exclusive:
logging.info('Removed {0} patches.'.format(nPatchesOrig-nPatchesNew))
else:
logging.info('Kept {0} patches.'.format(nPatchesNew))
else:
nRowsOrig = len(LSM.table)
LSM.table = LSM.table[filt]
nRowsNew = len(LSM.table)
if LSM._hasPatches:
LSM.table = LSM.table.group_by('Patch') # regroup
if exclusive:
logging.info('Removed {0} sources.'.format(nRowsOrig-nRowsNew))
else:
logging.info('Kept {0} sources.'.format(nRowsNew))
return LSM
def parseFilter(filterExpression):
    """
    Takes a filter expression string and returns a tuple of
    (property, operator function, value, units)
    """
# Get operator function
filterOper, filterOperStr = convertOperStr(filterExpression)
if filterOper is None:
return (None, None, None, None)
filterParts = filterExpression.split(filterOperStr)
if len(filterParts) != 2:
logging.error("Filter expression must be of the form '<property> "
"<operator> <value> <unit>'\nE.g., 'Flux >= 10 Jy'")
return (None, None, None, None)
# Get the column to filter on
    filterProp = filterParts[0].strip().lower()
    if filterProp not in tableio.allowedColumnNames:
        logging.error('Column name "{0}" is not a valid column.'.format(filterProp))
        return (None, None, None, None)
# Get the filter value(s)
filterValAndUnits = filterParts[1].strip()
if tableio.allowedColumnDefaults[filterProp] == 'N/A':
# Column values are strings. Allow only '==' and '!=' operators
if filterOperStr not in ['=', '==', '!=']:
logging.error("Filter operator '{0}' not allow with string columns. "
"Supported operators are '!=' or '=' (or '==')".format(filterOperStr))
return (None, None, None, None)
# Check for a list of values
if '[' in filterValAndUnits and ']' in filterValAndUnits:
filterVal = filterValAndUnits.split(']')[0].strip('[')
filterValParts = filterVal.split(',')
filterVal = []
for val in filterValParts:
val = val.strip()
val = val.strip('"')
val = val.strip("'")
filterVal.append(val)
else:
filterVal = filterValAndUnits.split(' ')[0].strip()
filterVal = filterVal.strip('"')
filterVal = filterVal.strip("'")
else:
# The column to filter is type float
try:
filterVal = float(filterValAndUnits.split(' ')[0].strip())
except ValueError:
logging.error('Filter value not understood. Make sure the value is '
'separated from the units (if any)')
return (None, None, None, None)
# Try and get the units
try:
filterUnits = filterValAndUnits.split(']')
if len(filterUnits) == 1:
filterUnits = filterUnits[0].split(' ')[1].strip()
else:
filterUnits = filterUnits[1].strip()
except IndexError:
filterUnits = None
if filterUnits == '':
filterUnits = None
if type(filterVal) is str and filterUnits is not None:
logging.error('Units are not allowed with string columns.')
return (None, None, None, None)
return (filterProp, filterOper, filterVal, filterUnits)
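# For example (a sketch of the expected behavior; column names are
# lower-cased, and numeric values are parsed as floats):
#   parseFilter('I <= 10.5 Jy')   ->  ('i', operator.le, 10.5, 'Jy')
#   parseFilter('Name == src*')   ->  ('name', operator.eq, 'src*', None)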
def convertOperStr(operStr):
    """
    Returns the operator function and operator string corresponding to the
    input string.
    """
    import operator as op

    filterOperStr = None
    ops = {'!=': op.ne, '<=': op.le, '>=': op.ge, '>': op.gt, '<': op.lt,
           '==': op.eq, '=': op.eq}
    for operKey in ops:
        if operKey in operStr:
            # Pick the longest match (e.g., prefer '<=' over '<')
            if filterOperStr is None or len(operKey) > len(filterOperStr):
                filterOperStr = operKey
    if filterOperStr is None:
        logging.error("Filter operator '{0}' not understood. Supported "
                      "operators are '!=', '<=', '>=', '>', '<', '=' (or '==')".
                      format(operStr))
        return None, None
    return ops[filterOperStr], filterOperStr
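# Note that the operator is searched for as a substring, so a full filter
# expression can be passed directly. For example:
#   convertOperStr('>=')          ->  (operator.ge, '>=')
#   convertOperStr('I > 1.5 Jy')  ->  (operator.gt, '>')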
def getFilterIndices(colVals, filterOper, filterVal, useRegEx=False):
"""
Returns the indices that correspond to the input filter expression
"""
import operator as op
import fnmatch
import re
if type(filterVal) is not list:
filterVal = [filterVal]
filterInd = []
for val in filterVal:
if type(val) is str:
# String -> use regular expression or Unix filename matching search
if filterOper is op.eq:
if useRegEx:
filt = [i for i, item in enumerate(colVals) if re.search(val, item) is not None]
else:
filt = [i for i, item in enumerate(colVals) if fnmatch.fnmatch(item, val)]
elif filterOper is op.ne:
if useRegEx:
filt = [i for i, item in enumerate(colVals) if re.search(val, item) is None]
else:
filt = [i for i, item in enumerate(colVals) if not fnmatch.fnmatch(item, val)]
        else:
            logging.error("Filter operator '{0}' is not allowed with string columns. "
                          "Supported operators are '!=' and '=' (or '==')".format(filterOper))
            return None
else:
filtBool = filterOper(colVals, val)
filt = [f for f in range(len(colVals)) if filtBool[f]]
filterInd += filt
return filterInd
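# For example (a sketch, with column values as numpy arrays):
#   import numpy as np, operator as op
#   getFilterIndices(np.array([0.5, 2.0, 3.0]), op.gt, 1.5)      ->  [1, 2]
#   getFilterIndices(np.array(['src1', 'src2']), op.eq, 'src*')  ->  [0, 1]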
def getMaskValues(mask, RARad, DecRad):
    """
    Returns an array of mask values for each (RA, Dec) pair in radians
    """
    import math
    import numpy as np
    import pyrap.images

    try:
        maskdata = pyrap.images.image(mask)
        maskval = maskdata.getdata()[0][0]
    except:
        logging.error("Error opening mask file '{0}'".format(mask))
        return None
    vals = []
    # Reference world coordinates for the frequency and Stokes axes
    (a, b, _, _) = maskdata.toworld([0, 0, 0, 0])
    for raRad, decRad in zip(RARad, DecRad):
        (_, _, pixY, pixX) = maskdata.topixel([a, b, decRad, raRad])
        try:
            if maskval[int(math.floor(pixY))][int(math.floor(pixX))]:
                vals.append(True)
            else:
                vals.append(False)
        except:
            vals.append(False)
    return np.array(vals)
def applyBeam(beamMS, fluxes, RADeg, DecDeg):
    """
    Returns fluxes attenuated by the primary beam.
    """
    import numpy as np
    try:
        import pyrap.tables as pt
        import lofar.stationresponse as lsr
    except ImportError:
        logging.error('Could not import pyrap.tables and lofar.stationresponse. '
                      'These are required for beam attenuation.')
        return None

    # Get the mid point of the observation (adequate for a 1st-order correction)
    t = pt.table(beamMS, ack=False)
    tt = t.query('ANTENNA1==0 AND ANTENNA2==1', columns='TIME')
    time = tt.getcol("TIME")
    time = min(time) + (max(time) - min(time)) / 2.
    t.close()

    attFluxes = []
    sr = lsr.stationresponse(beamMS, False, True)
    for flux, RA, Dec in zip(fluxes, RADeg, DecDeg):
        # Use station 0 to compute the beam and get mid channel
        sr.setDirection(RA * np.pi / 180., Dec * np.pi / 180.)
        beam = sr.evaluateStation(time, 0)
        r = abs(beam[int(len(beam) / 2.)])
        beam = (r[0][0] + r[1][1]) / 2.
        attFluxes.append(flux * beam)
    return np.array(attFluxes)
File added
# -*- coding: utf-8 -*-
#
# Defines the tessellate functions used by the group operation
#
# Based on the IDL version of the Voronoi binning method of:
# Cappellari M., Copin Y., 2003, MNRAS, 342, 345
# Modified by David Rafferty as required for integration into LSMTool
#
# This software is provided as is without any warranty whatsoever.
# Permission to use, copy and distribute unmodified copies for
# non-commercial purposes is granted. Permission to modify for
# personal or internal use is granted, provided this copyright
# and disclaimer are included unchanged at the beginning of
# the file. All other rights are reserved.
import numpy as np
from numpy import sum, sqrt, min, max, any
from numpy import argmax, argmin, mean, abs
from numpy import int32 as Nint
from numpy import float32 as Nfloat
import copy
def get_skymodel(skymodel_fn):
    """Returns x, y, flux arrays (and the pixel scale in arcsec/pixel)
    for the input sky model
    """
    import math as m
    try:
        from astropy.wcs import WCS
    except ImportError, err:
        import warnings
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=DeprecationWarning)
            from pywcs import WCS
y = []
x = []
fluxes = []
# Make wcs object to handle transformation from ra and dec to pixel coords.
w = WCS(naxis=2)
w.wcs.crpix = [-234.75, 8.3393]
w.wcs.cdelt = np.array([-0.066667, 0.066667])
w.wcs.crval = [0, -90]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.set_pv([(2, 1, 45.0)])
arcsec_per_pix = abs(w.wcs.cdelt[0]) * 3600.0 # arcsec/pixel
for line in open(skymodel_fn):
sline=line.split(',')
if line.startswith("FORMAT"): continue
if line[0] == '#': continue
if line[0] == '\n': continue
name = str(sline[0])
srctype = str(sline[1])
        ra_src = str(sline[2]).split(':')
        ra_deg = (float(ra_src[0]) + float(ra_src[1]) / 60.0
                  + float(ra_src[2]) / 3600.0) * 15.0
        dec_src = str(sline[3]).split('.')
        if len(dec_src) == 3:
            dec_deg = (float(dec_src[0]) + float(dec_src[1]) / 60.0
                       + float(dec_src[2]) / 3600.0)
        else:
            dec_deg = (float(dec_src[0]) + float(dec_src[1]) / 60.0
                       + float(dec_src[2] + '.' + dec_src[3]) / 3600.0)
        flux = str(sline[4])
        fluxes.append(np.float(flux))
ra_dec = np.array([[ra_deg, dec_deg]])
try:
x.append(w.wcs_world2pix(ra_dec, 1)[0][0])
y.append(w.wcs_world2pix(ra_dec, 1)[0][1])
except AttributeError:
x.append(w.wcs_sky2pix(ra_dec, 1)[0][0])
y.append(w.wcs_sky2pix(ra_dec, 1)[0][1])
    # Shift the coordinates so that all values are positive
    x = np.array(x) + abs(np.min(x))
    y = np.array(y) + abs(np.min(y))
    return x, y, np.array(fluxes), arcsec_per_pix
def dist2(x1,y1,x2,y2, scale=1.0) :
return ((x1-x2)**2 + (y1-y2)**2)/scale**2
def guess_regular_grid(xnodes, ynodes, pixelsize=None) :
"""
Return a regular grid guessed on an irregular one (Voronoi)
xnodes, ynodes: arrays of Voronoi bins
Return: xunb, yunb = regular grid for x and y (unbinned)
"""
## First deriving a pixel size
xn_rav, yn_rav = xnodes.ravel(), ynodes.ravel()
    if pixelsize is None:
        pixelsize = derive_pixelsize(xnodes, ynodes)
minxn = np.int(np.min(xn_rav) / pixelsize) * pixelsize
minyn = np.int(np.min(yn_rav) / pixelsize) * pixelsize
xunb, yunb = np.meshgrid(np.arange(minxn, np.max(xn_rav)+pixelsize, pixelsize),
np.arange(minyn, np.max(yn_rav)+pixelsize, pixelsize))
return xunb, yunb
def derive_unbinned_field(xnodes, ynodes, data, xunb=None, yunb=None) :
"""
Provide an array of the same shape as the input xunb, and yunb
with the values derived from the Voronoi binned data
xnodes, ynodes: 2 arrays providing the nodes from the binning
data : values for each node
xunb, yunb: x and y coordinates of the unbinned data
if not provided (default) they will be guessed from the nodes
    Return: xunb, yunb, and unbinned_data, all with the same shape as xunb
    """
    if xunb is None:
        xunb, yunb = guess_regular_grid(xnodes, ynodes)
x_rav, y_rav = xunb.ravel(), yunb.ravel()
xnodes_rav, ynodes_rav = xnodes.ravel(), ynodes.ravel()
data_rav = data.ravel()
unbinned_data = np.zeros_like(x_rav)
for i in xrange(len(x_rav)) :
indclosestBin = argmin(dist2(x_rav[i], y_rav[i], xnodes_rav, ynodes_rav))
unbinned_data[i] = data_rav[indclosestBin]
return xunb, yunb, unbinned_data.reshape(xunb.shape)
listmethods = ["voronoi", "quadtree"]
class bin2D:
"""
Class for Voronoi binning of a set of x and y coordinates
using given data and potential associated noise
"""
def __init__(self, xin, yin, data, target_flux=1.0, pixelsize=None,
method="Voronoi", cvt=1, wvt=0):
self.xin = xin.ravel()
self.yin = yin.ravel()
self.data = data.ravel()
        ## Derive a pixel size if not provided
        if pixelsize is None:
            self.pixelsize = derive_pixelsize(self.xin, self.yin, verbose=1)
        else:
            self.pixelsize = pixelsize
self.target_flux = target_flux
## Binning method and options
self.method = str.lower(method)
self.cvt = cvt
self.wvt = wvt
self.scale = 1.0
self._check_input()
def _check_input(self):
"""
Check consistency of input data
"""
# Basic checks of the data
# First about the dimensions of the datasets
self.npix = len(self.xin)
### Accrete the bins ============================================
def bin2d_accretion(self, verbose=0):
""" Accrete the bins according to their flux
"""
## Status holds the bin number when assigned
self.status = np.zeros(self.npix, dtype=Nint)
## Good is 1 if the bin was accepted
self.good = np.zeros(self.xin.size, dtype=Nint)
## Start with the max flux in the data
currentBin = [argmax(self.data)]
for ind in range(1,self.npix+1): ## Running over the index of the Voronoi BIN
## Only one pixel at this stage
current_flux = sum(self.data[currentBin])
if verbose : print "Bin %d"%(ind)
self.status[currentBin] = ind # only one pixel at this stage
## Barycentric centroid for 1 pixel...
xbar, ybar = self.xin[currentBin], self.yin[currentBin]
## Indices of remaining unbinned data
unBinned = np.where(self.status == 0)[0]
##+++++++++++++++++++++++++++++++++++++++++++++++++
## STOP THE WHILE Loop if all pixels are now binned
while len(unBinned) > 0 :
##+++++++++++++++++++++++++++++++++++++++++++++++++
## Coordinates of the Remaining unbinned pixels
xunBin = self.xin[unBinned]
yunBin = self.yin[unBinned]
## Location of current bin
xcurrent = self.xin[currentBin]
ycurrent = self.yin[currentBin]
## Closest unbinned pixel to the centroid making sure the highest flux is used
indclosestBar = argmin(dist2(xbar,ybar, xunBin, yunBin))
xclosest = xunBin[indclosestBar]
yclosest = yunBin[indclosestBar]
## Distance between this closest pixel and the current pixel
currentSqrtDist= sqrt(min(dist2(xclosest,yclosest, xcurrent,ycurrent)))
## Add new pixel
possibleBin = currentBin + [unBinned[indclosestBar]]
## Transfer new flux to current value
old_flux = current_flux
current_flux = sum(self.data[possibleBin])
## Test if getting better for flux
if (abs(current_flux-self.target_flux) > abs(old_flux - self.target_flux)):
if (old_flux > 0.8 * self.target_flux):
self.good[currentBin] = 1
break
##++++++++++++++++++++++++++++++++++++++++++
## If the new Bin is ok we associate the number of the bin to that one
self.status[unBinned[indclosestBar]] = ind
## ... and we use that now as the current Bin
currentBin = possibleBin
## And update the values
## First the centroid
xbar, ybar = mean(self.xin[currentBin]), mean(self.yin[currentBin])
## New set of unbinned pixels
unBinned = np.where(self.status == 0)[0]
## ----- End of While Loop --------------------------------------------
## Unbinned pixels
unBinned = np.where(self.status == 0)[0]
## Break if all pixels are binned
if len(unBinned) == 0 :
break
## When the while loop is finished for this new BIN
## Find the centroid of all Binned pixels
Binned = np.where(self.status != 0)[0]
xbar, ybar = mean(self.xin[Binned]), mean(self.yin[Binned])
## Closest unbinned pixel to the centroid of all Binned pixels
xunBin = self.xin[unBinned]
yunBin = self.yin[unBinned]
indclosestBar = argmin(dist2(xbar,ybar, xunBin, yunBin))
## Take now this closest pixel as the next one to look at
currentBin = [unBinned[indclosestBar]]
## Set to zero all bins that did not reach the target flux
self.status *= self.good
### Compute centroid of bins ======================================
def bin2d_centroid(self, verbose=0):
## Compute the area for each node
self.Areanode, self.bins = np.histogram(self.status, bins=np.arange(np.max(self.status+0.5))+0.5)
## Select the ones which have at least one bin
self.indgoodbins = np.where(self.Areanode > 0)[0]
self.Areanode = self.Areanode[self.indgoodbins]
ngoodbins = self.indgoodbins.size
## Reset the xnode, ynode, SNnode, and statusnode (which provides the number for the node)
self.xnode = np.zeros(ngoodbins, Nfloat)
self.ynode = np.zeros_like(self.xnode)
self.flux_node = np.zeros_like(self.xnode)
self.statusnode = np.zeros_like(self.xnode)
self.listbins = []
for i in range(ngoodbins) :
## indgoodbins[i] provides the bin of the Areanode, so indgoodbins[i] + 1 is the status number
self.statusnode[i] = self.indgoodbins[i]+1
## List of bins which have statusnode as a status
listbins = np.where(self.status==self.statusnode[i])[0]
## Centroid of the node
self.xnode[i], self.ynode[i] = mean(self.xin[listbins]), mean(self.yin[listbins])
self.flux_node[i] = sum(self.data[listbins])
self.listbins.append(listbins)
### Compute WEIGHTED centroid of bins ======================================
    def bin2d_weighted_centroid(self, weight=None, verbose=0):
        if weight is not None:
            self.weight = weight
self.Areanode, self.bins = np.histogram(self.status, bins=np.arange(np.max(self.status+0.5))+0.5)
## Select the ones which have at least one bin
self.indgoodbins = np.where(self.Areanode > 0)[0]
self.Areanode = self.Areanode[self.indgoodbins]
ngoodbins = self.indgoodbins.size
## Reset the xnode, ynode, SNnode, and statusnode (which provides the number for the node)
self.xnode = np.zeros(ngoodbins, Nfloat)
self.ynode = np.zeros_like(self.xnode)
self.flux_node = np.zeros_like(self.xnode)
self.statusnode = np.zeros_like(self.xnode)
self.listbins = []
for i in range(ngoodbins) :
## indgoodbins[i] provides the bin of the Areanode, so indgoodbins[i] + 1 is the status number
self.statusnode[i] = self.indgoodbins[i]+1
## List of bins which have statusnode as a status
listbins = np.where(self.status==self.statusnode[i])[0]
## Weighted centroid of the node
self.xnode[i], self.ynode[i] = np.average(self.xin[listbins], weights=self.weight[listbins]), np.average(self.yin[listbins], weights=self.weight[listbins])
self.flux_node[i] = sum(self.data[listbins])
self.listbins.append(listbins)
### Assign bins ============================================
    def bin2d_assign_bins(self, sel_pixels=None, scale=None, verbose=0):
        """
        Assign each pixel to its nearest node (applying the scaling factor)
        """
        if scale is not None:
            self.scale = scale
        if sel_pixels is None:
            sel_pixels = range(self.xin.size)
for i in sel_pixels :
minind = argmin(dist2(self.xin[i], self.yin[i], self.xnode, self.ynode, scale=self.scale))
self.status[i] = self.statusnode[minind]
if verbose :
print "Pixel ", self.status[i], self.xin[i], self.yin[i], self.xnode[minind], self.ynode[minind]
## reDerive the centroid
self.bin2d_centroid()
### Do CV tesselation ============================================
    def bin2d_cvt_equal_mass(self, wvt=None, verbose=1):
        """
        Produce a centroidal Voronoi tessellation (CVT)
        wvt: default is None (will use preset value, see self.wvt)
        """
## Reset the status and statusnode for all nodes
self.status = np.zeros(self.npix, dtype=Nint)
self.statusnode = np.arange(self.xnode.size) + 1
        if wvt is not None:
            self.wvt = wvt
        if self.wvt:
            self.weight = np.ones_like(self.data)
        else:
            self.weight = self.data**4
self.scale = 1.0
self.niter = 0
        ## WHILE LOOP: stop when the nodes no longer move =================
        ## Initialize the "old" nodes to a single element so that the first
        ## comparison fails and the loop is entered
        Oldxnode, Oldynode = copy.copy(self.xnode[-1]), copy.copy(self.ynode[-1])
        while (not np.array_equiv(self.xnode, Oldxnode)) | (not np.array_equiv(self.ynode, Oldynode)):
Oldxnode, Oldynode = copy.copy(self.xnode), copy.copy(self.ynode)
## Assign the closest centroid to each bin
self.bin2d_assign_bins()
## New nodes weighted centroids
self.bin2d_weighted_centroid()
## Eq. (4) of Diehl & Statler (2006)
if self.wvt : self.scale = sqrt(self.Areanode/self.flux_node)
self.niter += 1
### Do Voronoi binning ============================================
def bin_voronoi(self, wvt=None, cvt=None, verbose=0) :
""" Actually do the Voronoi binning
wvt: default is None (will use preset value, see self.wvt)
cvt: default is None (will use preset value, see self.cvt)
"""
        if cvt is not None:
            self.cvt = cvt
        if wvt is not None:
            self.wvt = wvt
self.bin2d_accretion(verbose=verbose)
self.bin2d_centroid()
## Get the bad pixels, not assigned and assign them
badpixels = np.where(self.status == 0)[0]
self.bin2d_assign_bins(badpixels)
if self.cvt :
self.bin2d_cvt_equal_mass()
else : self.scale = 1.0
## Final nodes weighted centroids after assigning to the final nodes
self.bin2d_assign_bins()
if self.wvt : self.weight = np.ones_like(self.data)
else : self.weight = self.data
self.bin2d_weighted_centroid()
    def show_voronoibin(self, datain=None, shownode=1, mycmap=None):
        """
        Display the Voronoi bins on a map
        datain: if None (default), random colors are used to display the bins;
            if provided, it is displayed with a jet (or specified mycmap) cmap
            (should be either the length of the Voronoi nodes array or the
            size of the initial pixels)
        shownode: default is 1 -> show the Voronoi nodes; otherwise ignore (0)
        mycmap: in case datain is provided, the cmap used to display the bins
        """
from distutils import version
try:
import matplotlib
except ImportError:
raise Exception("matplotlib 0.99.0 or later is required for this routine")
if version.LooseVersion(matplotlib.__version__) < version.LooseVersion('0.99.0'):
raise Exception("matplotlib 0.99.0 or later is required for this routine")
from matplotlib.collections import PatchCollection
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
fig = plt.figure(1,figsize=(7,7))
plt.clf()
ax = plt.gca()
patches = []
binsize = self.pixelsize
for i in range(len(self.xin)) :
patches.append(mpatches.Rectangle((self.xin[i],self.yin[i]), binsize*10, binsize*10))
        if datain is None:
            dataout = self.status
            mycmap = 'prism'
else :
if len(datain) == self.xnode.size :
dataout = np.zeros(self.xin.size, Nfloat)
for i in range(self.xnode.size) :
listbins = self.listbins[i]
dataout[listbins] = [datain[i]]*len(listbins)
            elif len(datain) == self.xin.size:
                dataout = datain
            if mycmap is None:
                mycmap = 'jet'
colors = dataout * 100.0 / max(dataout)
collection = PatchCollection(patches, cmap=mycmap)
collection.set_array(np.array(colors))
ax.add_collection(collection)
if shownode :
plt.scatter(self.xnode,self.ynode, marker='o', edgecolors='k', facecolors='none', s=100)
for i in range(self.xnode.size):
ax.annotate(i, (self.xnode[i], self.ynode[i]))
listbins = self.listbins[i]
plt.axis('image')
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.title("Voronoi Map")
    def bin_data(self, datain=None, noisein=None):
        """
        Return a Voronoi adaptive binning of your data.
        datain: if provided, will be used as data input;
            if not provided (None = default), will use self.data
        noisein: currently unused
        Output = xout, yout, dataout, flux_out
        """
        if datain is None:
            datain = copy.copy(self.data)
dataout = np.zeros(self.xnode.size, Nfloat)
xout = np.zeros_like(dataout)
yout = np.zeros_like(dataout)
flux_out = np.zeros_like(dataout)
for i in range(self.xnode.size) :
listbins = self.listbins[i]
xout[i] = np.average(self.xin.ravel()[listbins], weights=datain[listbins])
yout[i] = np.average(self.yin.ravel()[listbins], weights=datain[listbins])
dataout[i] = mean(datain[listbins])
flux_out[i] = sum(datain[listbins])
return xout, yout, dataout, flux_out
def radec2xy(RA, Dec):
"""Returns x, y for input ra, dec
"""
from astropy.wcs import WCS
y = []
x = []
# Make wcs object to handle transformation from ra and dec to pixel coords.
w = WCS(naxis=2)
w.wcs.crpix = [-234.75, 8.3393]
w.wcs.cdelt = np.array([-0.066667, 0.066667])
w.wcs.crval = [0, -90]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.set_pv([(2, 1, 45.0)])
arcsec_per_pix = abs(w.wcs.cdelt[0]) * 3600.0 # arcsec/pixel
for ra_deg, dec_deg in zip(RA, Dec):
ra_dec = np.array([[ra_deg, dec_deg]])
x.append(w.wcs_world2pix(ra_dec, 1)[0][0])
y.append(w.wcs_world2pix(ra_dec, 1)[0][1])
    # Shift the coordinates so that all values are positive
    x = np.array(x) + abs(np.min(x))
    y = np.array(y) + abs(np.min(y))
    return x, y
def derive_pixelsize(x, y, verbose=0) :
""" Find the pixelsize by looking at the minimum distance between
pairs of x,y
x: xaxis coordinates
y: yaxis coordinates
Return: pixelsize
"""
pixelsize = 1.e30
for i in range(len(x)-1) :
mindist = np.min(dist2(x[i], y[i], x[i+1:], y[i+1:]))
pixelsize = np.minimum(mindist, pixelsize)
pixelsize = np.sqrt(pixelsize)
return pixelsize
def bins2Patches(bin_obj):
"""Returns a patch column based on binning"""
patchNames = []
for src_idx in range(len(bin_obj.data)):
for i in range(bin_obj.xnode.size):
listbins = bin_obj.listbins[i]
if src_idx in listbins:
patchNames.append('bin{0}'.format(i))
return np.array(patchNames)
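# Example usage (a sketch; RA and Dec are arrays in degrees and flux is an
# array of Stokes I fluxes, one entry per source):
#
#   x, y = radec2xy(RA, Dec)
#   binner = bin2D(np.array(x), np.array(y), flux, target_flux=10.0)
#   binner.bin_voronoi()
#   patchCol = bins2Patches(binner)  # one patch name ('bin0', 'bin1', ...) per source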
File added
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This operation implements adding of sources to the sky model
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import logging
from .. import tableio
logging.debug('Loading ADD module.')
def run(step, parset, LSM):
outFile = parset.getString('.'.join(["LSMTool.Steps", step, "OutFile"]), '' )
colNamesVals = {}
for colName in tableio.inputColumnNames:
colNamesVals[colName] = parset.getString('.'.join(["LSMTool.Steps",
step, tableio.inputColumnNames[colName]]), '' )
result = add(LSM, colNamesVals)
# Write to outFile
if outFile == '' or outFile is None:
outFile = LSM._fileName
LSM.writeFile(outFile, clobber=True)
return 0
def add(LSM, colNamesVals):
    """
    Add a source to the sky model using the given column values
    """
    LSM.setRowValues(colNamesVals)
File added
File added
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This operation implements joining of two sky models
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import logging
logging.debug('Loading CONCATENATE module.')
def run(step, parset, LSM):
outFile = parset.getString('.'.join(["LSMTool.Steps", step, "OutFile"]), '' )
skyModel2 = parset.getString('.'.join(["LSMTool.Steps", step, "Skymodel2"]), '' )
matchBy = parset.getString('.'.join(["LSMTool.Steps", step, "MatchBy"]), '' )
radius = parset.getString('.'.join(["LSMTool.Steps", step, "Radius"]), '' )
keep = parset.getString('.'.join(["LSMTool.Steps", step, "KeepMatches"]), '' )
concatenate(LSM, skyModel2, matchBy, radius, keep)
# Write to outFile
if outFile == '' or outFile is None:
outFile = LSM._fileName
LSM.writeFile(outFile, clobber=True)
return 0
def concatenate(LSM1, LSM2, matchBy='name', radius=10.0, keep='all'):
"""
Concatenate two sky models
"""
from astropy.table import vstack, Column
from astropy.coordinates import ICRS
from astropy import units as u
import numpy as np
from .. import skymodel
if type(LSM2) is str:
LSM2 = skymodel.SkyModel(LSM2)
if (LSM1._hasPatches and not LSM2._hasPatches):
LSM2.group('every')
if (LSM2._hasPatches and not LSM1._hasPatches):
LSM1.group('every')
table1 = LSM1.table.copy()
table2 = LSM2.table.copy()
# Due to a bug in astropy, the spectral index column must be removed before
# joining
nameCol1 = table1['Name']
nameCol2 = table2['Name']
spCol1 = table1['SpectralIndex']
spCol1Indx = table1.index_column('SpectralIndex')
spCol2 = table2['SpectralIndex']
table1.remove_column('SpectralIndex')
table2.remove_column('SpectralIndex')
    if matchBy.lower() == 'name' or matchBy.lower() == 'patch':
        LSM1.table = vstack([table1, table2])
    elif matchBy.lower() == 'radius':
        # Create catalogs and match by position (radius in degrees)
        catalog1 = ICRS(LSM1.getColValues('RA'), LSM1.getColValues('Dec'),
                        unit=(u.degree, u.degree))
        catalog2 = ICRS(LSM2.getColValues('RA'), LSM2.getColValues('Dec'),
                        unit=(u.degree, u.degree))
        idx, d2d, d3d = catalog1.match_to_catalog_sky(catalog2)
        matches = np.where(d2d.value <= float(radius))
matchCol1 = np.array(range(len(LSM1)))
matchCol2 = np.array(range(len(LSM2))) + len(LSM1)
# Set values to be the same for the matches
matchCol2[idx[matches]] = matchCol1[matches]
# Now add columns and stack
col1 = Column(name='match', data=matchCol1)
col2 = Column(name='match', data=matchCol2)
table1.add_column(col1)
table2.add_column(col2)
LSM1.table = vstack([table1, table2])
# Add spectral index column back
spData = np.zeros((len(LSM1), 2), dtype=np.float)
for i, name in enumerate(LSM1.getColValues('Name')):
if name in nameCol2:
indx = np.where(nameCol2.data == name)
spData[i] = spCol2[indx]
else:
indx = np.where(nameCol1.data == name)
spData[i] = spCol1[indx]
spCol = Column(name='SpectralIndex', data=spData)
LSM1.table.add_column(spCol, index=spCol1Indx)
if keep == 'from1' or keep == 'from2':
# Remove any duplicates
if matchBy.lower() == 'name':
colName = 'Name'
elif matchBy.lower() == 'radius':
colName = 'match'
vals = LSM1.table[colName]
toRemove = []
for val in vals:
indx = np.where(vals == val)[0]
        if len(indx) > 1:
            if keep == 'from1':
                toRemove.extend(indx[1:])
            else:
                toRemove.append(indx[0])
LSM1.table.remove_rows(toRemove)
# Rename any duplicates
names = LSM1.getColValues('Name')
for name in set(names):
indx = np.where(names == name)[0]
if len(indx) > 1:
LSM1.table['Name'][indx[0]] = name + '_1'
LSM1.table['Name'][indx[1]] = name + '_2'
if matchBy.lower() == 'radius':
LSM1.table.remove_column('match')
if LSM1._hasPatches:
LSM1._updateGroups()
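# Example usage (a sketch; the first argument must be a SkyModel object, the
# second may be a SkyModel or a file name, and radius is in degrees when
# matching by position):
#
#   from lsmtool import skymodel
#   s1 = skymodel.SkyModel('field1.model')
#   concatenate(s1, 'field2.model', matchBy='radius', radius=0.001, keep='from1')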
File added