Dataset Viewer
prompt stringlengths 15–655k | completion stringlengths 3–32.4k | api stringlengths 8–52 |
---|---|---|
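The table above summarizes a corpus of (prompt, completion, api) triples for code completion; each row below shows one truncated prompt, its completion, and the API it exercises. As a minimal sketch of how such a split might be inspected locally, assuming a JSONL export with exactly these three columns (the file name is hypothetical):
import pandas as pd
# Hypothetical local export of the table; the real dataset may ship as
# parquet or JSONL under a different name.
df = pd.read_json("prompt_completion_api.jsonl", lines=True)
# Each row pairs a truncated code prompt with the expression that completes
# it and the fully qualified API the completion exercises.
for _, row in df.head(3).iterrows():
    print(row["api"])
    print(row["prompt"][-80:], "->", row["completion"])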
import sqlite3
import numpy as np
import Helpers
conn = sqlite3.connect('../data/SandP500.sqlite3')
all_tickers = Helpers.get_all_tickers(conn)
cursor = conn.cursor()
prices_at_start = np.array([])
prices_at_end = np.array([])
for ticker in all_tickers:
cursor.execute("SELECT closing_price "
"FROM historical_prices "
f"WHERE ticker is '{ticker}'"
"AND date is '2013-02-08'")
all_rows = cursor.fetchall()
if len(all_rows) == 0:
continue
print(ticker)
price_at_start = all_rows[0]
prices_at_start =
|
np.append(prices_at_start, price_at_start)
|
numpy.append
|
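The row above completes a loop that grows an array with np.append. A small self-contained sketch of that pattern with toy prices (not dataset values), plus the usual list-then-array alternative, since np.append copies the array on every call:
import numpy as np
prices = np.array([])
for p in (10.0, 11.5, 12.25):
    prices = np.append(prices, p)      # returns a new array each time
# Collecting into a list and converting once avoids the repeated copies.
prices_fast = np.array([10.0, 11.5, 12.25])
assert np.allclose(prices, prices_fast)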
import numpy as np
from ..visualization import Viewer
from ..utils import Subject, Observer, deprecated, matrices, NList
import copy
from numba import njit, int64, float64
from numba.types import ListType as LT
@njit(int64[:](LT(LT(int64))), cache=True)
def _valence(adj_x2y):
valences = np.zeros(len(adj_x2y), dtype=np.int64)
for idx, row in enumerate(adj_x2y):
valences[idx] = len(row)
return valences
class Clipping(object):
class __Flip(object):
def __init__(self):
self.x = False
self.y = False
self.z = False
def __init__(self):
self.min_x = None
self.max_x = None
self.min_y = None
self.max_y = None
self.min_z = None
self.max_z = None
self.flip = self.__Flip()
super(Clipping, self).__init__()
def __repr__(self):
return ("Clipping:\n" +
f"min_x: {self.mini_x} \tmax_x: {self.max_x} \t{('flipped' if self.flip.x else '')}\n" +
f"min_y: {self.min_y} \tmax_y: {self.max_y} \t{('flipped' if self.flip.y else '')}\n" +
f"min_z: {self.min_z} \tmax_z: {self.max_z} \t{('flipped' if self.flip.z else '')}\n")
class AbstractMesh(Observer, Subject):
"""
This class represents a generic mesh. It must be extended by a specific mesh class. It stores all the information
shared among the different kinds of supported meshes.
"""
def __init__(self):
self.__boundary_needs_update = True
self.__boundary_cached = None
self.__finished_loading = False
self._dont_update = False
self.__poly_size = None
self.vertices = None #npArray (Nx3)
self.__edges = None #npArray (Nx2)
self.__polys = None #npArray (NxM)
self.labels = None # npArray (Nx1)
self.uvcoords = None
self.coor = [] # Mapping of uv coordinate indices per face
self.texture = None
self.material = {}
self.smoothness = False
self.__adj_vtx2vtx = None
self.__adj_vtx2edge = None
self.__adj_vtx2poly = None #npArray (NxM)
self.__adj_edge2vtx = None
self.__adj_edge2edge = None
self.__adj_edge2poly = None
self.__adj_poly2vtx = None
self.__adj_poly2edge = None
self.__adj_poly2poly = None
self.__bounding_box = None #npArray (2x3)
self.__simplex_centroids = None #npArray (Nx1)
self.__clipping = Clipping()
self.__visible_polys = None
self.simplex_metrics = dict() #dictionary[propertyName : ((min, max), npArray (Nx1))]
self.__filename = ''
Observer.__init__(self)
Subject.__init__(self)
# ==================== METHODS ==================== #
def __setattr__(self, key, value):
self.__dict__[key] = value
if key[0] != "_" and self.__finished_loading:
self.update()
def copy(self):
new = type(self)()
for key in self.__dict__.keys():
if "observer" not in key and ("adj" not in key or "poly2poly" in key):
setattr(new, key, copy.deepcopy(getattr(self, key)))
return new
def update(self):
"""
Update the mesh manually when the Viewer is set as not reactive.
"""
self.__boundary_needs_update = True
self.__update_bounding_box()
if (not self._dont_update):
self._notify()
def show(self, width = 700, height = 700, mesh_color = None, reactive = False):
"""
Show the mesh within the current cell. It is possible to manipulate the mesh through the UI.
Parameters:
width (int): The width of the canvas
height (int): The height of the canvas
mesh_color: The color used to render the mesh (optional)
reactive (bool): If True, the viewer updates automatically when the mesh changes
Return:
Viewer: The viewer object
"""
view = Viewer(self, width = width, height = height, reactive=reactive)
view.show()
return view
def set_clipping(self, min_x = None, max_x = None,
min_y = None, max_y = None,
min_z = None, max_z = None,
flip_x = None, flip_y = None, flip_z = None):
"""
Clip the mesh along the x, y and z axes. It doesn't affect the geometry of the mesh.
Parameters:
min_x (float): The minimum value of x
max_x (float): The maximum value of x
min_y (float): The minimum value of y
max_y (float): The maximum value of y
min_z (float): The minimum value of z
max_z (float): The maximum value of z
"""
if min_x is not None:
self.__clipping.min_x = min_x
if max_x is not None:
self.__clipping.max_x = max_x
if min_y is not None:
self.__clipping.min_y = min_y
if max_y is not None:
self.__clipping.max_y = max_y
if min_z is not None:
self.__clipping.min_z = min_z
if max_z is not None:
self.__clipping.max_z = max_z
if flip_x is not None:
self.__clipping.flip.x = flip_x
if flip_y is not None:
self.__clipping.flip.y = flip_y
if flip_z is not None:
self.__clipping.flip.z = flip_z
self.__boundary_needs_update = True
self.update()
def reset_clipping(self):
"""
Reset the clipping to the bounding box so that the whole mesh is visible.
"""
self.set_clipping(min_x = self.bbox[0,0], max_x = self.bbox[1,0],
min_y = self.bbox[0,1], max_y = self.bbox[1,1],
min_z = self.bbox[0,2], max_z = self.bbox[1,2])
self.__boundary_needs_update = True
self.update()
def load_from_file(self, filename):
raise NotImplementedError('This method must be implemented in the subclasses')
def __compute_adjacencies(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def save_file(self, filename):
raise NotImplementedError('This method must be implemented in the subclasses')
def get_metric(self, property_name, id_element):
"""
Get a specific metric element from the dictionary of metrics 'simplex_metrics'.
Parameters:
property_name (string): The name of the wanted metric
id_element (int): The index of a specific element of the metric
Returns:
object: The specific metric element. The return type depends on the metric
"""
return self.simplex_metrics[property_name][id_element]
@property
def clipping(self):
"""
Return the clipping region of the current mesh.
"""
return self.__clipping
@property
def visible_polys(self):
return self.__visible_polys
def __compute_metrics(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def as_triangles_flat(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def as_edges_flat(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def _as_threejs_colors(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def boundary(self):
"""
Compute the boundary of the current mesh. It only returns the faces that are inside the clipping
"""
min_x = self.clipping.min_x
max_x = self.clipping.max_x
min_y = self.clipping.min_y
max_y = self.clipping.max_y
min_z = self.clipping.min_z
max_z = self.clipping.max_z
flip_x = self.clipping.flip.x
flip_y = self.clipping.flip.y
flip_z = self.clipping.flip.z
centroids = np.array(self.poly_centroids)
x_range = np.logical_xor(flip_x,((centroids)[:,0] >= min_x) & (centroids[:,0] <= max_x))
y_range = np.logical_xor(flip_y,((centroids[:,1] >= min_y) & (centroids[:,1] <= max_y)))
z_range =
|
np.logical_xor(flip_z,((centroids[:,2] >= min_z) & (centroids[:,2] <= max_z)))
|
numpy.logical_xor
|
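In the row above, boundary() keeps a polygon when its centroid falls inside the clip interval, XOR'd with a per-axis flip flag. A tiny illustration with made-up centroid coordinates:
import numpy as np
centroids_x = np.array([0.1, 0.5, 0.9, 1.5])
inside = (centroids_x >= 0.0) & (centroids_x <= 1.0)
keep_normal = np.logical_xor(False, inside)    # same as `inside`
keep_flipped = np.logical_xor(True, inside)    # complement of `inside`
print(keep_normal, keep_flipped)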
import os, math
import _pickle as pickle
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from sklearn import preprocessing
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', default='data', help='Parent dir of the dataset')
parser.add_argument('--file-name', default='electricity.csv', help='Name of the csv data file')
parser.add_argument('--pickle-name', default='electricity.pkl', help='Name of the output pickle file')
parser.add_argument('--horizon', type=int, default=24, help='Forecast horizon. Default=24')
parser.add_argument('--test', action='store_true', help='Whether to use the test set only.')
parser.add_argument('--hop', action='store_true', help='Whether to use test set for validation') # default=False
if __name__ == '__main__':
args = parser.parse_args()
### load the data
dir_path = args.data_folder # './data'
file_name = args.file_name
if file_name=='electricity.csv':
train_start = '2012-01-01 00:00:00'
if args.test:
train_end = '2013-10-19 23:00:00'
test_start = '2014-05-20 00:00:00' #need additional 7 days as given info
test_end = '2014-12-31 23:00:00'
elif args.hop:
train_end = '2012-04-30 23:00:00'
test_start = '2012-04-24 00:00:00'
test_end = '2012-05-31 23:00:00'
else:
train_end = '2013-10-19 23:00:00'
test_start = '2013-10-20 00:00:00' #need additional 7 days as given info
test_end = '2014-12-31 23:00:00'
elif 'europe_power_system' in file_name:
train_start = '2015-01-01 00:00:00'
if args.test:
train_end = '2017-01-15 23:00:00'
test_start = '2017-06-17 00:00:00' #need additional 7 days as given info
test_end = '2017-11-30 23:00:00'
elif args.hop:
train_end = '2015-04-30 23:00:00'
test_start = '2015-04-24 00:00:00' #need additional 7 days as given info
test_end = '2015-05-31 23:00:00'
else:
train_end = '2017-01-15 23:00:00'
test_start = '2017-01-16 00:00:00' #need additional 7 days as given info
test_end = '2017-11-30 23:00:00'
df = pd.read_csv(os.path.join(dir_path, file_name), sep=",", index_col=0, parse_dates=True, decimal='.')
df = df.reset_index()
df = df.drop([df.columns[0]], axis=1).transpose()
dt = df.rename(columns=df.iloc[0]).values #.drop(df.index[0])
## The date range
date_list = pd.date_range(start=train_start, end=test_end)
date_list = pd.to_datetime(date_list)
yr = int(date_list.year[0])
hour_list = []
for nDate in date_list:
for nHour in range(24):
tmp_timestamp = nDate+timedelta(hours=nHour)
hour_list.append(tmp_timestamp)
hour_list = np.array(hour_list)
#print('hour_list', hour_list.shape[0])
#print('dt.shape[0]', dt.shape[0])
station_index = list(range(dt.shape[0]))
#if args.horizon ==36:
# sliding_window_dis = 24;
#else:
# sliding_window_dis = args.horizon;
#print('sliding_window_dis: ', sliding_window_dis)
sliding_window_dis = args.horizon; # 24;
input_len = 168;
output_len = args.horizon; #24;
sample_len = input_len + output_len; #192; #168+24
coef = args.horizon/24;
total_n = int((len(date_list) - 8)/coef) #800; ## The total days
test_n = int(len(pd.date_range(start=test_start, end=test_end))/coef) #7 ## The testing days, day of the last 7 days
train_n = total_n - test_n ## The training days
#print('train_n', train_n)
#print('test_n', test_n)
trainX_list = [];trainX2_list = [];trainY_list = [];trainY2_list = []
testX_list = [];testX2_list = [];testY_list = [];testY2_list = []
#for station in station_index:
for station in station_index:
print('Station', station)
sub_series = dt[station,1:].astype('float32')
sub_index = np.array(range(dt.shape[1]-1))-np.min(np.where(sub_series>0))
trainX = np.zeros(shape=(train_n, input_len)) ## The input series
trainY = np.zeros(shape=(train_n, output_len)) ## The output series
testX =
|
np.zeros(shape=(test_n, input_len))
|
numpy.zeros
|
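The preprocessing row allocates trainX/testX buffers and fills them with sliding windows of 168 input hours followed by `horizon` output hours. A hedged sketch of that windowing on a toy series (series length, window sizes and stride are invented for illustration):
import numpy as np
series = np.arange(400, dtype=np.float32)      # one station's hourly load
input_len, output_len, stride = 168, 24, 24
X, Y = [], []
start = 0
while start + input_len + output_len <= len(series):
    X.append(series[start:start + input_len])
    Y.append(series[start + input_len:start + input_len + output_len])
    start += stride
X, Y = np.array(X), np.array(Y)
print(X.shape, Y.shape)                        # (9, 168) (9, 24)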
import numpy as np
import sys, os
if __name__== "__main__":
# read samples mesh gids
smgids = np.loadtxt("sample_mesh_gids.dat", dtype=int)
print(smgids)
# read full velo
fv = np.loadtxt("./full/velo.txt")
# read full velo
fullJ = np.loadtxt("./full/jacobian.txt")
# read sample mesh velo
sv = np.loadtxt("velo.txt")
# read sample mesh jac
sjac = np.loadtxt("jacobian.txt")
maskedVelo = []
maskedJacob= []
for i in smgids:
maskedVelo.append(fv[i])
maskedJacob.append(fullJ[i,:])
maskedVelo = np.array(maskedVelo)
maskedJacob = np.array(maskedJacob)
assert(np.allclose(maskedVelo.shape, sv.shape))
assert(not np.isnan(sv).any())
assert(not np.isnan(fv).any())
assert(np.allclose(sv, maskedVelo,rtol=1e-8, atol=1e-10))
assert(
|
np.allclose(maskedJacob.shape, sjac.shape)
|
numpy.allclose
|
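The verification row relies on np.allclose, which checks |a - b| <= atol + rtol * |b| elementwise. A short example of how the two tolerances interact:
import numpy as np
a = np.array([1.0, 1e-9])
b = np.array([1.0 + 1e-9, 0.0])
print(np.allclose(a, b, rtol=1e-8, atol=1e-10))   # False: 1e-9 exceeds atol
print(np.allclose(a, b, rtol=1e-8, atol=1e-8))    # True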
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module for tools used in vaspy
"""
import bz2
from itertools import zip_longest
import os
import re
import numpy as np
from typing import List, Iterable, Sequence, Tuple, Union, IO, Any, Optional
def open_by_suffix(filename: str) -> IO[str]:
"""Open file."""
if os.path.splitext(filename)[1] == ".bz2":
thefile = bz2.open(filename, mode="rt")
else:
thefile = open(filename, mode="rt")
return thefile
def each_slice(
iterable: Iterable, n: int, fillvalue: Optional[float] = None
) -> Iterable[Any]:
"""each_slice(iterable, n[, fillvalue]) => iterator
make new iterator object which get n item from [iterable] at once.
"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
_RERANGE = re.compile(r"(\d+)-(\d+)")
_RESINGLE = re.compile(r"\d+")
def atom_selection_to_list(
input_str: str, number: bool = True
) -> List[Union[int, str]]:
"""Return list of ordered "String" represents the number.
Parameters
----------
input_str: str
range of the atoms. The numbers are delimited by "-" or ","
Returns
--------
list
ordered "String" represents the number.
Example
--------
>>> atom_selection_to_list("1-5,8,8,9-15,10", False)
['1', '10', '11', '12', '13', '14', '15', '2', '3', '4', '5', '8', '9']
>>> atom_selection_to_list("1-5,8,8,9-15,10")
[1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15]
"""
array = input_str.split(",")
output = set()
for each in array:
if re.search(_RERANGE, each):
start, stop = re.findall(_RERANGE, each)[0]
# Version safety
output |= set(str(i) for i in range(int(start), int(stop) + 1))
elif re.search(_RESINGLE, each):
output.add(each)
if number:
return sorted(int(i) for i in output)
return sorted(list(output))
def atomtypes_atomnums_to_atoms(
atomtypes: Iterable[str], atomnums: Iterable[int]
) -> Tuple[str, ...]:
"""Return list representation for atom in use.
Parameters
------------
atomtypes: list
atom names
atomnums: list
atom numbers
Examples
--------
>>> test_nums = [2, 3, 2, 1]
>>> test_elements = ['Si', 'Ag', 'H', 'Si']
>>> atomtypes_atomnums_to_atoms(test_elements, test_nums)
('Si', 'Si', 'Ag', 'Ag', 'Ag', 'H', 'H', 'Si')
"""
atoms = []
for elem, nums in zip(atomtypes, atomnums):
for _ in range(nums):
atoms.append(elem)
return tuple(atoms)
def atoms_to_atomtypes_atomnums(atoms: List[str]) -> Tuple[List[str], List[int]]:
r"""Return atomnums and atomtypes list.
Returns
--------
atomnums
list of number of atoms
atomtypes
list of atomnames
Examples
--------
>>> test = ['Si', 'Si', 'Ag', 'Ag', 'Ag', 'H', 'H', 'Si']
>>> atoms_to_atomtypes_atomnums(test)
(['Si', 'Ag', 'H', 'Si'], [2, 3, 2, 1])
"""
thelast = ""
atomnums: List[int] = []
atomtypes: List[str] = []
while atoms:
atom = atoms.pop(0)
if thelast == atom:
atomnums[-1] = atomnums[-1] + 1
else:
atomnums.append(1)
atomtypes.append(atom)
thelast = atom
return atomtypes, atomnums
def cuboid(crystal_axes: Union[Sequence[List[float]], np.ndarray]) -> np.ndarray:
"""Return the coordinates for cuboid that includes tetrahedron represented by vectors.
Parameters
------------
vectors: array-like.
Three vectors for tetrahedron. (Crystal axis a,b,c)
Return
"""
a = np.array(crystal_axes[0])
b = np.array(crystal_axes[1])
c =
|
np.array(crystal_axes[2])
|
numpy.array
|
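The cuboid() helper above returns a box around the cell spanned by the three crystal axes. One way such a bounding cuboid could be computed is sketched below; the corner construction is an assumption for illustration, not vaspy's own implementation:
import numpy as np
a = np.array([3.0, 0.0, 0.0])
b = np.array([0.5, 3.0, 0.0])
c = np.array([0.0, 0.2, 4.0])
# All 8 corners of the parallelepiped spanned by a, b, c ...
corners = np.array([i * a + j * b + k * c
                    for i in (0, 1) for j in (0, 1) for k in (0, 1)])
# ... and the axis-aligned cuboid that contains them.
cuboid_min, cuboid_max = corners.min(axis=0), corners.max(axis=0)
print(cuboid_min, cuboid_max)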
__author__ = 'Mario'
import numpy as np
from scipy.stats import norm
class EuropeanLookback():
def __init__(self, strike, expiry, spot, sigma, rate, dividend, M, flag, N=100, Vbar=.12, alpha=.69):
# Instantiate variables
self.strike = float(strike)
self.expiry = float(expiry)
self.spot = float(spot)
self.sigma = float(sigma)
self.sigma2 = sigma2 = float(sigma)*float(sigma)
self.rate = float(rate)
self.dividend = float(dividend)
self.alpha = float(alpha)
self.dt = float(expiry)/float(N)
self.Vbar = Vbar
self.xi = xi = .025
self.N = N
self.M = int(M)
self.beta1 = -.88
self.beta2 = -.42
self.beta3 = -.0003
self.alphadt = self.alpha*self.dt
self.xisdt = self.xi*np.sqrt(self.dt)
self.erddt = np.exp((self.rate-self.dividend)*self.dt)
self.egam1 = np.exp(2*(self.rate-self.dividend)*self.dt)
self.egam2 = -2*self.erddt + 1
self.eveg1 = np.exp(-self.alpha*self.dt)
self.eveg2 = self.Vbar - self.Vbar*self.eveg1
self.VectorizedMonteCarlo(float(spot), float(rate), float(sigma),
float(expiry), int(N), int(M), float(strike),
float(sigma2), flag)
def VectorizedMonteCarlo(self, spot, rate, sigma, expiry, N, M, strike, sigma2, flag):
# Initialize the matrices
newdt = float(expiry)/float(N) # Get the dt for the Wiener process
dW = np.sqrt(newdt)*np.random.normal(0,1,(M,N-1)) # Create the Brownian increments
W = np.cumsum(dW, axis=1) # Set up the Wiener process as a matrix
time = np.linspace(0, expiry, N) # Set the discrete time space
tempA = np.zeros((M,1)) # Create an initial zero vector for the first column
#This is the Random aspects and the stochastic volatility
Wnew = np.c_[tempA,W] # Append the Wiener matrix to the zeros column
Vt = self.sigma2
Vtn = np.abs(Vt + self.alphadt*(self.Vbar - Vt) + self.xisdt*np.sqrt(Vt)*Wnew)
tt = np.tile(np.array(time),(M,1)) # Create a matrix of time x M so we have time for every iteration
self.tau = expiry-1
### Calculate the lookback option ###
assetpath1 = np.array(spot*np.exp((rate-.5*Vtn)*tt+np.sqrt(Vtn)*Wnew)) #European standard Antithetic1
assetpath2 = np.array(spot*np.exp((rate-.5*Vtn)*tt+
|
np.sqrt(Vtn)
|
numpy.sqrt
|
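The option-pricing row builds Wiener paths by accumulating sqrt(dt)-scaled normal increments with np.cumsum and prepending a zero column. A toy version with made-up path and step counts:
import numpy as np
M, N, expiry = 4, 10, 1.0                      # paths, steps, maturity
dt = expiry / N
dW = np.sqrt(dt) * np.random.normal(0.0, 1.0, (M, N - 1))
W = np.c_[np.zeros((M, 1)), np.cumsum(dW, axis=1)]   # W(0) = 0 for every path
print(W.shape)                                 # (4, 10)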
import unittest
from scipy.stats import gaussian_kde
from scipy.linalg import cholesky
import numpy as np
from pyapprox.bayesian_inference.laplace import *
from pyapprox.density import NormalDensity, ObsDataDensity
from pyapprox.utilities import get_low_rank_matrix
from pyapprox.randomized_svd import randomized_svd, MatVecOperator, \
adjust_sign_svd
from pyapprox.tests.test_density import helper_gradient
from pyapprox.multivariate_gaussian import MultivariateGaussian,\
CholeskySqrtCovarianceOperator, CovarianceOperator, get_operator_diagonal
from pyapprox.models.wrappers import evaluate_1darray_function_on_2d_array
class QuadraticMisfitModel(object):
def __init__(self,num_vars,rank,num_qoi,
obs=None,noise_covariance=None,Amatrix=None):
self.num_vars = num_vars
self.rank=rank
self.num_qoi=num_qoi
if Amatrix is None:
self.Amatrix = get_low_rank_matrix(num_qoi,num_vars,rank)
else:
self.Amatrix=Amatrix
if obs is None:
self.obs = np.zeros(num_qoi)
else:
self.obs=obs
if noise_covariance is None:
self.noise_covariance = np.eye(num_qoi)
else:
self.noise_covariance=noise_covariance
self.noise_covariance_inv = np.linalg.inv(self.noise_covariance)
def value(self,sample):
assert sample.ndim==1
residual = np.dot(self.Amatrix,sample)-self.obs
return np.asarray(
[0.5*np.dot(residual.T,np.dot(self.noise_covariance_inv,residual))])
def gradient(self,sample):
assert sample.ndim==1
grad = np.dot(self.Amatrix.T,np.dot(self.noise_covariance_inv,
np.dot(self.Amatrix,sample)-self.obs))
return grad
def gradient_set(self,samples):
assert samples.ndim==2
num_vars, num_samples = samples.shape
gradients = np.empty((num_vars,num_samples),dtype=float)
for i in range(num_samples):
gradients[:,i] = self.gradient(samples[:,i])
return gradients
def hessian(self,sample):
assert sample.ndim==1 or sample.shape[1]==1
return np.dot(
np.dot(self.Amatrix.T,self.noise_covariance_inv),self.Amatrix)
def __call__(self,samples,opts=dict()):
eval_type=opts.get('eval_type','value')
if eval_type=='value':
return evaluate_1darray_function_on_2d_array(
self.value,samples,opts)
elif eval_type=='value_grad':
vals = evaluate_1darray_function_on_2d_array(
self.value,samples,opts)
return np.hstack((vals,self.gradient_set(samples).T))
elif eval_type=='grad':
return self.gradient_set(samples).T
else:
raise Exception('%s is not a valid eval_type'%eval_type)
class LogUnormalizedPosterior(object):
def __init__(self, misfit, misfit_gradient, prior_pdf, prior_log_pdf,
prior_log_pdf_gradient):
"""
Initialize the object.
Parameters
----------
"""
self.misfit = misfit
self.misfit_gradient = misfit_gradient
self.prior_pdf = prior_pdf
self.prior_log_pdf = prior_log_pdf
self.prior_log_pdf_gradient = prior_log_pdf_gradient
def gradient(self,samples):
"""
Evaluate the gradient of the logarithm of the unnormalized posterior
likelihood(x)*prior(x)
at a sample x
Parameters
----------
samples : np.ndarray (num_vars, num_samples)
The samples at which to evaluate the gradient
Returns
-------
grad : np.ndarray (num_vars, num_samples)
The gradient of the logarithm of the unnormalized posterior
"""
if samples.ndim==1:
samples=samples[:,np.newaxis]
grad = -self.misfit_gradient(samples) + \
self.prior_log_pdf_gradient(samples)
return grad
def __call__(self,samples,opts=dict()):
"""
Evaluate the logarithm of the unnormalized posterior
likelihood(x)*prior(x)
at samples x
Parameters
----------
samples : np.ndarray (num_vars, num_samples)
The samples at which to evaluate the unnormalized posterior
Returns
-------
values : np.ndarray (num_samples,1)
The logarithm of the unnormalized posterior
"""
if samples.ndim==1:
samples=samples[:,np.newaxis]
eval_type = opts.get('eval_type','value')
if eval_type=='value':
values = -self.misfit(samples)+self.prior_log_pdf(samples)
assert values.ndim==2
elif eval_type=='grad':
values = self.gradient(samples).T
elif eval_type=='value_grad':
values = -self.misfit(samples)+self.prior_log_pdf(samples)
grad = self.gradient(samples)
values = np.hstack((values,grad))
else:
raise Exception()
return values
def assert_ndarray_allclose(matrix1,matrix2,atol=1e-8,rtol=1e-5,msg=None):
"""
A more useful function for testing equivalence of numpy arrays.
Print norms used by np.allclose function to determine equivalence.
Matrix1 is considered the truth
"""
if not np.allclose(matrix1,matrix2,atol=atol,rtol=rtol):
if msg is not None:
print(msg)
diff = np.absolute(matrix1-matrix2)
abs_error = diff.max()
rel_error = (diff/np.absolute(matrix1)).max()
print('abs error:', abs_error)
print('rel error:', rel_error)
print('atol:', atol)
print('rtol:', rtol)
print('matrix1 shape',matrix1.shape)
print('matrix2 shape',matrix2.shape)
assert False, 'matrices are not equivalent'
def setup_quadratic_misfit_problem(prior,rank,noise_sigma2=1):
# Define observations
num_qoi = 2*rank
#assert num_qoi>=rank
noise_covariance = np.eye(num_qoi)*noise_sigma2
noise_covariance_inv = np.linalg.inv(noise_covariance)
# In high dimensions computing cholesky factor is too expensive.
# That is why we use PDE based operator
noise_covariance_chol_factor = np.linalg.cholesky(noise_covariance)
truth_sample = prior.generate_samples(1)[:,0]
num_vars = truth_sample.shape[0]
Amatrix = get_low_rank_matrix(num_qoi,num_vars,rank)
noise = np.dot(noise_covariance_chol_factor,
np.random.normal(0.,noise_sigma2,num_qoi))
obs = np.dot(Amatrix,truth_sample)+noise
# Define misfit model
misfit_model = QuadraticMisfitModel(num_vars,rank,num_qoi,Amatrix=Amatrix)
return misfit_model, noise_covariance_inv, obs
def posterior_covariance_helper(prior, rank, comparison_tol,
test_sampling=False, plot=False):
"""
Test that the Laplace posterior approximation can be obtained using
the action of the sqrt prior covariance computed using a PDE solve
Parameters
----------
prior : MultivariateGaussian object
The model which must be able to compute the action of the sqrt of the
prior covariance (and its transpose) on a set of vectors
rank : integer
The rank of the linear model used to generate the observations
comparison_tol :
tolerances for each of the internal comparisons. This allows different
accuracy for PDE based operators
"""
# Define prior sqrt covariance and covariance operators
L_op = prior.sqrt_covariance_operator
# Extract prior information required for computing exact posterior
# mean and covariance
num_vars = prior.num_vars()
prior_mean = np.zeros((num_vars),float)
L = L_op(np.eye(num_vars),False)
L_T = L_op(np.eye(num_vars),True)
assert_ndarray_allclose(L.T,L_T,rtol=comparison_tol,atol=0,
msg='Comparing prior sqrt and transpose')
prior_covariance = np.dot(L,L_T)
prior_pointwise_variance = prior.pointwise_variance()
assert_ndarray_allclose(
np.diag(prior_covariance), prior_pointwise_variance, rtol=1e-14,
atol=0,msg='Comparing prior pointwise variance')
misfit_model, noise_covariance_inv, obs = setup_quadratic_misfit_problem(
prior,rank,noise_sigma2=1)
# Get analytical mean and covariance
prior_hessian = np.linalg.inv(prior_covariance)
exact_laplace_mean, exact_laplace_covariance = \
laplace_posterior_approximation_for_linear_models(
misfit_model.Amatrix, prior.mean, prior_hessian,
noise_covariance_inv, obs)
# Define prior conditioned misfit operator
sample = np.zeros(num_vars)
misfit_hessian_operator = MisfitHessianVecOperator(
misfit_model, sample, fd_eps=None)
LHL_op = PriorConditionedHessianMatVecOperator(
L_op, misfit_hessian_operator)
# For testing purposes build entire L*H*L matrix using operator
# and compare to result based upon explicit matrix multiplication
LHL_op = LHL_op.apply(np.eye(num_vars),transpose=False)
H = misfit_model.hessian(sample)
assert np.allclose(H,np.dot(np.dot(
misfit_model.Amatrix.T,noise_covariance_inv),misfit_model.Amatrix))
LHL_mat = np.dot(L_T,np.dot(H,L))
assert_ndarray_allclose(LHL_mat, LHL_op, rtol=comparison_tol,
msg='Comparing prior matrix and operator based LHL')
# Test singular values obtained by randomized svd using operator
# are the same as those obtained using singular decomposition
Utrue,Strue,Vtrue = np.linalg.svd(LHL_mat)
Utrue, Vtrue = adjust_sign_svd(Utrue,Vtrue)
standard_svd_opts = {
'num_singular_values':rank, 'num_extra_samples':10}
svd_opts={'single_pass':True, 'standard_opts':standard_svd_opts}
L_post_op = get_laplace_covariance_sqrt_operator(
L_op, misfit_hessian_operator, svd_opts, weights=None,
min_singular_value=0.0)
#print np.max((Strue[:rank]-L_post_op.e_r)/Strue[0])
max_error = np.max(Strue[:rank]-L_post_op.e_r)
assert max_error/Strue[0]<comparison_tol, max_error/Strue[0]
assert_ndarray_allclose(Vtrue.T[:,:rank],L_post_op.V_r,rtol=1e-6,
msg='Comparing eigenvectors')
L_post_op.V_r=Vtrue.T[:,:rank]
# Test posterior sqrt covariance operator transpose is the same as
# explicit matrix transpose of matrix obtained by prior sqrt
# covariance operator
L_post = L_post_op.apply(np.eye(num_vars),transpose=False)
L_post_T = L_post_op.apply(np.eye(num_vars),transpose=True)
assert_ndarray_allclose(L_post.T,L_post_T,rtol=comparison_tol,
msg='Comparing posterior sqrt and transpose')
# Test posterior covariance operator produced matrix is the same
# as the exact posterior covariance obtained using analytical formula
if rank==num_vars:
# this test only makes sense if entire set of directions is found
# if low rank approx is used then this will of course induce errors
post_covariance = np.dot(L_post,L_post_T)
assert_ndarray_allclose(
exact_laplace_covariance,post_covariance,rtol=comparison_tol,
atol=0.,
msg='Comparing matrix and operator based posterior covariance')
# Test pointwise covariance of posterior
post_pointwise_variance, prior_pointwise_variance=\
get_pointwise_laplace_variance_using_prior_variance(
prior, L_post_op, prior_pointwise_variance)
assert_ndarray_allclose(
np.diag(exact_laplace_covariance),post_pointwise_variance,
rtol=comparison_tol,atol=0.,msg='Comparing pointwise variance')
if not test_sampling:
return
num_samples = int(2e5)
posterior_samples = sample_from_laplace_posterior(
exact_laplace_mean, L_post_op, num_vars, num_samples, weights=None)
assert_ndarray_allclose(
exact_laplace_covariance,np.cov(posterior_samples),
atol=1e-2*exact_laplace_covariance.max(),rtol=0.,
msg='Comparing posterior samples covariance')
assert_ndarray_allclose(
exact_laplace_mean.squeeze(),
np.mean(posterior_samples,axis=1),atol=2e-2,rtol=0.,
msg='Comparing posterior samples mean')
if plot:
# plot marginals of posterior using original ordering
from pyapprox.visualization import plot_multiple_2d_gaussian_slices
texfilename= 'slices.tex'
plot_multiple_2d_gaussian_slices(
exact_laplace_mean[:10], np.diag(exact_laplace_covariance)[:10],
texfilename, reference_gaussian_data=(0.,1.),show=False)
# plot marginals of posterior in rotated coordinates
# from most to least important.
# The following is not feasible in practice as we cannot compute
# entire covariance matrix in full space. But we have
# C_r = V_r*L*V_r*D*V_r.T*L.T*V_r.T
# if we compute matrix products from right to left we only have to
# compute at most (d x r) matrices. And if we only want, say, the first 20
# variances then can apply C_r to vectors e_i i=1,...,20
# then we need at most (dx20 matrices)
texfilename= 'rotated-slices.tex'
V_r= L_post_op.V_r
plot_multiple_2d_gaussian_slices(
np.dot(V_r.T,exact_laplace_mean[:10]),
np.diag(np.dot(V_r.T,np.dot(exact_laplace_covariance,V_r)))[:10],
texfilename, reference_gaussian_data=(0.,1.),show=True)
class TestLaplace(unittest.TestCase):
def setUp( self ):
np.random.seed(2)
@unittest.skip(reason="only shows how to plot")
def test_plot_multiple_2d_gaussian_slices(self):
from pyapprox.visualization import plot_multiple_2d_gaussian_slices
mean=np.array([0,1,-1])
covariance = np.diag(np.array([1,0.5,0.025]))
texfilename= 'slices.tex'
plot_multiple_2d_gaussian_slices(
mean[:10], np.diag(covariance)[:10],texfilename,
reference_gaussian_data=(0.,1.),show=False)
import glob, os
filenames = glob.glob(texfilename[:-4]+'*')
for filename in filenames:
os.remove(filename)
def test_operator_diagonal(self):
num_vars = 4; eval_concurrency=2
randn = np.random.normal(0.,1.,(num_vars,num_vars))
prior_covariance = np.dot(randn.T,randn)
sqrt_covar_op = CholeskySqrtCovarianceOperator(
prior_covariance,eval_concurrency)
covariance_operator=CovarianceOperator(sqrt_covar_op)
diagonal = get_operator_diagonal(
covariance_operator, num_vars, eval_concurrency, transpose=None)
assert np.allclose(diagonal,np.diag(prior_covariance))
def test_posterior_dense_matrix_covariance_operator(self):
num_vars = 121; rank = 10; eval_concurrency=20
#randn = np.random.normal(0.,1.,(num_vars,num_vars))
#prior_covariance = np.dot(randn.T,randn)
prior_covariance = np.eye(num_vars)
prior_sqrt_covariance_op = CholeskySqrtCovarianceOperator(
prior_covariance,eval_concurrency)
prior = MultivariateGaussian(prior_sqrt_covariance_op)
comparison_tol = 6e-7
posterior_covariance_helper(
prior, rank, comparison_tol,test_sampling=True)
def test_log_unnormalized_posterior(self):
num_dims = 4; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
prior_mean = np.zeros((num_dims),float)
prior_covariance = np.eye(num_dims)*0.25
prior_covariance_chol_factor = np.linalg.cholesky(prior_covariance)
noise_covariance = np.eye(num_qoi)*0.1
noise_covariance_inv = np.linalg.inv(noise_covariance)
misfit_model = QuadraticMisfitModel(
num_dims, rank, num_qoi, obs, noise_covariance=noise_covariance)
prior_density = NormalDensity(prior_mean,covariance=prior_covariance)
objective = LogUnormalizedPosterior(
misfit_model,misfit_model.gradient_set,prior_density.pdf,
prior_density.log_pdf,prior_density.log_pdf_gradient)
samples = prior_density.generate_samples(2)
exact_log_unnormalized_posterior_vals = np.log(
np.exp(-misfit_model(samples))*
prior_density.pdf(samples))
log_unnormalized_posterior_vals = objective(samples)
assert np.allclose(exact_log_unnormalized_posterior_vals,
log_unnormalized_posterior_vals)
exact_log_unnormalized_posterior_grads = \
-misfit_model.gradient_set(samples)+\
prior_density.log_pdf_gradient(samples)
log_unnormalized_posterior_grads = objective(
samples,{'eval_type':'grad'})
assert np.allclose(exact_log_unnormalized_posterior_grads.T,
log_unnormalized_posterior_grads)
def test_get_map_point(self):
num_dims = 4; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
prior_mean = np.zeros((num_dims),float)
prior_covariance = np.eye(num_dims)*0.25
prior_covariance_chol_factor = np.linalg.cholesky(prior_covariance)
noise_covariance = np.eye(num_qoi)*0.1
noise_covariance_inv = np.linalg.inv(noise_covariance)
misfit_model = QuadraticMisfitModel(
num_dims, rank, num_qoi, obs, noise_covariance=noise_covariance)
# exact map point should be mean of Gaussian posterior
prior_hessian = np.linalg.inv(prior_covariance)
exact_map_point = \
laplace_posterior_approximation_for_linear_models(
misfit_model.Amatrix,prior_mean,prior_hessian,
noise_covariance_inv,obs)[0]
prior_density = NormalDensity(prior_mean,covariance=prior_covariance)
objective = LogUnormalizedPosterior(
misfit_model,misfit_model.gradient_set,prior_density.pdf,
prior_density.log_pdf,prior_density.log_pdf_gradient)
initial_point = prior_mean
map_point, obj_min = find_map_point(objective,initial_point)
assert np.allclose(exact_map_point.squeeze(), map_point)
assert np.allclose(
objective.gradient(map_point),objective.gradient(exact_map_point))
assert np.allclose(objective.gradient(map_point),np.zeros(num_dims))
def test_push_forward_gaussian_though_linear_model(self):
num_qoi = 1
num_dims = 2
A = np.random.normal(0.,1.,(num_qoi,num_dims))
b = np.random.normal(0.,1.,(num_qoi))
mean = np.ones((num_dims),float)
covariance = 0.1*np.eye(num_dims)
covariance_chol_factor = cholesky(covariance)
push_forward_mean, push_forward_covariance =\
push_forward_gaussian_though_linear_model(A,b,mean,covariance)
# Generate samples from original density and push forward through model
# and approximate density using KDE
num_samples = 1000000
samples = np.dot(covariance_chol_factor,
np.random.normal(0.,1.,(num_dims,num_samples)))+\
np.tile(mean.reshape(num_dims,1),num_samples)
push_forward_samples = np.dot(A,samples)+b
kde_density = ObsDataDensity(push_forward_samples)
push_forward_density = NormalDensity(
push_forward_mean,covariance=push_forward_covariance)
test_samples = np.linspace(
push_forward_samples.min(),
push_forward_samples.max(),100).reshape(1,100)
kde_values = kde_density.pdf(test_samples)
normal_values = push_forward_density.pdf(test_samples)
assert np.linalg.norm(kde_values-normal_values[:,0])<4e-2
#plt = kde_density.plot_density(1000,show=False)
#import pylab
#pylab.setp(plt, linewidth=2, color='r')
#push_forward_density.plot_density(100,show=True)
def test_quadratic_misfit_model(self):
num_dims = 10; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
model = QuadraticMisfitModel(num_dims,rank,num_qoi,obs)
sample = np.random.normal(0.,1.,(num_dims))
helper_gradient(model.value,model.gradient,sample)
def test_neg_log_posterior(self):
num_dims = 10; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
noise_covariance = np.eye(num_qoi)*0.1
misfit_model=QuadraticMisfitModel(
num_dims,rank,num_qoi,obs,noise_covariance=noise_covariance)
prior_mean = np.ones((num_dims),float)
prior_covariance = np.eye(num_dims)*0.25
prior_density = NormalDensity(prior_mean,covariance=prior_covariance)
objective = LogUnormalizedPosterior(
misfit_model,misfit_model.gradient_set,prior_density.pdf,
prior_density.log_pdf,prior_density.log_pdf_gradient)
sample = np.random.normal(0.,1.,(num_dims))
helper_gradient(misfit_model.value,misfit_model.gradient,sample)
def test_directional_derivative_using_finite_difference(self):
num_dims = 10; rank = 3; num_qoi=3
model = QuadraticMisfitModel(num_dims,rank,num_qoi)
directions = np.random.normal(0.,1.,(num_dims,2))
directions /= np.linalg.norm(directions,axis=0)
# derivatives of function values
sample = np.random.normal(0.,1.,(num_dims,1))
opts = {'eval_type':'value_grad'}
result = model(sample,opts)[0,:]
# result is num_samples x num_qoi. There is only one sample so take
# first row of result above
value_at_sample = result[0:1]# must be a vector
gradient = result[1:]
#gradient = model.gradient(sample)
assert np.allclose(
|
np.dot(gradient,directions)
|
numpy.dot
|
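The test row compares np.dot(gradient, directions) against finite differences. A standalone sketch of that check on a toy quadratic (not the pyapprox model):
import numpy as np
def f(x):
    return 0.5 * np.dot(x, x)                  # toy quadratic, gradient is x
x = np.random.normal(0.0, 1.0, 5)
d = np.random.normal(0.0, 1.0, 5)
d /= np.linalg.norm(d)
grad_dot_d = np.dot(x, d)                                  # analytic
eps = 1e-6
fd = (f(x + eps * d) - f(x - eps * d)) / (2 * eps)         # central difference
assert np.allclose(grad_dot_d, fd, atol=1e-6)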
"""Class for playing and annotating video sources in Python using Tkinter."""
import json
import logging
import pathlib
import datetime
import tkinter
import tkinter.filedialog
import numpy as np
import cv2
import PIL.Image
import PIL.ImageTk
logger = logging.getLogger("VideoPyer")
logging.basicConfig(level=logging.INFO)
# Delay should be changed with caution
# Tkinter event loop gets flooded with delays < 60 ms
DELAY = 60
# Default colour options
BKG_COLOUR = "#3E4149"
COLOUR_MAP = {"blue": "#749CE2", "pink": "#E274CF", "green": "#8CE274"}
class VideoPyer: # pylint: disable=too-many-instance-attributes
"""Play, pause and record position of mouse clicks on videos."""
def __init__(self, window: tkinter.Tk, title: str) -> None:
"""Set up video frame and menus of GUI, variables and logging.
Args:
window (tkinter.Tk): Main instance of tkinter.Tk.
title (str): Title of Tk window.
"""
self.window = window
self.window.title(title)
self.window.configure(background=BKG_COLOUR)
# Frame that will contain the video
video_frame = tkinter.Frame(self.window)
video_frame.pack(side=tkinter.TOP, pady=5)
self.canvas = tkinter.Canvas(video_frame, bg=BKG_COLOUR)
# Log position of double click on canvas to record salient 'point'
self.canvas.bind("<Double-1>", self.log_point)
# Log head direction arrow drawn on press and release of click
self.canvas.bind("<Button-1>", self.log_click)
self.canvas.bind("<ButtonRelease-1>", self.draw_line)
self.arrow_start_x, self.arrow_start_y = None, None # Store start pos of click
# Remove a selected tk object on backspace
self.canvas.bind("<BackSpace>", self.remove_tk_object)
self.selected_tk_object = None # Current object user selects
# Rotate head direction arrow with Up or Down keys
self.canvas.bind("<KeyPress>", self.rotate)
self.canvas.focus_set() # Enable listen to key presses by default
self.canvas.pack()
# Frame that will display the menu buttons
menu_frame = tkinter.Frame(self.window)
menu_frame.pack(side=tkinter.BOTTOM, pady=5)
# Button to select video
self.btn_select = tkinter.Button(
menu_frame,
text="Select video",
width=10,
command=self.select_and_open_source,
highlightbackground=BKG_COLOUR,
)
self.btn_select.grid(row=0, column=0)
# Button to begin play
self.btn_play = tkinter.Button(
menu_frame,
text="Play",
width=8,
command=self.resume_video,
highlightbackground=BKG_COLOUR,
state="disabled",
)
self.btn_play.grid(row=0, column=1)
# Button to pause
self.pause = False
self.btn_pause = tkinter.Button(
menu_frame,
text="Pause",
width=8,
command=self.pause_video,
highlightbackground=BKG_COLOUR,
state="disabled",
)
self.btn_pause.grid(row=0, column=2)
# Mini menu to select marker colour for salient 'points'
colours = list(COLOUR_MAP.keys())
var = tkinter.StringVar(video_frame)
var.set(colours[0])
self.marker_colour = colours[0]
opt_colour = tkinter.OptionMenu(
video_frame,
var,
*colours,
command=self.set_colour,
)
opt_colour.config(bg=BKG_COLOUR, width=8)
opt_colour.place(x=3, y=3)
# Set up some variables for logging (points and arrows are logged independently)
self.annotation_logs = dict()
self.tkid_to_idx = dict()
self.arrow_head_x, self.arrow_head_y = 0, 0
self.frame_counter, self.mouse_x, self.mouse_y = 0, 0, 0
self.arrows_log_keys = [
"frame_counter",
"arrow_start_x",
"arrow_start_y",
"arrow_head_x",
"arrow_head_y",
"marker_colour",
]
self.points_log_keys = ["frame_counter", "mouse_x", "mouse_y", "marker_colour"]
self.filename = None # File currently loaded
self.vid = None # OpenCV capture instance
self.img = None # Holds current frame of video
self.window.mainloop()
def set_colour(self, value: str) -> None:
"""Set colour of visible marker for double mouse clicks."""
self.marker_colour = value
def shrink(self, c_id: int, x: int, y: int, radius: int) -> None:
"""Shrink a Tk circle object over time before finalling removing it.
Args:
c_id (int): Integer ID of circle/oval object from Tk.
x (int): X coord for circle centre.
y (int): Y coord for circle centre.
radius (int): Circle radius.
"""
if radius > 0.0:
radius -= 0.5
self.canvas.coords(c_id, x - radius, y - radius, x + radius, y + radius)
self.canvas.after(100, self.shrink, c_id, x, y, radius)
else:
self.canvas.delete(c_id) # Remove circle entirely
def log_point(self, event: tkinter.Event) -> None:
"""Log the (x,y) coords of double mouse click during video and the frame number.
Coordinates are given from top left of canvas. A fading marker becomes visible."""
logger.info(
"Point (%d,%d). Frame %d. Colour %s.",
event.x,
event.y,
self.frame_counter,
self.marker_colour,
)
self.mouse_x, self.mouse_y = event.x, event.y
self.arrow_start_x, self.arrow_start_y = (event.x, event.y) # Potential arrow
radius = 8
c_id = self.canvas.create_oval(
self.mouse_x - radius,
self.mouse_y - radius,
self.mouse_x + radius,
self.mouse_y + radius,
fill=COLOUR_MAP[self.marker_colour],
)
self.shrink(c_id, self.mouse_x, self.mouse_y, radius) # Shrink circle over time
# Add relevant keys to logs for current file
for key in self.points_log_keys:
self.annotation_logs[self.filename]["points"].setdefault(key, []).append(
getattr(self, key)
)
def log_click(self, event: tkinter.Event) -> None:
"""Log (x,y) coords of mouse click during video. Check if user is clicking on
existing line object to get ready for further commands (e.g. remove, rotate)."""
self.arrow_start_x, self.arrow_start_y = event.x, event.y
self.selected_tk_object = self.canvas.find_withtag("current")[
0
] # Top most object under mouse
def draw_line(self, event: tkinter.Event) -> None:
"""Draw a line between on coords on press and release of click and log.
The frame number recorded will be that at the time on release of click."""
self.arrow_head_x, self.arrow_head_y = event.x, event.y
# Only draw intentional arrows (i.e. not just a result from regular clicks)
if (
np.linalg.norm(
np.array([self.arrow_start_x, self.arrow_start_y])
- np.array([self.arrow_head_x, self.arrow_head_y])
)
> 20
):
l_id = self.canvas.create_line(
self.arrow_head_x,
self.arrow_head_y,
self.arrow_start_x,
self.arrow_start_y,
fill="yellow",
arrow="first",
)
logger.info(
"Arrow %d (%d,%d) -> (%d, %d). Frame %d. Colour %s.",
l_id,
self.arrow_start_x,
self.arrow_start_y,
self.arrow_head_x,
self.arrow_head_y,
self.frame_counter,
self.marker_colour,
)
# Add arrow coordinates to logs
for key in self.arrows_log_keys:
self.annotation_logs[self.filename]["arrows"].setdefault(
key, []
).append(getattr(self, key))
# Maintain standard indexing starting from 0
self.tkid_to_idx[l_id] = (
len(self.annotation_logs[self.filename]["arrows"]["arrow_start_x"]) - 1
)
self.arrow_start_x, self.arrow_start_y = None, None
def remove_tk_object(self, event: tkinter.Event) -> None:
"""Remove the tk object that is currently selected from the canvas and logs
(only head direction arrows are currently removeable from logs)."""
if self.selected_tk_object:
self.canvas.delete(self.selected_tk_object)
logger.info("Object w/ id %d removed from canvas.", self.selected_tk_object)
# Remove object from our logs
remove_idx = self.tkid_to_idx.get(self.selected_tk_object)
if remove_idx is not None: # Else not a line object and thus not logged
# Remove the object's recorded annotations for all keys
for key in self.arrows_log_keys:
self.annotation_logs[self.filename]["arrows"].setdefault(key, [])
del self.annotation_logs[self.filename]["arrows"][key][remove_idx]
# Decrement the indices larger than the object just removed
for k in self.tkid_to_idx:
if k > self.selected_tk_object:
self.tkid_to_idx[k] -= 1
del self.tkid_to_idx[self.selected_tk_object]
self.selected_tk_object = None
else:
logger.info("No object selected to remove via %s.", event.keysym)
def rotate(self, event: tkinter.Event) -> None:
"""Rotate the selected object by 1 degree (increment or decrement depending
on Up or Down key press). Currently only head direction arrows can be rotated."""
if (
self.selected_tk_object
and self.canvas.type(self.selected_tk_object) == "line"
):
# Calculate angle between arrow and 0 radians East
x0, y0, x1, y1 = self.canvas.coords(self.selected_tk_object)
vec = np.array([x0 - x1, y0 - y1])
unit_vec = vec / np.linalg.norm(vec)
theta = np.arctan2(unit_vec[1], unit_vec[0]) # np.arctan2 takes (y, x)
# Increment or decrement angle
if event.keysym == "Up":
theta += np.deg2rad(1)
elif event.keysym == "Down":
theta -= np.deg2rad(1)
# Rotate arrow around its origin
radius = np.linalg.norm(np.array([x0, y0]) -
|
np.array([x1, y1])
|
numpy.array
|
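The rotate() handler in the row above is truncated right where the arrow head would be recomputed. A plain-numpy sketch of how the rotation could finish, given the angle and radius already derived (coordinates are made up; a real handler would presumably pass the new endpoint back to canvas.coords):
import numpy as np
x0, y0, x1, y1 = 50.0, 50.0, 10.0, 20.0        # arrow head and arrow origin
vec = np.array([x0 - x1, y0 - y1])
radius = np.linalg.norm(vec)
theta = np.arctan2(vec[1], vec[0]) + np.deg2rad(1)   # rotate head by 1 degree
# Recompute the head position at the same radius around the origin point.
new_x0 = x1 + radius * np.cos(theta)
new_y0 = y1 + radius * np.sin(theta)
print(round(new_x0, 2), round(new_y0, 2))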
from DNN.hans_on_feedforward_neural_network import Feedforward_neural_network
import numpy as np
Net = Feedforward_neural_network()
#-------------------------- Multivariate regression experiment -----------------------------
# --------------------------------- Prepare the data -------------------------------
#-------------------------------------------------------------------
# Conversion from 20 dimensions to 3 dimensions
X_data = np.random.uniform(0, 100, size=(1000, 20))
W = np.random.random(size=(20, 3))
Y_data = np.dot(X_data, W)
# Add Gaussian white noise to the labels so that the relationship becomes nonlinear
Y_data = Y_data +
|
np.random.normal(0, 10, size=Y_data.shape)
|
numpy.random.normal
|
#Contains MeldCohort and MeldSubject classes
from contextlib import contextmanager
from meld_classifier.paths import (
DEMOGRAPHIC_FEATURES_FILE,
CORTEX_LABEL_FILE,
SURFACE_FILE,
DEFAULT_HDF5_FILE_ROOT,
BOUNDARY_ZONE_FILE,
NVERT,
BASE_PATH,
)
import pandas as pd
import numpy as np
import nibabel as nb
import os
import h5py
import glob
import logging
import meld_classifier.mesh_tools as mt
import scipy
class MeldCohort:
"""Class to define cohort-level parameters such as subject ids, mesh"""
def __init__(self, hdf5_file_root=DEFAULT_HDF5_FILE_ROOT, dataset=None, data_dir=BASE_PATH):
self.data_dir = data_dir
self.hdf5_file_root = hdf5_file_root
self.dataset = dataset
self.log = logging.getLogger(__name__)
# class properties (readonly attributes):
# full_feature_list: list of features available in this cohort
self._full_feature_list = None
# surface information known to MeldCohort
# cortex_label: information about which nodes are cortex
self._cortex_label = None
self._cortex_mask = None
# coords: spherical 2D coordinates
self._coords = None
# surf: inflated mesh, surface vertices and triangles
self._surf = None
# surf_partial: partially inflated mesh, surface vertices and triangles
self._surf_partial = None
# surf_area: surface area for each triangle
self._surf_area = None
# adj_mat: sparse adjacency matrix for all vertices
self._adj_mat = None
# lobes: labels for cortical lobes
self._lobes = None
# neighbours: list of neighbours for each vertex
self._neighbours = None
@property
def full_feature_list(self):
"""list of features available in this cohort"""
if self._full_feature_list is None:
self._full_feature_list = []
subject_ids = self.get_subject_ids()
# get union of all features from subjects in this cohort
features = set()
for subj in subject_ids:
features = features.union(MeldSubject(subj, self).get_feature_list().copy())
self._full_feature_list = sorted(list(features))
self.log.info(f"full_feature_list: {self._full_feature_list}")
return self._full_feature_list
@property
def cortex_label(self):
if self._cortex_label is None:
p = os.path.join(self.data_dir, CORTEX_LABEL_FILE)
self._cortex_label = np.sort(nb.freesurfer.io.read_label(p))
return self._cortex_label
@property
def cortex_mask(self):
if self._cortex_mask is None:
self._cortex_mask = np.zeros(NVERT, dtype=bool)
self._cortex_mask[self.cortex_label] = True
return self._cortex_mask
@property
def surf_area(self):
if self._surf_area is None:
p = os.path.join(self.data_dir, "fsaverage_sym/surf/lh.area")
self._surf_area = nb.freesurfer.read_morph_data(p)
return self._surf_area
@property
def surf(self):
"""inflated surface, dict with 'faces' and 'coords'"""
if self._surf is None:
p = os.path.join(self.data_dir, "fsaverage_sym", "surf", "lh.inflated")
self._surf = mt.load_mesh_geometry(p)
return self._surf
@property
def surf_partial(self):
"""partially inflated surface, dict with 'faces' and 'coords'"""
if self._surf_partial is None:
p = os.path.join(self.data_dir, "fsaverage_sym", "surf", "lh.partial_inflated")
vertices, faces = nb.freesurfer.io.read_geometry(p)
self._surf_partial = {"faces": faces, "coords": vertices}
return self._surf_partial
@property
def adj_mat(self):
if self._adj_mat is None:
all_edges = np.vstack(
[self.surf["faces"][:, :2], self.surf["faces"][:, 1:3], self.surf["faces"][:, [2, 0]]]
)
self._adj_mat = scipy.sparse.coo_matrix(
(np.ones(len(all_edges), np.uint8), (all_edges[:, 0], all_edges[:, 1])),
shape=(len(self.surf["coords"]), len(self.surf["coords"])),
).tocsr()
return self._adj_mat
@property
def neighbours(self):
if self._neighbours is None:
self._neighbours = mt.get_neighbours_from_tris(self.surf["faces"])
return self._neighbours
@property
def lobes(self):
if self._lobes is None:
p = os.path.join(self.data_dir, "fsaverage_sym/label/lh.lobes.annot")
self._lobes = nb.freesurfer.read_annot(p)
return self._lobes
@property
def coords(self):
if self._coords is None:
surf = mt.load_mesh_geometry(os.path.join(self.data_dir, SURFACE_FILE))
# spherical 2D coordinates. ignore radius
# spherical_coords = mt.spherical_np(surf["coords"])[:, 1:]
# surf_coords_norm = (surf['coords']-np.min(surf['coords'],axis=0))/(np.max(surf['coords'],axis=0)-np.min(surf['coords'],axis=0))
# norm_coords = (spherical_coords - np.min(spherical_coords, axis=0)) / (
# np.max(spherical_coords, axis=0) - np.min(spherical_coords, axis=0)
# )
# round to have around 1500 unique coordinates
# rounded_norm_coords = np.round(norm_coords * 5, 1) / 5
self._coords = surf["coords"] #rounded_norm_coords
return self._coords
def read_subject_ids_from_dataset(self):
"""Read subject ids from the dataset csv file.
Returns subject_ids, trainval_ids, test_ids"""
assert self.dataset is not None, "please set a valid dataset csv file"
df = pd.read_csv(os.path.join(self.data_dir, self.dataset))
subject_ids = list(df.subject_id)
trainval_ids = list(df[df.split == "trainval"].subject_id)
test_ids = list(df[df.split == "test"].subject_id)
return subject_ids, trainval_ids, test_ids
def get_sites(self):
"""get all valid site codes that exist on this system"""
sites = []
for f in glob.glob(os.path.join(self.data_dir, "MELD_*")):
if os.path.isdir(f):
sites.append(f.split("_")[-1])
return sites
@contextmanager
def _site_hdf5(self, site_code, group, write=False, hdf5_file_root=None):
"""
Hdf5 file handle for specified site_code and group (patient or control).
This function is to be used in a context block as follows:
```
with cohort._site_hdf5('H1', 'patient') as f:
# read information from f
pass
# f is automatically closed outside of the `with` block
```
Args:
site_code: hospital site code, e.g. 'H1'
group: 'patient' or 'control'
write (optional): flag to open hdf5 file with writing permissions, or to create
the hdf5 if it does not exist.
Yields: a pointer to the opened hdf5 file.
"""
if hdf5_file_root is None:
hdf5_file_root = self.hdf5_file_root
p = os.path.join(self.data_dir, f"MELD_{site_code}", hdf5_file_root.format(site_code=site_code, group=group))
# open existing file or create new one
if os.path.isfile(p) and not write:
f = h5py.File(p, "r")
elif os.path.isfile(p) and write:
f = h5py.File(p, "r+")
elif not os.path.isfile(p) and write:
f = h5py.File(p, "a")
else:
f = None
try:
yield f
finally:
if f is not None:
f.close()
def get_subject_ids(self, **kwargs):
"""Output list of subject_ids.
List can be filtered by sites (given as a list of site_codes, e.g. 'H2'),
group (patient / control / both), and features (subject_features_to_exclude).
If self.dataset is not None, restrict subjects to the subjects in the dataset csv file.
subject_features_to_exclude: exclude subjects that don't have this feature
Args:
site_codes (list of str): hospital site codes, e.g. ['H1'].
group (str): 'patient', 'control', or 'both'.
subject_features_to_exclude (list of str): exclude subjects that don't have this feature
subject_features_to_include (list of str): exclude subjects that have this feature
scanners (list of str): list of scanners to include
lesional_only (bool): filter out lesion negative patients
Returns:
subject_ids: the list of subject ids
"""
# parse kwargs:
# get groups
if kwargs.get("group", "both") == "both":
groups = ["patient", "control"]
else:
groups = [kwargs.get("group", "both")]
# get sites
site_codes = kwargs.get("site_codes", self.get_sites())
if isinstance(site_codes, str):
site_codes = [site_codes]
# get scanners
scanners = kwargs.get("scanners", ["3T", "15T"])
if not isinstance(scanners, list):
scanners = [scanners]
lesional_only = kwargs.get("lesional_only", True)
subject_features_to_exclude = kwargs.get("subject_features_to_exclude", [""])
subject_features_to_include = kwargs.get("subject_features_to_include", [""])
# get subjects for specified groups and sites
subject_ids = []
for site_code in site_codes:
for group in groups:
with self._site_hdf5(site_code, group) as f:
if f is None:
continue
cur_scanners = f[site_code].keys()
for scanner in cur_scanners:
subject_ids += list(f[os.path.join(site_code, scanner, group)].keys())
self.log.info(f"total number of subjects: {len(subject_ids)}")
# restrict to ids in dataset (if specified)
if self.dataset is not None:
subjects_in_dataset, _, _ = self.read_subject_ids_from_dataset()
subject_ids = list(np.array(subject_ids)[np.in1d(subject_ids, subjects_in_dataset)])
self.log.info(
f"total number of subjects after restricting to subjects from {self.dataset}: {len(subject_ids)}"
)
# get list of features that is used to filter subjects
# e.g. use this to filter subjects without FLAIR features
_, required_subject_features = self._filter_features(
subject_features_to_exclude,
return_excluded=True,
)
self.log.debug("selecting subjects that have features: {}".format(required_subject_features))
# get list of features that determine whether to exclude subjects
# e.g. use this to filter subjects with FLAIR features
_, undesired_subject_features = self._filter_features(
subject_features_to_include,
return_excluded=True,
)
self.log.debug("selecting subjects that don't have features: {}".format(undesired_subject_features))
# filter ids by scanner, features and whether they have lesions.
filtered_subject_ids = []
for subject_id in subject_ids:
subj = MeldSubject(subject_id, self)
# check scanner
if subj.scanner not in scanners:
continue
# check required features
if not subj.has_features(required_subject_features):
continue
# check undesired features
if subj.has_features(undesired_subject_features) and len(undesired_subject_features) > 0:
continue
# check lesion mask presence
if lesional_only and subj.is_patient and not subj.has_lesion():
continue
# subject has passed all filters, add to list
filtered_subject_ids.append(subject_id)
self.log.info(
f"total number after filtering by scanner {scanners}, features, lesional_only {lesional_only}: {len(filtered_subject_ids)}"
)
return filtered_subject_ids
def get_features(self, features_to_exclude=[""]):
"""
get filtered list of features.
"""
# get list of all features that we want to train models on
# if a subject does not have a feature, 0 is returned for this feature during dataset creation
features = self._filter_features(features_to_exclude=features_to_exclude)
self.log.debug("features that will be loaded in train/test datasets: {}".format(features))
return features
def _filter_features(self, features_to_exclude, return_excluded=False):
"""Return a list of features, with features_to_exclude removed.
Args:
features_to_exclude (list of str): list of features that should be excluded,
NB 'FLAIR' will exclude all FLAIR features but all other features must be exact matches
return_excluded (bool): if True, return list of excluded features.
Returns:
tuple:
features: the list of features with appropriate features excluded.
excluded_features: list of all excluded features. Only returned if return_excluded is specified.
"""
all_features = self.full_feature_list.copy()
excludable_features = []
filtered_features = self.full_feature_list.copy()
for feature in self.full_feature_list.copy():
for exclude in features_to_exclude:
if exclude == "":
pass
elif exclude == "FLAIR":
if exclude in feature:
filtered_features.remove(feature)
excludable_features.append(feature)
elif feature == exclude:
if exclude in self.full_feature_list: # only remove if still in list
filtered_features.remove(feature)
excludable_features.append(feature)
if return_excluded:
return filtered_features, excludable_features
else:
return filtered_features
def split_hemispheres(self, input_data):
"""
split vector of cortex-masked data back into 2 full overlays,
including zeros for medial wall
Returns:
hemisphere_data: dictionary with keys "left" and "right".
"""
# make sure that input_data has expected format
assert len(input_data) == 2 * len(self.cortex_label)
# split data in two hemispheres
hemisphere_data = {}
for i, hemi in enumerate(["left", "right"]):
feature_data = np.zeros((NVERT,) + input_data.shape[1:])
feature_data[self.cortex_label] = input_data[i * len(self.cortex_label) : (i + 1) * len(self.cortex_label)]
hemisphere_data[hemi] = feature_data
return hemisphere_data
class MeldSubject:
"""
individual patient from meld cohort, can read subject data and other info
"""
def __init__(self, subject_id, cohort):
self.subject_id = subject_id
self.cohort = cohort
self.log = logging.getLogger(__name__)
# unseeded rng for generating random numbers
self.rng = np.random.default_rng()
@property
def scanner(self):
_, site_code, scanner, group, ID = self.subject_id.split("_")
return scanner
@property
def group(self):
_, site_code, scanner, group, ID = self.subject_id.split("_")
if group == "FCD":
group = "patient"
elif group == "C":
group = "control"
else:
print(
f"Error: incorrect naming scheme used for {self.subject_id}. Unable to determine if patient or control."
)
return group
@property
def site_code(self):
_, site_code, scanner, group, ID = self.subject_id.split("_")
return site_code
def surf_dir_path(self, hemi):
"""return path to features dir (surf_dir)"""
return os.path.join(self.site_code, self.scanner, self.group, self.subject_id, hemi)
@property
def is_patient(self):
return self.group == "patient"
@property
def has_flair(self):
return "FLAIR" in " ".join(self.get_feature_list())
def has_lesion(self):
return self.get_lesion_hemisphere() in ["lh", "rh"]
def get_lesion_hemisphere(self):
"""
return 'lh', 'rh', or None
"""
if not self.is_patient:
return None
with self.cohort._site_hdf5(self.site_code, self.group) as f:
surf_dir_lh = f.require_group(self.surf_dir_path("lh"))
if ".on_lh.lesion.mgh" in surf_dir_lh.keys():
return "lh"
surf_dir_rh = f.require_group(self.surf_dir_path("rh"))
if ".on_lh.lesion.mgh" in surf_dir_rh.keys():
return "rh"
return None
def has_features(self, features):
missing_features = np.setdiff1d(features, self.get_feature_list())
return len(missing_features) == 0
def get_feature_list(self, hemi="lh"):
"""Outputs a list of the features a participant has for each hemisphere"""
with self.cohort._site_hdf5(self.site_code, self.group) as f:
keys = list(f[self.surf_dir_path(hemi)].keys())
# remove lesion and boundaries from list of features
if ".on_lh.lesion.mgh" in keys:
keys.remove(".on_lh.lesion.mgh")
if ".on_lh.boundary_zone.mgh" in keys:
keys.remove(".on_lh.boundary_zone.mgh")
return keys
def get_demographic_features(
self, feature_names, csv_file=DEMOGRAPHIC_FEATURES_FILE, normalize=False, default=None
):
"""
Read demographic features from csv file. Features are given as (partial) column titles
Args:
feature_names: list of partial column titles of features that should be returned
csv_file: csv file containing demographics information.
can be raw participants file or qc-ed values.
"{site_code}" is replaced with current site_code.
normalize: implemented for "Age of Onset" and "Duration"
default: default value to be used when subject does not exist.
Either "random" (which will choose a random value from the current
demographics feature column) or any other value which will be used
as default value.
Returns:
list of features, matching structure of feature_names
"""
csv_path = os.path.join(self.cohort.data_dir, csv_file)
return_single = False
if isinstance(feature_names, str):
return_single = True
feature_names = [feature_names]
df = pd.read_csv(csv_path, header=0, encoding="latin")
# get index column
id_col = None
for col in df.keys():
if "ID" in col:
id_col = col
# ensure that found an index column
if id_col is None:
self.log.warning("No ID column found in file, please check the csv file")
return None
df = df.set_index(id_col)
# find desired demographic features
features = []
for desired_name in feature_names:
matched_name = None
for col in df.keys():
if desired_name in col:
if matched_name is not None:
# already found another matching col
self.log.warning(
f"Multiple columns matching {desired_name} found ({matched_name}, {col}), please make search more specific"
)
return None
matched_name = col
# ensure that found necessary data
if matched_name is None:
if "urfer" in desired_name:
matched_name = "Freesurfer_nul"
else:
self.log.warning(f"Unable to find column matching {desired_name}, please double check for typos")
return None
# read feature
# if subject does not exists, add None
if self.subject_id in df.index:
if matched_name == "Freesurfer_nul":
feature = "5.3"
else:
feature = df.loc[self.subject_id][matched_name]
if normalize:
if matched_name == "Age of onset":
feature = np.log(feature + 1)
feature = feature / df[matched_name].max()
elif matched_name == "Duration":
feature = (feature - df[matched_name].min()) / (df[matched_name].max() - df[matched_name].min())
else:
self.log.info(f"demographic feature normalisation not implemented for feature {matched_name}")
elif default == "random":
# unseeded rng for generating random numbers
rng = np.random.default_rng()
feature = np.clip(rng.normal(0, 0.1) + rng.choice(df[matched_name]), 0, 1)
else:
feature = default
features.append(feature)
if return_single:
return features[0]
return features
def load_feature_values(self, feature, hemi="lh"):
"""
Load and return values of specified feature.
"""
feature_values = np.zeros(NVERT, dtype=np.float32)
# read data from hdf5
with self.cohort._site_hdf5(self.site_code, self.group) as f:
surf_dir = f[self.surf_dir_path(hemi)]
if feature in surf_dir.keys():
feature_values[:] = surf_dir[feature][:]
else:
self.log.debug(f"missing feature: {feature} set to zero")
return feature_values
def load_feature_lesion_data(self, features, hemi="lh", features_to_ignore=[]):
"""
Load all patient's data into memory
Args:
features: list of features to be loaded
hemi: 'lh' or 'rh'
features_to_ignore: list of features that should be replaced with 0 upon loading
Returns:
feature_data, label
"""
# load all features
feature_values = []
for feature in features:
if feature in features_to_ignore:
# append zeros for features_to_ignore
feature_values.append(np.zeros(NVERT, dtype=np.float32))
else:
# read feature_values
feature_values.append(self.load_feature_values(feature, hemi=hemi))
feature_values = np.stack(feature_values, axis=-1)
# load lesion data
lesion_values = np.ceil(self.load_feature_values(".on_lh.lesion.mgh", hemi=hemi)).astype(int)
return feature_values, lesion_values
def load_boundary_zone(self, max_distance=40, feat_name=".on_lh.boundary_zone.mgh"):
"""
load and return boundary zone mask
max_distance - distance from lesion mask to extend boundary zone in mm
30 for training exclusion, 20 for sensitivity testing
"""
cortex_mask = self.cohort.cortex_mask
boundary_zones = np.zeros(2 * sum(cortex_mask)).astype(float)
hemi = self.get_lesion_hemisphere()
for k, h in enumerate(["lh", "rh"]):
if hemi == h:
bz = self.load_feature_values(feat_name, hemi=hemi)
if max_distance is not None:
bz = bz < max_distance
boundary_zones[k * sum(cortex_mask) : (k + 1) * sum(cortex_mask)] = bz[cortex_mask]
else:
bz = np.zeros(len(cortex_mask))
boundary_zones[k * sum(cortex_mask) : (k + 1) * sum(cortex_mask)] = bz[cortex_mask]
return boundary_zones
def get_histology(self):
"""
get histological classification from cleaned up demographics files
"""
histology = self.get_demographic_features("Histo")
return histology
# TODO write test
def write_feature_values(self, feature, feature_values, hemis=["lh", "rh"], hdf5_file=None, hdf5_file_root=None):
"""
write feature to subject's hdf5.
Args:
feature: name of the feature
feature_values: feature values to be written to the hdf5
hemis: hemispheres that should be written. If only one hemisphere is given,
it is assumed that all values given with feature_values belong to this hemisphere.
hdf5_file: uses self.cohort._site_hdf5 by default, but another filename can be specified,
e.g. to write predicted lesions to another hdf5
hdf5_file_root: optional to specify a different root from baseline, if writing to a new file
"""
# check that feature_values have expected length
if hdf5_file_root is None:
hdf5_file_root = self.cohort.hdf5_file_root
assert len(feature_values) == sum(self.cohort.cortex_mask) * len(hemis)
n_vert_cortex = sum(self.cohort.cortex_mask)
# open hdf5 file
if hdf5_file is not None:
if not os.path.isfile(hdf5_file):
hdf5_file_context = h5py.File(hdf5_file, "a")
else:
hdf5_file_context = h5py.File(hdf5_file, "r+")
else:
hdf5_file_context = self.cohort._site_hdf5(
self.site_code, self.group, write=True, hdf5_file_root=hdf5_file_root
)
with hdf5_file_context as f:
for i, hemi in enumerate(hemis):
group = f.require_group(self.surf_dir_path(hemi))
hemi_data = np.zeros(NVERT)
hemi_data[self.cohort.cortex_mask] = feature_values[i * n_vert_cortex : (i + 1) * n_vert_cortex]
dset = group.require_dataset(
feature, shape=(NVERT,), dtype="float32", compression="gzip", compression_opts=9
)
dset[:] = hemi_data
def delete(self, f, feat):
print("delete")
del f[feat]
def get_lesion_area(self):
"""
calculate lesion area as the proportion of the hemisphere that is lesion.
Returns:
lesion_area, lesion_hemisphere, lesion_lobe
"""
hemi = self.get_lesion_hemisphere()
lobes_i, _, lobes_labels = self.cohort.lobes
if hemi is not None:
lesion = self.load_feature_values(".on_lh.lesion.mgh", hemi=hemi).astype(bool)
total_area = np.sum(self.cohort.surf_area[self.cohort.cortex_mask])
lesion_area =
completion: np.sum(self.cohort.surf_area[lesion])
api: numpy.sum
import numpy as np
import math
import os
def load_obj(dire):
fin = open(dire,'r')
lines = fin.readlines()
fin.close()
vertices = []
triangles = []
for i in range(len(lines)):
line = lines[i].split()
if len(line)==0:
continue
if line[0] == 'v':
x = float(line[1])
y = float(line[2])
z = float(line[3])
vertices.append([x,y,z])
if line[0] == 'f':
x = int(line[1].split("/")[0])
y = int(line[2].split("/")[0])
z = int(line[3].split("/")[0])
triangles.append([x-1,y-1,z-1])
vertices = np.array(vertices, np.float32)
#remove isolated points
triangles_ =
completion: np.array(triangles, np.int32)
api: numpy.array
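As a side note on the load_obj prompt above: Wavefront OBJ face records are 1-based and may carry texture/normal indices after a slash, which is why each token is split on "/" and decremented. A minimal standalone illustration of that conversion (the face line below is a made-up example, not taken from any dataset row):
line = "f 3/1/1 5/2/1 9/3/1".split()
tri = [int(tok.split("/")[0]) - 1 for tok in line[1:4]]
print(tri)  # [2, 4, 8] -- OBJ's 1-based vertex ids become 0-based array indices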
# Licensed under an MIT open source license - see LICENSE
"""
SCOUSE - Semi-automated multi-COmponent Universal Spectral-line fitting Engine
Copyright (c) 2016-2018 <NAME>
CONTACT: <EMAIL>
"""
import numpy as np
import sys
import warnings
import pyspeckit
import matplotlib.pyplot as plt
import itertools
import time
from astropy import log
from astropy import units as u
from astropy.utils.console import ProgressBar
from .indiv_spec_description import *
from .parallel_map import *
from .saa_description import add_indiv_spectra, clean_up, merge_models
from .solution_description import fit, print_fit_information
from .verbose_output import print_to_terminal
def initialise_indiv_spectra(scouseobject, verbose=False, njobs=1):
"""
Here, the individual spectra are primed ready for fitting. We create a new
object for each spectrum; these are contained within a dictionary stored
within the relevant SAA.
Parameters
----------
scouseobject : Instance of the scousepy class
verbose : bool (optional)
verbose output
njobs : number (optional)
number of cores used for the computation - prep spec is parallelised
"""
# Cycle through potentially multiple wsaa values
for i in range(len(scouseobject.wsaa)):
# Get the relevant SAA dictionary
saa_dict = scouseobject.saa_dict[i]
# initialise the progress bar
if verbose:
count=0
progress_bar = print_to_terminal(stage='s3', step='init',
length=len(saa_dict.keys()),
var=scouseobject.wsaa[i])
for _key in saa_dict.keys():
prep_spec(_key, saa_dict, njobs, scouseobject)
if verbose:
progress_bar.update()
if verbose:
print("")
def prep_spec(_key, saa_dict, njobs, scouseobject):
"""
Prepares the spectra for automated fitting
Parameters
----------
_key : number
key for SAA dictionary entry - used to select the correct SAA
saa_dict : dictionary
dictionary of spectral averaging areas
njobs : number
number of cores used for the computation - prep spec is parallelised
scouseobject : Instance of the scousepy class
"""
# get the relevant SAA
SAA = saa_dict[_key]
# Initialise indiv spectra
indiv_spectra = {}
# We only care about the SAA's that are to be fit at this stage
if SAA.to_be_fit:
if np.size(SAA.indices_flat) != 0.0:
# Parallel
if njobs > 1:
args = [scouseobject, SAA]
inputs = [[k] + args for k in range(len(SAA.indices_flat))]
# Send to parallel_map
indiv_spec = parallel_map(get_indiv_spec,inputs,numcores=njobs)
# flatten the output from parallel map
merged_spec = [spec for spec in indiv_spec if spec is not None]
merged_spec = np.asarray(merged_spec)
for k in range(len(SAA.indices_flat)):
# Add the spectra to the dict
key = SAA.indices_flat[k]
indiv_spectra[key] = merged_spec[k]
else:
for k in range(len(SAA.indices_flat)):
key = SAA.indices_flat[k]
args = [scouseobject, SAA]
inputs = [[k] + args]
inputs = inputs[0]
indiv_spec = get_indiv_spec(inputs)
indiv_spectra[key] = indiv_spec
# add the spectra to the spectral averaging areas
add_indiv_spectra(SAA, indiv_spectra)
def get_indiv_spec(inputs):
"""
Returns a spectrum
Parameters
----------
inputs : list
list containing inputs to parallel map - contains the index of the
relevant spectrum, the scouseobject, and the SAA
"""
idx, scouseobject, SAA = inputs
# get the coordinates of the pixel based on the flattened index
_coords = np.unravel_index(SAA.indices_flat[idx],scouseobject.cube.shape[1:])
# create a pyspeckit spectrum
indiv_spec = spectrum(_coords, \
scouseobject.cube[:,_coords[0], _coords[1]].value, \
idx=SAA.indices_flat[idx], \
scouse=scouseobject)
return indiv_spec
def fit_indiv_spectra(scouseobject, saa_dict, wsaa, njobs=1,
spatial=False, verbose=False, stage=3):
"""
Automated fitting procedure for individual spectra
Parameters
----------
scouseobject : Instance of the scousepy class
saa_dict : dictionary
dictionary of spectral averaging areas
wsaa : number
width of the SAA
njobs : number (optional)
number of cores used for the computation - prep spec is parallelised
spatial : bool (optional)
not implemented yet
verbose : bool (optional)
verbose output
stage : number (optional)
indicates whether the fitting is being performed during stage 3 or 6
"""
if verbose:
if stage == 3:
progress_bar = print_to_terminal(stage='s3', step='fitting',
length=len(saa_dict.keys()),
var=wsaa)
else:
progress_bar = print_to_terminal(stage='s6', step='fitting',
length=len(saa_dict.keys()),
var=wsaa)
for _key in saa_dict.keys():
fitting_spec(_key, scouseobject, saa_dict, wsaa, njobs, spatial)
if verbose:
progress_bar.update()
if verbose:
print("")
def fitting_spec(_key, scouseobject, saa_dict, wsaa, njobs, spatial):
"""
The automated fitting process followed by scouse
Parameters
----------
_key : number
key for SAA dictionary entry - used to select the correct SAA
scouseobject : Instance of the scousepy class
saa_dict : dictionary
dictionary of spectral averaging areas
wsaa : number
width of the SAA
njobs : number
number of cores used for the computation - prep spec is parallelised
spatial : bool
not implemented yet
"""
# get the relevant SAA
SAA = saa_dict[_key]
# We only care about those locations we have SAA fits for.
if SAA.to_be_fit:
# Shhh
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
# Generate a template spectrum
template_spectrum = generate_template_spectrum(scouseobject)
log.setLevel(old_log)
# Get the SAA model solution
parent_model = SAA.model
# Parallel
if njobs > 1:
if np.size(SAA.indices_flat) != 0.0:
args = [scouseobject, SAA, parent_model, template_spectrum]
inputs = [[k] + args for k in range(len(SAA.indices_flat))]
# Send to parallel_map
bfs = parallel_map(fit_a_spectrum, inputs, numcores=njobs)
merged_bfs = [core_bf for core_bf in bfs if core_bf is not None]
merged_bfs = np.asarray(merged_bfs)
for k in range(len(SAA.indices_flat)):
# Add the models to the spectra
key = SAA.indices_flat[k]
add_model_parent(SAA.indiv_spectra[key], merged_bfs[k,0])
add_model_dud(SAA.indiv_spectra[key], merged_bfs[k,1])
else:
# If njobs = 1 just cycle through
for k in range(len(SAA.indices_flat)):
key = SAA.indices_flat[k]
args = [scouseobject, SAA, parent_model, template_spectrum]
inputs = [[k] + args]
inputs = inputs[0]
bfs = fit_a_spectrum(inputs)
add_model_parent(SAA.indiv_spectra[key], bfs[0])
add_model_dud(SAA.indiv_spectra[key], bfs[1])
def generate_template_spectrum(scouseobject):
"""
Generate a template spectrum to be passed to the fitter. This will contain
some basic information that will be updated during the fitting process. This
is implemented because the parallelised fitting replaces the spectrum in
memory and things...break
Parameters
----------
scouseobject : Instance of the scousepy class
"""
x=scouseobject.xtrim
y=scouseobject.saa_dict[0][0].ytrim
rms=scouseobject.saa_dict[0][0].rms
return pyspeckit.Spectrum(data=y,
error=np.ones(len(y))*rms,
xarr=x,
doplot=False,
unit=scouseobject.cube.header['BUNIT'],
xarrkwargs={'unit':'km/s',
'refX': scouseobject.cube.wcs.wcs.restfrq*u.Hz,
'velocity_convention': 'radio',
},
verbose=False
)
def get_flux(scouseobject, indiv_spec):
"""
Returns flux for a given spectrum
Parameters
----------
scouseobject : Instance of the scousepy class
indiv_spec : Instance of the fit class
the spectrum to be fit, produced by prep spec
"""
y=scouseobject.cube[:,indiv_spec.coordinates[0],indiv_spec.coordinates[1]]
y=y[scouseobject.trimids]
return y
def get_spec(scouseobject, indiv_spec, template_spectrum):
"""
Here we update the template with values corresponding to the spectrum
we want to fit
Parameters
----------
scouseobject : Instance of the scousepy class
indiv_spec : pyspeckit spectrum
the spectrum to be fit, produced by prep spec
template_spectrum : pyspeckit spectrum
dummy spectrum to be updated
"""
y = get_flux(scouseobject, indiv_spec)
rms=indiv_spec.rms
template_spectrum.data = u.Quantity(y).value
template_spectrum.error = u.Quantity(np.ones(len(y))*rms).value
template_spectrum.specfit.spectofit = u.Quantity(y).value
template_spectrum.specfit.errspec = u.Quantity(np.ones(len(y))*rms).value
return template_spectrum
def fit_a_spectrum(inputs):
"""
Process used for fitting spectra. Returns a best-fit solution and a dud for
every spectrum.
Parameters
----------
inputs : list
list containing inputs to parallel map - contains the spectrum index,
the scouseobject, SAA, the best-fitting model solution to the SAA, and
the template spectrum
"""
idx, scouseobject, SAA, parent_model, template_spectrum = inputs
key = SAA.indices_flat[idx]
spec=None
# Shhh
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
# update the template
spec = get_spec(scouseobject, SAA.indiv_spectra[key], template_spectrum)
log.setLevel(old_log)
# begin the fitting process
bf = fitting_process_parent(scouseobject, SAA, key, spec, parent_model)
# if the result is a zero component fit, create a dud spectrum
if bf.ncomps == 0.0:
dud = bf
else:
dud = fitting_process_duds(scouseobject, SAA, key, spec)
return [bf, dud]
def fitting_process_parent(scouseobject, SAA, key, spec, parent_model):
"""
Pyspeckit fitting of an individual spectrum using the parent SAA model
Parameters
----------
scouseobject : Instance of the scousepy class
SAA : Instance of the saa class
scousepy spectral averaging area
key : number
index of the individual spectrum
spec : pyspeckit spectrum
the spectrum to fit
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
"""
# Check the model
happy = False
initfit = True
fit_dud = False
while not happy:
if np.all(np.isfinite(np.array(spec.flux))):
if initfit:
guesses = np.asarray(parent_model.params)
if np.sum(guesses) != 0.0:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
spec.specfit(interactive=False, \
clear_all_connections=True,\
xmin=scouseobject.ppv_vol[0], \
xmax=scouseobject.ppv_vol[1], \
fittype = scouseobject.fittype, \
guesses = guesses,\
verbose=False,\
use_lmfit=True)
log.setLevel(old_log)
modparnames = spec.specfit.fitter.parnames
modncomps = spec.specfit.npeaks
modparams = spec.specfit.modelpars
moderrors = spec.specfit.modelerrs
modrms = spec.error[0]
_inputs = [modparnames, [modncomps], modparams, moderrors, [modrms]]
happy, guesses = check_spec(scouseobject, parent_model, _inputs, happy)
initfit = False
else:
# If no satisfactory model can be found - fit a dud!
fit_dud=True
happy = True
else:
# If no satisfactory model can be found - fit a dud!
fit_dud = True
happy = True
if fit_dud:
bf = fitting_process_duds(scouseobject, SAA, key, spec)
else:
bf = fit(spec, idx=key, scouse=scouseobject)
return bf
def fitting_process_duds(scouseobject, SAA, key, spec):
"""
Fitting duds
Parameters
----------
scouseobject : Instance of the scousepy class
SAA : Instance of the saa class
scousepy spectral averaging area
key : number
index of the individual spectrum
spec : pyspeckit spectrum
the spectrum to fit
"""
bf = fit(spec, idx=key, scouse=scouseobject, fit_dud=True,\
noise=SAA.indiv_spectra[key].rms, \
duddata=np.array(spec.flux))
return bf
def check_spec(scouseobject, parent_model, inputs, happy):
"""
This routine controls the fit quality.
Here we are going to check the output spectrum against user-defined
tolerance levels described in Henshaw et al. 2016 and against the SAA fit.
Parameters
----------
scouseobject : Instance of the scousepy class
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
inputs : list
contains various information about the model (see fitting_process_parent)
happy : bool
fitting stops when happy = True
"""
guesses = np.asarray(inputs[2])
condition_passed = np.zeros(3, dtype='bool')
condition_passed, guesses = check_rms(scouseobject, inputs, guesses,
condition_passed)
if condition_passed[0]:
condition_passed, guesses = check_dispersion(scouseobject, inputs,
parent_model, guesses,
condition_passed)
if (condition_passed[0]) and (condition_passed[1]):
condition_passed, guesses = check_velocity(scouseobject, inputs,
parent_model, guesses,
condition_passed)
if np.all(condition_passed):
if (inputs[1][0] == 1):
happy = True
else:
happy, guesses = check_distinct(scouseobject, inputs,
parent_model, guesses,
happy)
return happy, guesses
def unpack_inputs(inputs):
"""
Unpacks the input list
Parameters
----------
inputs : list
contains various information about the model (see fitting_process_parent)
"""
parnames = [pname.lower() for pname in inputs[0]]
nparams = np.size(parnames)
ncomponents = inputs[1][0]
params = inputs[2]
errors = inputs[3]
rms = inputs[4][0]
return parnames, nparams, ncomponents, params, errors, rms
def get_index(parnames, namelist):
"""
Searches for a particular parname in a list and returns the index of where
that parname appears
Parameters
----------
parnames : list
list of strings containing the names of the parameters in the pyspeckit
fit. This will vary depending on the input model so keep as general as
possible
namelist : list
list of various names used by pyspeckit for parameters in the model
"""
foundname = [pname in namelist for pname in parnames]
foundname = np.array(foundname)
idx = np.where(foundname==True)[0]
return int(idx[0])  # np.asscalar was removed in recent NumPy releases
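# Editorial sketch (not part of scousepy): the matching logic of get_index above,
# with toy parameter names standing in for a pyspeckit fitter's parnames.
import numpy as np
_demo_parnames = ["amplitude", "shift", "width"]
_demo_namelist = ["tex", "amp", "amplitude", "peak", "tant", "tmb"]
_demo_found = np.array([pname in _demo_namelist for pname in _demo_parnames])
_demo_idx = int(np.where(_demo_found)[0][0])  # -> 0, i.e. 'amplitude' is the peak parameter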
def check_rms(scouseobject, inputs, guesses, condition_passed):
"""
Check the rms of the best-fitting model components
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
Notes
-----
I'm comparing one of the parameters in _peaknames against the rms value.
This isn't strictly correct for models other than Gaussian, since e.g. Tex
isn't equivalent to the amplitude of the model component. However, in the
absence of anything else to compare, I will leave this for now and think of
something better.
"""
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the peak is located in the parameter array
namelist = ['tex', 'amp', 'amplitude', 'peak', 'tant', 'tmb']
idx = get_index(parnames, namelist)
# Now check all components to see if they are above the rms threshold
for i in range(int(ncomponents)):
if (params[int((i*nparams)+idx)] < rms*scouseobject.tolerances[0]): # or \
#(params[int((i*nparams)+idx)] < errors[int((i*nparams)+idx)]*scouseobject.tolerances[0]):
# set to zero
guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
violating_comps = (guesses==0.0)
if np.any(violating_comps):
condition_passed[0]=False
else:
condition_passed[0]=True
guesses = guesses[(guesses != 0.0)]
return condition_passed, guesses
def check_dispersion(scouseobject,inputs,parent_model,guesses,condition_passed):
"""
Check the fwhm of the best-fitting model components
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
"""
fwhmconv = 2.*np.sqrt(2.*np.log(2.))
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the velocity dispersion is located in the parameter array
namelist = ['dispersion', 'width', 'fwhm']
idx = get_index(parnames, namelist)
for i in range(int(ncomponents)):
# Find the closest matching component in the parent SAA model
diff = find_closest_match(i, nparams, ncomponents, params, parent_model)
idmin = np.where(diff == np.min(diff))[0]
idmin = idmin[0]
# Work out the relative change in velocity dispersion
relchange = params[int((i*nparams)+idx)]/parent_model.params[int((idmin*nparams)+idx)]
if relchange < 1.:
relchange = 1./relchange
# Does this satisfy the criteria
if (params[int((i*nparams)+idx)]*fwhmconv < scouseobject.cube.header['CDELT3']*scouseobject.tolerances[1]) or \
(relchange > scouseobject.tolerances[2]):
# set to zero
guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
violating_comps = (guesses==0.0)
if np.any(violating_comps):
condition_passed[1]=False
else:
condition_passed[1]=True
guesses = guesses[(guesses != 0.0)]
return condition_passed, guesses
def check_velocity(scouseobject,inputs,parent_model,guesses,condition_passed):
"""
Check the centroid velocity of the best-fitting model components
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
"""
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the peak is located in the parameter array
namelist = ['velocity', 'shift', 'centroid', 'center']
idxv = get_index(parnames, namelist)
# Find where the velocity dispersion is located in the parameter array
namelist = ['dispersion', 'width', 'fwhm']
idxd = get_index(parnames, namelist)
for i in range(int(ncomponents)):
# Find the closest matching component in the parent SAA model
diff = find_closest_match(i, nparams, ncomponents, params, parent_model)
idmin = np.where(diff == np.min(diff))[0]
idmin = idmin[0]
# Limits for tolerance
lower_lim = parent_model.params[int((idmin*nparams)+idxv)]-(scouseobject.tolerances[3]*parent_model.params[int((idmin*nparams)+idxd)])
upper_lim = parent_model.params[int((idmin*nparams)+idxv)]+(scouseobject.tolerances[3]*parent_model.params[int((idmin*nparams)+idxd)])
# Does this satisfy the criteria
if (params[(i*nparams)+idxv] < lower_lim) or \
(params[(i*nparams)+idxv] > upper_lim):
# set to zero
guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
violating_comps = (guesses==0.0)
if np.any(violating_comps):
condition_passed[2]=False
else:
condition_passed[2]=True
guesses = guesses[(guesses != 0.0)]
return condition_passed, guesses
def check_distinct(scouseobject,inputs,parent_model,guesses,happy):
"""
Check to see if component pairs can be distinguished in velocity
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
"""
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the peak is located in the parameter array
namelist = ['tex', 'amp', 'amplitude', 'peak', 'tant', 'tmb']
idxp = get_index(parnames, namelist)
# Find where the peak is located in the parameter array
namelist = ['velocity', 'shift', 'centroid', 'center']
idxv = get_index(parnames, namelist)
# Find where the velocity dispersion is located in the parameter array
namelist = ['dispersion', 'width', 'fwhm']
idxd = get_index(parnames, namelist)
fwhmconv = 2.*np.sqrt(2.*np.log(2.))
intlist = [params[int((i*nparams)+idxp)] for i in range(int(ncomponents))]
velolist = [params[int((i*nparams)+idxv)] for i in range(int(ncomponents))]
displist = [params[int((i*nparams)+idxd)] for i in range(int(ncomponents))]
diff = np.zeros(int(ncomponents))
validvs = np.ones(int(ncomponents))
for i in range(int(ncomponents)):
if validvs[i] != 0.0:
# Calculate the velocity difference between all components
for j in range(int(ncomponents)):
diff[j] = abs(velolist[i]-velolist[j])
diff[(diff==0.0)] = np.nan
# Find the minimum difference (i.e. the adjacent component)
idmin = np.where(diff==np.nanmin(diff))[0]
idmin = idmin[0]
adjacent_intensity = intlist[idmin]
adjacent_velocity = velolist[idmin]
adjacent_dispersion = displist[idmin]
# Get the separation between each component and its neighbour
sep =
completion: np.abs(velolist[i] - adjacent_velocity)
api: numpy.abs
import math
import numpy as np
from scipy import signal
def gaussian_pdf_1d(mu, sigma, length):
'''Generate a one-dimensional Gaussian distribution
- input mu: the mean of the pdf
- input sigma: the standard deviation of the pdf
- input length: the size of the pdf
- output: a row vector representing the one-dimensional Gaussian distribution
'''
# create an array
half_len = length // 2  # integer half-width; true division would yield an extra sample for odd lengths
if np.remainder(length, 2) == 0:
ax = np.arange(-half_len, half_len, 1)
else:
ax = np.arange(-half_len, half_len + 1, 1)
ax = ax.reshape([-1, ax.size])
denominator = sigma * np.sqrt(2 * np.pi)
numerator = np.exp(-np.square(ax - mu) / (2 * sigma * sigma))
return numerator / denominator
def gaussian_pdf_2d(mu, sigma, row, col):
'''Generate a two-dimensional Gaussian distribution
- input mu: the mean of the pdf
- input sigma: the standard deviation of the pdf
- input row: length along the row axis
- input col: length along the column axis
- output: a 2D matrix representing the two-dimensional Gaussian distribution
'''
# create row vector as 1D Gaussian pdf
g_row = gaussian_pdf_1d(mu, sigma, row)
# create column vector as 1D Gaussian pdf
g_col = gaussian_pdf_1d(mu, sigma, col).transpose()
return signal.convolve2d(g_row, g_col, mode='full')
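# Editorial sketch (not from the original source): a quick sanity check of the helpers
# above. With mu=0 the kernel is centred, and for a modest sigma it sums to roughly 1;
# the exact sum depends on sigma and the support size.
_demo_kernel = gaussian_pdf_2d(mu=0, sigma=1.0, row=5, col=5)
print(_demo_kernel.shape)                   # (5, 5): 'full' convolution of (1, 5) with (5, 1), given the integer half-width above
print(round(float(_demo_kernel.sum()), 3))  # ~0.98 for sigma=1 on this support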
def get_derivatives(gray, sigma=0.4):
'''Compute gradient information of the input grayscale image
- Input gray: H x W matrix as image
- Output mag: H x W matrix represents the magnitude of derivatives
- Output magx: H x W matrix represents the magnitude of derivatives along x-axis
- Output magy: H x W matrix represents the magnitude of derivatives along y-axis
- Output ori: H x W matrix represents the orientation of derivatives
'''
mu = 0
# sigma controls the Gaussian smoothing: larger sigma blurs the gradient more (default 0.4)
Ga = gaussian_pdf_2d(mu, sigma, 5, 5)
# Filter
dx = np.array([[1, 0, -1]]) # Horizontal
dy = np.array([[1], [0], [-1]]) # Vertical
#dx = np.array([[1, -1]]) # Horizontal
#dy = np.array([[1],[-1]]) # Vertical
# Convolution of image
#Gx = np.convolve(Ga, dx, 'same')
#Gy = np.convolve(Ga, dy, 'same')
#lx = np.convolve(I_gray, Gx, 'same')
#ly = np.convolve(I_gray, Gy, 'same')
Gx = signal.convolve2d(Ga, dx, mode='same', boundary='fill')
Gy = signal.convolve2d(Ga, dy, mode='same', boundary='fill')
lx = signal.convolve2d(gray, Gx, mode='same', boundary='fill')
ly = signal.convolve2d(gray, Gy, mode='same', boundary='fill')
# Magnitude
mag = np.sqrt(lx*lx+ly*ly)
# Angle
angle =
completion: np.arctan(ly/lx)
api: numpy.arctan
"""
desisim.spec_qa.redshifts
=========================
Module to run high_level QA on a given DESI run
Written by JXP on 3 Sep 2015
"""
from __future__ import print_function, absolute_import, division
import matplotlib
# matplotlib.use('Agg')
import numpy as np
import sys, os, pdb, glob
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.io import fits
from astropy.table import Table, vstack, hstack, MaskedColumn, join
try:
from scipy import constants
C_LIGHT = constants.c/1000.0
except TypeError: # This can happen during documentation builds.
C_LIGHT = 299792458.0/1000.0
import desispec.io
from .utils import elg_flux_lim, get_sty_otype, catastrophic_dv, match_otype
from desiutil.log import get_logger, DEBUG
def calc_dz(simz_tab):
'''Calculate deltaz/(1+z) for a given simz_tab
'''
dz = (simz_tab['Z']-simz_tab['TRUEZ'])/(1+simz_tab['TRUEZ'])
#
return dz
def calc_dzsig(simz_tab):
'''Calculate deltaz/sig(z) for a given simz_tab
'''
dzsig = (simz_tab['Z']-simz_tab['TRUEZ'])/simz_tab['ZERR']
#
return dzsig
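# Editorial sketch (toy numbers, not DESI data) checking the definitions above:
# dz/(1+z) = (1.002 - 1.000)/(1 + 1.000) = 1e-3 and dz/sigma(z) = 0.002/0.0005 = 4.
from astropy.table import Table
_demo_tab = Table({'Z': [1.002], 'TRUEZ': [1.000], 'ZERR': [0.0005]})
print(calc_dz(_demo_tab))     # ~[0.001]
print(calc_dzsig(_demo_tab))  # ~[4.0]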
def calc_obj_stats(simz_tab, objtype):
"""Calculate redshift statistics for a given objtype
Parameters
----------
simz_tab : Table
TODO: document this
objtype : str
Object type, e.g. 'ELG', 'LRG'
Returns
-------
stat_dict : dict
Survey results for a given object type
"""
# zstats
ngood, nfail, nmiss, nlost = zstats(simz_tab, objtype=objtype, count=True, survey=True)
ntot = ngood+nfail+nmiss+nlost
# Grab the masks
objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask = criteria(simz_tab, objtype=objtype)
# Init
stat_dict = {} #dict(OBJTYPE=objtype)
# N targets (irrespective of the Survey)
stat_dict['N_TARG'] = ntot
# Number of objects with Redshift Analysis
stat_dict['N_zA'] = np.count_nonzero(z_mask & objtype_mask)
# Redshift measured (includes catastrophics)
# For ELGs, cut on OII_Flux too
stat_dict['N_SURVEY'] = np.count_nonzero(survey_mask & objtype_mask & z_mask)
# Catastrophic failures in the survey
stat_dict['N_CAT'] = nfail
if stat_dict['N_SURVEY'] > 0:
stat_dict['CAT_RATE'] = float(nfail)/stat_dict['N_SURVEY']
else:
stat_dict['CAT_RATE'] = 0
# Good redshifts in the survey
stat_dict['N_GOODZ'] = ngood
# Redshift with ZWARN=0 in the survey
stat_dict['N_ZWARN0'] = ngood+nfail
# Survey Efficiency
if stat_dict['N_SURVEY'] > 0:
stat_dict['EFF'] = float(ngood)/float(stat_dict['N_SURVEY'])
else:
stat_dict['EFF'] = 1.
# Purity
if stat_dict['N_ZWARN0'] > 0:
stat_dict['PURITY'] = float(ngood)/float(stat_dict['N_ZWARN0'])
else:
stat_dict['PURITY'] = 1.
# delta z
gdz_tab = slice_simz(simz_tab, objtype=objtype, survey=True, goodz=True, all_zwarn0=True, z_analy=True)
dz = calc_dz(gdz_tab)
if len(dz) == 0:
dz = np.zeros(1)
not_nan = np.isfinite(dz)
stat_dict['MEAN_DZ'] = float(np.mean(dz[not_nan]))
stat_dict['MEDIAN_DZ'] = float(np.median(dz[not_nan]))
stat_dict['RMS_DZ'] = float(np.std(dz[not_nan]))
# Return
return stat_dict
def spectype_confusion(simz_tab, zb_tab=None):
""" Generate a Confusion Matrix for spectral types
See the Confusion_matrix_spectypes Notebook in docs/nb for an example
Parameters
----------
simz_tab : Table
Truth table; may be input from truth.fits
zb_tab : Table (optional)
zcatalog/zbest table; may be input from zcatalog-mini.fits
If provided, used to match the simz_tab to the zbest quantities
Returns
-------
simz_tab : astropy.Table
Merged table of simspec data
results : dict
Nested dict.
First key is the TRUESPECTYPE
Second key is the SPECTYPE
e.g. results['QSO']['QSO'] reports the number of True QSO classified as QSO
results['QSO']['Galaxy'] reports the number of True QSO classified as Galaxy
"""
# Process simz_tab as need be
if zb_tab is not None:
match_truth_z(simz_tab, zb_tab, mini_read=True)
# Cut down to those processed with the Redshift fitter
measured_z = simz_tab['ZWARN'].mask == False
cut_simz = simz_tab[measured_z]
# Strip those columns
strip_ttypes = np.char.rstrip(cut_simz['TRUESPECTYPE'])
strip_stypes = np.char.rstrip(cut_simz['SPECTYPE'])
# All TRUE, SPEC types
ttypes = np.unique(strip_ttypes)
stypes = np.unique(strip_stypes)
# Init
results = {}
for ttype in ttypes:
results[ttype] = {}
# Fill
for ttype in ttypes:
itrue = strip_ttypes == ttype
# Init correct answer in case there are none
results[ttype][ttype] = 0
# import pdb; pdb.set_trace()
for stype in stypes:
results[ttype][stype] = np.sum(strip_stypes[itrue] == stype)
# Return
return results
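# Editorial sketch (toy labels, not a real truth table) of the counting logic in
# spectype_confusion above: two true QSOs, one of which is classified as GALAXY.
import numpy as np
_demo_true = np.array(['QSO', 'QSO'])
_demo_spec = np.array(['QSO', 'GALAXY'])
_demo_results = {tt: {st: int(np.sum(_demo_spec[_demo_true == tt] == st))
                      for st in np.unique(_demo_spec)}
                 for tt in np.unique(_demo_true)}
# _demo_results == {'QSO': {'GALAXY': 1, 'QSO': 1}}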
def find_zbest_files(fibermap_data):
from desimodel.footprint import radec2pix
# Init
zbest_files = []
# Search for zbest files with healpy
ra_targ = fibermap_data['TARGET_RA'].data
dec_targ = fibermap_data['TARGET_DEC'].data
# Getting some NAN in RA/DEC
good = np.isfinite(ra_targ) & np.isfinite(dec_targ)
pixels = radec2pix(64, ra_targ[good], dec_targ[good])
uni_pixels = np.unique(pixels)
for uni_pix in uni_pixels:
zbest_files.append(desispec.io.findfile('zbest', groupname=uni_pix, nside=64))
# Return
return zbest_files
def load_z(fibermap_files, zbest_files=None, outfil=None):
'''Load input and output redshift values for a set of exposures
Parameters
----------
fibermap_files: list
List of fibermap files; None of these should be calibration..
zbest_files: list, optional
List of zbest output files
Slurped from fibermap info if not provided
outfil: str, optional
Output file for the table
Returns
-------
simz_tab: astropy.Table
Merged table of simspec data
zb_tab: astropy.Table
Merged table of zbest output
'''
# imports
log = get_logger()
# Init
if zbest_files is None:
flag_load_zbest = True
zbest_files = []
else:
flag_load_zbest = False
# Load up fibermap and simspec tables
fbm_tabs = []
sps_tabs = []
for fibermap_file in fibermap_files:
# zbest?
if flag_load_zbest:
fibermap_data = desispec.io.read_fibermap(fibermap_file)
zbest_files += find_zbest_files(fibermap_data)
log.info('Reading: {:s}'.format(fibermap_file))
# Load simspec (for fibermap too!)
simspec_file = fibermap_file.replace('fibermap','simspec')
sps_hdu = fits.open(simspec_file)
# Make Tables
fbm_tabs.append(Table(sps_hdu['FIBERMAP'].data,masked=True))
truth = Table(sps_hdu['TRUTH'].data,masked=True)
if 'TRUTH_ELG' in sps_hdu:
truth_elg = Table(sps_hdu['TRUTH_ELG'].data)
truth = join(truth, truth_elg['TARGETID', 'OIIFLUX'],
keys='TARGETID', join_type='left')
else:
truth['OIIFLUX'] = 0.0
sps_tabs.append(truth)
sps_hdu.close()
# Stack + Sort
fbm_tab = vstack(fbm_tabs)
sps_tab = vstack(sps_tabs)
del fbm_tabs, sps_tabs
fbm_tab.sort('TARGETID')
sps_tab.sort('TARGETID')
# Add the version number header keywords from fibermap_files[0]
hdr = fits.getheader(fibermap_files[0].replace('fibermap', 'simspec'))
for key, value in sorted(hdr.items()):
if key.startswith('DEPNAM') or key.startswith('DEPVER'):
fbm_tab.meta[key] = value
# Drop to unique
univ, uni_idx = np.unique(np.array(fbm_tab['TARGETID']),return_index=True)
fbm_tab = fbm_tab[uni_idx]
sps_tab = sps_tab[uni_idx]
# Combine
assert np.all(fbm_tab['TARGETID'] == sps_tab['TARGETID'])
keep_colnames = list()
for colname in sps_tab.colnames:
if colname not in fbm_tab.colnames:
keep_colnames.append(colname)
simz_tab = hstack([fbm_tab,sps_tab[keep_colnames]],join_type='exact')
# Cleanup some names
#simz_tab.rename_column('OBJTYPE_1', 'OBJTYPE')
#simz_tab.rename_column('OBJTYPE_2', 'TRUETYPE')
# Update QSO naming
qsol = np.where( match_otype(simz_tab, 'QSO') & (simz_tab['TRUEZ'] >= 2.1))[0]
simz_tab['TEMPLATETYPE'][qsol] = 'QSO_L'
qsot = np.where( match_otype(simz_tab, 'QSO') & (simz_tab['TRUEZ'] < 2.1))[0]
simz_tab['TEMPLATETYPE'][qsot] = 'QSO_T'
# Load up zbest files
zb_tabs = []
for zbest_file in zbest_files:
try:
zb_hdu = fits.open(zbest_file)
except FileNotFoundError:
log.error("zbest file {} not found".format(zbest_file))
else:
zb_tabs.append(Table(zb_hdu[1].data))
# Stack
zb_tab = vstack(zb_tabs)
univ, uni_idx = np.unique(np.array(zb_tab['TARGETID']),return_index=True)
zb_tab = zb_tab[uni_idx]
# Return
return simz_tab, zb_tab
def match_truth_z(simz_tab, zb_tab, mini_read=False, outfil=None):
""" Match truth and zbest tables
:param simz_tab: astropy.Table; Either generated from load_z() or read from disk via 'truth.fits'
:param zb_tab: astropy.Table; Either generated from load_z() or read from disk via 'zcatalog-mini.fits'
:param mini_read: bool, optional; Tables were read from the summary tables written to disk
:param outfil: str, optional
:return: simz_tab: modified in place
"""
nsim = len(simz_tab)
# Match up
sim_id = np.array(simz_tab['TARGETID'])
z_id = np.array(zb_tab['TARGETID'])
inz = np.in1d(z_id,sim_id,assume_unique=True)
ins = np.in1d(sim_id,z_id,assume_unique=True)
z_idx = np.arange(z_id.shape[0])[inz]
sim_idx = np.arange(sim_id.shape[0])[ins]
assert np.array_equal(sim_id[sim_idx],z_id[z_idx])
# Fill up
ztags = ['Z','ZERR','ZWARN','SPECTYPE']
# This is for truth and zcat tables read from disk as opposed to the fibermap files
if mini_read:
ztags += ['DESI_TARGET']
# And clean up the QSO names
stypes = np.char.rstrip(simz_tab['TEMPLATETYPE'])
qsol = np.where((stypes == 'QSO') & (simz_tab['TRUEZ'] >= 2.1))[0]
simz_tab['TEMPLATETYPE'][qsol] = 'QSO_L'
qsot = np.where((stypes == 'QSO') & (simz_tab['TRUEZ'] < 2.1))[0]
simz_tab['TEMPLATETYPE'][qsot] = 'QSO_T'
# Generate the new columns
new_clms = []
mask = np.array([True]*nsim)
mask[sim_idx] = False
for kk,ztag in enumerate(ztags):
# Generate a MaskedColumn
new_clm = MaskedColumn([zb_tab[ztag][z_idx[0]]]*nsim, name=ztag, mask=mask)
#name=new_tags[kk], mask=mask)
# Fill
new_clm[sim_idx] = zb_tab[ztag][z_idx]
# Append
new_clms.append(new_clm)
# Add columns
simz_tab.add_columns(new_clms)
# Write?
if outfil is not None:
simz_tab.write(outfil,overwrite=True)
return
def obj_requirements(zstats, objtype):
"""Assess where a given objtype passes the requirements
Requirements from Doc 318 (August 2014)
Parameters
----------
zstats : Object
This parameter is not documented.
objtype : str
Object type, e.g. 'ELG', 'LRG'
Returns
-------
dict
Pass/fail dict
"""
log = get_logger()
pf_dict = {}
#
all_dict=dict(ELG={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.90},
LRG={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},
BGS={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},
MWS={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},
QSO_T={'RMS_DZ':0.0025, 'MEAN_DZ': 0.0004, 'CAT_RATE': 0.05, 'EFF': 0.90},
QSO_L={'RMS_DZ':0.0025, 'CAT_RATE': 0.02, 'EFF': 0.90})
req_dict = all_dict[objtype]
tst_fail = ''
passf = str('PASS')
for key in req_dict:
ipassf = str('PASS')
if key in ['EFF']: # Greater than requirement
if zstats[key] < req_dict[key]:
ipassf = str('FAIL')
tst_fail = tst_fail+key+'-'
log.warning('{:s} failed requirement {:s}: {} < {}'.format(objtype, key, zstats[key], req_dict[key]))
else:
log.debug('{:s} passed requirement {:s}: {} >= {}'.format(objtype, key, zstats[key], req_dict[key]))
else:
if zstats[key] > req_dict[key]:
ipassf = str('FAIL')
tst_fail = tst_fail+key+'-'
log.warning('{:s} failed requirement {:s}: {} > {}'.format(objtype, key, zstats[key], req_dict[key]))
else:
log.debug('{:s} passed requirement {:s}: {} <= {}'.format(objtype, key, zstats[key], req_dict[key]))
# Update
pf_dict[key] = ipassf
if ipassf == str('FAIL'):
passf = str('FAIL')
if passf == str('FAIL'):
tst_fail = tst_fail[:-1]
# log.warning('OBJ={:s} failed tests {:s}'.format(objtype,tst_fail))
#
#pf_dict['FINAL'] = passf
return pf_dict, passf
def zstats(simz_tab, objtype=None, dvlimit=None, count=False, survey=False):
""" Perform statistics on the input truth+z table
good = Satisfies dv criteria and ZWARN==0
fail = Fails dv criteria with ZWARN==0 (catastrophic failures)
miss = Satisfies dv criteria but ZWARN!=0 (missed opportunities)
lost = Fails dv criteria and ZWARN!=0 (lost, but at least we knew it)
Args:
simz_tab:
objtype:
dvlimit: float, optional -- Over-rides object specific dv limits
count: bool, optional
survey: bool, optional -- Restrict to targets meeting the Survey criteria (e.g. ELG flux)
Returns:
if count=True: just the raw counts of each category :: ngood, nfail, nmiss, nlost
else: percentile of each relative to ntot, and ntot
"""
# Grab the masks
objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask = criteria(
simz_tab, dvlimit=dvlimit, objtype=objtype)
# Score-card
good = zwarn_mask & dv_mask & objtype_mask & z_mask
cat = zwarn_mask & (~dv_mask) & objtype_mask & z_mask
miss = (~zwarn_mask) & dv_mask & objtype_mask & z_mask
lost = (~zwarn_mask) & (~dv_mask) & objtype_mask & z_mask
# Restrict to the Survey design?
tot_msk = objtype_mask & z_mask
if survey:
good &= survey_mask
cat &= survey_mask
miss &= survey_mask
lost &= survey_mask
tot_msk &= survey_mask
#
ngood = np.count_nonzero(good)
nfail = np.count_nonzero(cat)
nmiss = np.count_nonzero(miss)
nlost = np.count_nonzero(lost)
ntot = np.count_nonzero(tot_msk)
# Check
assert(ntot == ngood+nfail+nmiss+nlost)
# Return
if count:
return ngood, nfail, nmiss, nlost
elif ntot == 0:
return (np.nan, np.nan, np.nan, np.nan, 0)
else:
return 100*ngood/ntot, 100*nfail/ntot, 100*nmiss/ntot, 100*nlost/ntot, ntot
def criteria(simz_tab, objtype=None, dvlimit=None):
"""Analyze the input table for various criteria
Parameters
----------
simz_tab : Table
objtype : str, optional -- Restrict analysis to a specific object type
Returns
-------
objtype_mask : ndarray
Match to input objtype (if any given)
z_mask : ndarray
Analyzed by the redshift analysis software
survey_mask : ndarray
Part of the DESI survey (not filler)
dv_mask : ndarray
Satisfies the dv criterion; Either specific to each objtype
or using an input dvlimit
zwarn_mask : ndarray
ZWARN=0
"""
# Init
nrow = len(simz_tab)
stypes = np.char.rstrip(simz_tab['TEMPLATETYPE'].astype(str))
# Object type
if objtype is None:
objtype_mask = np.array([True]*nrow)
else:
if objtype in ['STAR', 'WD', 'QSO']:
objtype_mask = stypes == objtype
else:
objtype_mask = match_otype(simz_tab, objtype) # Use DESI_TARGET when possible
# Redshift analysis
z_mask = simz_tab['Z'].mask == False # Not masked in Table
# Survey
survey_mask = (simz_tab['Z'].mask == False)
elg = np.where(match_otype(simz_tab, 'ELG') & survey_mask)[0]
if len(elg) > 0:
elg_mask = elg_flux_lim(simz_tab['TRUEZ'][elg],
simz_tab['OIIFLUX'][elg])
# Update
survey_mask[elg[~elg_mask]] = False
# zwarn -- Masked array
zwarn_mask = np.array([False]*nrow)
idx = np.where((simz_tab['ZWARN'] == 0) & (simz_tab['ZWARN'].mask == False))[0]
zwarn_mask[idx] = True
# Catastrophic/Good (This gets a bit more messy...)
dv_mask = np.array([True]*nrow)
for obj in np.unique(stypes):
if obj in ['ELG','LRG','QSO_L','QSO_T', 'BGS', 'MWS']: # Use DESI_TARGET when possible
omask = np.where(match_otype(simz_tab, obj))[0] # & (simz_tab['ZWARN']==0))[0]
else:
omask = np.where(stypes == obj)[0]
if dvlimit is None:
try:
dv = catastrophic_dv(obj) # km/s
except:
dv = 1000.
else:
dv = dvlimit
dz = calc_dz(simz_tab[omask]) # dz/1+z
cat = np.where(np.abs(dz)*C_LIGHT > dv)[0]
dv_mask[omask[cat]] = False
# Return
return objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask
def slice_simz(simz_tab, objtype=None, z_analy=False, survey=False,
catastrophic=False, goodz=False, all_zwarn0=False, **kwargs):
"""Slice input simz_tab in one of many ways
Parameters
----------
z_analy : bool, optional
redshift analysis required?
all_zwarn0 : bool, optional
Ignores catastrophic failures in the slicing to return
all sources with ZWARN==0
survey : bool, optional
Only include objects that satisfy the Survey requirements
e.g. ELGs with sufficient OII_flux
catastrophic : bool, optional
Restrict to catastropic failures
goodz : bool, optional
Restrict to good redshifts
all_zwarn0 : bool, optional
Restrict to ZWARN=0 cases
**kwargs : passed to criteria
Returns
-------
simz_table : Table cut by input parameters
"""
# Grab the masks
objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask = criteria(
simz_tab, objtype=objtype, **kwargs)
# Slice me
final_mask = objtype_mask
if z_analy:
final_mask &= z_mask
if survey:
final_mask &= survey_mask
if catastrophic:
final_mask &= (~dv_mask)
final_mask &= zwarn_mask # Must also have ZWARN=0
if goodz:
final_mask &= dv_mask
final_mask &= zwarn_mask
if all_zwarn0:
final_mask &= zwarn_mask
# Return
return simz_tab[final_mask]
def obj_fig(simz_tab, objtype, summ_stats, outfile=None):
"""Generate QA plot for a given object type
"""
from astropy.stats import sigma_clip
logs = get_logger()
gdz_tab = slice_simz(simz_tab,objtype=objtype, survey=True,goodz=True, all_zwarn0=True)
if objtype == 'ELG':
allgd_tab = slice_simz(simz_tab,objtype=objtype, survey=False,goodz=True, all_zwarn0=True)
if len(gdz_tab) <= 1:
logs.info("Not enough objects of type {:s} for QA".format(objtype))
return
# Plot
sty_otype = get_sty_otype()
fig = plt.figure(figsize=(8, 6.0))
gs = gridspec.GridSpec(2,2)
# Title
fig.suptitle('{:s}: Summary'.format(sty_otype[objtype]['lbl']),
fontsize='large')
# Offset
for kk in range(4):
yoff = 0.
ax= plt.subplot(gs[kk])
if kk == 0:
yval = calc_dzsig(gdz_tab)
ylbl = (r'$(z_{\rm red}-z_{\rm true}) / \sigma(z)$')
ylim = 5.
# Stats with clipping
clip_y = sigma_clip(yval, sigma=5.)
rms = np.std(clip_y)
redchi2 = np.sum(clip_y**2)/np.sum(~clip_y.mask)
#
xtxt = 0.05
ytxt = 1.0
for req_tst in ['EFF','CAT_RATE']:
ytxt -= 0.12
if summ_stats[objtype]['REQ_INDIV'][req_tst] == 'FAIL':
tcolor='red'
else:
tcolor='green'
ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format(req_tst,
summ_stats[objtype][req_tst]), color=tcolor,
transform=ax.transAxes, ha='left', fontsize='small')
# Additional
ytxt -= 0.12
ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format('RMS:', rms),
color='black', transform=ax.transAxes, ha='left', fontsize='small')
ytxt -= 0.12
ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format(r'$\chi^2_\nu$:',
redchi2), color='black', transform=ax.transAxes,
ha='left', fontsize='small')
else:
yval = calc_dz(gdz_tab)
if kk == 1:
ylbl = (r'$(z_{\rm red}-z_{\rm true}) / (1+z)$')
else:
ylbl = r'$\delta v_{\rm red-true}$ [km/s]'
ylim = max(5.*summ_stats[objtype]['RMS_DZ'],1e-5)
if (np.median(summ_stats[objtype]['MEDIAN_DZ']) >
summ_stats[objtype]['RMS_DZ']):
yoff = summ_stats[objtype]['MEDIAN_DZ']
if kk==1:
# Stats
xtxt = 0.05
ytxt = 1.0
dx = ((ylim/2.)//0.0001 +1)*0.0001
ax.xaxis.set_major_locator(plt.MultipleLocator(dx))
for stat in ['RMS_DZ','MEAN_DZ', 'MEDIAN_DZ']:
ytxt -= 0.12
try:
pfail = summ_stats[objtype]['REQ_INDIV'][stat]
except KeyError:
tcolor='black'
else:
if pfail == 'FAIL':
tcolor='red'
else:
tcolor='green'
ax.text(xtxt, ytxt, '{:s}: {:.5f}'.format(stat,
summ_stats[objtype][stat]), color=tcolor,
transform=ax.transAxes, ha='left', fontsize='small')
# Histogram
if kk < 2:
binsz = ylim/10.
#i0, i1 = int( np.min(yval) / binsz) - 1, int( np.max(yval) / binsz) + 1
i0, i1 = int(-ylim/binsz) - 1, int( ylim/ binsz) + 1
rng = tuple( binsz*np.array([i0,i1]) )
nbin = i1-i0
# Histogram
hist, edges = np.histogram(yval, range=rng, bins=nbin)
xhist = (edges[1:] + edges[:-1])/2.
#ax.hist(xhist, color='black', bins=edges, weights=hist)#, histtype='step')
ax.hist(xhist, color=sty_otype[objtype]['color'], bins=edges, weights=hist)#, histtype='step')
ax.set_xlabel(ylbl)
ax.set_xlim(-ylim, ylim)
else:
if kk == 2:
lbl = r'$z_{\rm true}$'
xval = gdz_tab['TRUEZ']
xmin,xmax=np.min(xval),np.max(xval)
dx = np.maximum(1,(xmax-xmin)//0.5)*0.1
ax.xaxis.set_major_locator(plt.MultipleLocator(dx))
#xmin,xmax=0.6,1.65
elif kk == 3:
if objtype == 'ELG':
lbl = r'[OII] Flux ($10^{-16}$)'
#xval = gdz_tab['OIIFLUX']*1e16
xval = allgd_tab['OIIFLUX']*1e16
yval = calc_dz(allgd_tab)
# Avoid NAN
gdy = np.isfinite(yval)
xval = xval[gdy]
yval = yval[gdy]
xmin,xmax=0.5,20
ax.set_xscale("log", nonposx='clip')
elif objtype == 'QSO':
lbl = 'g (Mag)'
xval = 22.5 - 2.5 * np.log10(gdz_tab['FLUX_G'])
xmin,xmax=np.min(xval),np.max(xval)
else:
lbl = 'r (Mag)'
xval = 22.5 - 2.5 * np.log10(gdz_tab['FLUX_R'])
xmin,xmax=np.min(xval),
completion: np.max(xval)
api: numpy.max