| prompt (string, lengths 15 to 655k) | completion (string, lengths 3 to 32.4k) | api (string, lengths 8 to 52) |
|---|---|---|
"""
Functions read in monthly data from the 6 AMIP experiments.
Data is available over the 1979-2016 period (38 years) and sorted by month (12).
The AMIP simulations use SC-WACCM4 with historical forcings and RCP 4.5.
The experiment with all forcings is called AMQS. Note that the first year
(1978) is removed due to model spin-up.
Notes
-----
Author : <NAME>
Date : 25 March 2020
Usage
-----
[1] readAMIP6(variableq,experiment,level,detrend,sliceeq,period)
"""
def readAMIP6(variableq,experiment,level,detrend,sliceeq,period):
"""
Function reads monthly data from all 6 AMIP experiments
Parameters
----------
variableq : string
variable name to read
experiment : string
experiment name (CSST, CSIC, AMIP, AMS, AMQ, AMQS)
level : string
Height of variable (surface or profile)
detrend : binary
True/False whether to remove a linear trend at all grid points
sliceeq : binary
True/False whether to slice at the equator and keep only the Northern Hemisphere
period : string
Time of analysis
Returns
-------
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
time : 1d numpy array
standard time (months since 1978-1-15, 00:00:00)
lev : 1d numpy array
levels (17)
var : 5d numpy array or 6d numpy array
[ensemble,year,month,lat,lon] or [ensemble,year,month,level,lat,lon]
Usage
-----
lat,lon,time,lev,var = readAMIP6(variableq,experiment,level,detrend,sliceeq,period)
"""
print('\n>>> Using readAMIP6 function! \n')
###########################################################################
###########################################################################
###########################################################################
### Import modules
import numpy as np
from netCDF4 import Dataset
import calc_Detrend as DT
import calc_Utilities as UT
### Declare knowns
ensembles = 10
months = 12
years = np.arange(1979,2016+1,1)
### Directory for experiments (remote server - Seley)
directorydata = '/seley/zlabe/simu/'
###########################################################################
###########################################################################
variable = variableq
###########################################################################
###########################################################################
###########################################################################
### Read in lat,lon,time from known file
if level == 'surface': # 3d variables
dataq = Dataset(directorydata + '%s1/monthly/T2M_1978-2016.nc' % experiment)
time = dataq.variables['time'][12:] ### drop the first 12 months (1978 spin-up year)
lev = 'surface'
lat = dataq.variables['latitude'][:]
lon = dataq.variables['longitude'][:]
dataq.close()
###########################################################################
###########################################################################
if sliceeq == False:
### Create empty variable
varq = np.empty((ensembles,time.shape[0],
lat.shape[0],lon.shape[0]))
varq[:,:,:,:] = np.nan ### fill with nans
elif sliceeq == True:
### Slice for Northern Hemisphere
latq = np.where(lat >= 0)[0]
lat = lat[latq]
### Create empty variable
varq = np.empty((ensembles,time.shape[0],
lat.shape[0],lon.shape[0]))
varq[:,:,:,:] = np.nan ### fill with nans
print('SLICE for Northern Hemisphere!')
else:
raise ValueError('Selected wrong slicing!')
###########################################################################
###########################################################################
elif level == 'profile': # 4d variables
dataq = Dataset(directorydata + '%s1/monthly/TEMP_1978-2016.nc' % experiment)
time = dataq.variables['time'][12:] ### drop the first 12 months (1978 spin-up year)
lev = dataq.variables['level'][:]
lat = dataq.variables['latitude'][:]
lon = dataq.variables['longitude'][:]
dataq.close()
###########################################################################
###########################################################################
if sliceeq == False:
### Create empty variable
varq = np.empty((ensembles,time.shape[0],lev.shape[0],
lat.shape[0],lon.shape[0]))
varq[:,:,:,:,:] = np.nan ### fill with nans
elif sliceeq == True:
### Slice for Northern Hemisphere
latq =
| np.where(lat >= 0) | numpy.where |
import pandas as pd
import math
import numpy as np
def matchCheck(colNo, weightl, impactl):
# print(colNo)
# print(weightl)
# print(impactl)
if colNo != weightl or colNo != impactl or impactl != weightl:
raise Exception("Number of weights, number of impacts and number of columns (from 2nd to last columns) must be same")
def checkNumeric(df):
columns = list(df)[1:]
for col in columns:
if not pd.api.types.is_integer_dtype(df[col].dtypes) and not pd.api.types.is_float_dtype(df[col].dtypes) :
# print()
raise Exception("From 2nd to last columns must contain numeric values only.")
return True
def validateWeight(weights):
for wt in weights:
r_wt = wt.replace(".","",1)
if not r_wt.isnumeric():
raise Exception("weights must be numeric values separated by ','")
def validateImpact(impacts):
# print(impacts)
for imp in impacts:
# print(imp)
if imp!='-' and imp!='+':
raise Exception("Impacts must be either +ve or -ve")
def euclidean(series,value):
return math.sqrt(sum((series-value)**2))
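# The topsis() routine below follows the standard TOPSIS steps: normalize each
# criterion column by its Euclidean norm, apply the weights, take the best and
# worst weighted value per column as the ideal (Vp) and anti-ideal (Vn) points
# according to the impact sign, and score each row by Sn/(Sp+Sn), its relative
# closeness to the ideal solution.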
def topsis(df,weights,impacts):
columns = list(df)[1:]
df_norm = pd.DataFrame()
i = 0
Vp = []
Vn = []
for col in columns:
div = euclidean(df[col], 0) # Euclidean (root-sum-square) norm of the column, used as the normalization denominator
df_norm[col] = df[col].apply(lambda x: (x/div)*weights[i])
if impacts[i] == '+':
Vp.append(df_norm[col].max())
Vn.append(df_norm[col].min())
else:
Vn.append(df_norm[col].max())
Vp.append(df_norm[col].min())
i+=1
score = []
for r in range(0,len(df_norm)):
Sp = euclidean(df_norm.iloc[r,:],Vp)
Sn = euclidean(df_norm.iloc[r,:],Vn)
# print(Sp,Sn)
sc = Sn/(Sp+Sn)
score.append(sc)
# print(score)
# print(df_norm)
# print(Vp)
# print(Vn)
score = np.array(score)
indices = np.sort(score)
df["P"] = score
df["Score"] = (df['P'].rank(method='max', ascending=False))
def performTOPSIS(df, weights_str, impacts_str):
weights = weights_str.split(",")
impacts = impacts_str.split(",")
validateWeight(weights)
validateImpact(impacts)
matchCheck(df.shape[1]-1,len(weights), len(impacts))
checkNumeric(df)
new_df = topsis(df,
| np.float_(weights) | numpy.float_ |
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
cmap = cm.ScalarMappable(colors.Normalize(1, 5200), cm.viridis)
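# cmap maps scalar values in the range [1, 5200] onto the viridis colormap;
# presumably used to color results by the numeric file id collected below.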
# Reading in all data files at once
import glob
path_normal ='/projects/p30137/ageller/testing/EBLSST/add_m5/output_files'
allFiles_normal = glob.glob(path_normal + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/output_files'
allFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/obsDist/output_files'
allFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
#will want to remove old when the updates come in from Katie
#normal =[]
#fast=[]
#obsDist = []
#normal_03 =[]
#fast_03=[]
#obsDist_03 = []
#normal_1 =[]
#fast_1 =[]
#obsDist_1 = []
#normal_10 =[]
#fast_10=[]
#obsDist_10 = []
#normal_30 =[]
#fast_30=[]
#obsDist_30 = []
#normal_100 =[]
#fast_100 =[]
#obsDist_100 = []
#normal_1000 =[]
#fast_1000 =[]
#obsDist_1000 = []
#normal_overall =[]
#fast_overall=[]
#obsDist_overall = []
#normal_overall_03 =[]
#fast_overall_03=[]
#obsDist_overall_03 = []
#normal_overall_1 =[]
#fast_overall_1 =[]
#obsDist_overall_1 = []
#normal_overall_10 =[]
#fast_overall_10=[]
#obsDist_overall_10 = []
#normal_overall_30 =[]
#fast_overall_30=[]
#obsDist_overall_30 = []
#normal_overall_100 =[]
#fast_overall_100 =[]
#obsDist_overall_100 = []
#normal_overall_1000=[]
#fast_overall_1000 =[]
#obsDist_overall_1000 = []
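# Accumulators for the per-file counts collected in the loops below. Naming
# convention (inferred from how the cuts are applied later in this script):
# 'normal', 'fast', and 'obsDist' refer to the three sets of output files read
# from the three directories above; suffixes _03, _1, _10, _30, _100, _1000 are
# cuts on the input period (<= 0.3, 1, 10, 30, 100, 1000; likely days); '22'
# and '195' mark apparent-magnitude cuts at appMagMean <= 22 and <= 19.5.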
N_totalnormal_array = []
N_totalobservablenormal_array = []
N_totalrecoverablenormal_array = []
N_totalnormal_array_03 = []
N_totalobservablenormal_array_03 = []
N_totalrecoverablenormal_array_03 = []
N_totalnormal_array_1 = []
N_totalobservablenormal_array_1 = []
N_totalrecoverablenormal_array_1 = []
N_totalnormal_array_10 = []
N_totalobservablenormal_array_10 = []
N_totalrecoverablenormal_array_10 = []
N_totalnormal_array_30 = []
N_totalobservablenormal_array_30 = []
N_totalrecoverablenormal_array_30 = []
N_totalnormal_array_100 = []
N_totalobservablenormal_array_100 = []
N_totalrecoverablenormal_array_100 = []
N_totalnormal_array_1000 = []
N_totalobservablenormal_array_1000 = []
N_totalrecoverablenormal_array_1000 = []
N_totalnormal22_array = []
N_totalobservablenormal22_array = []
N_totalrecoverablenormal22_array = []
N_totalnormal22_array_03 = []
N_totalobservablenormal22_array_03 = []
N_totalrecoverablenormal22_array_03 = []
N_totalnormal22_array_1 = []
N_totalobservablenormal22_array_1 = []
N_totalrecoverablenormal22_array_1 = []
N_totalnormal22_array_10 = []
N_totalobservablenormal22_array_10 = []
N_totalrecoverablenormal22_array_10 = []
N_totalnormal22_array_30 = []
N_totalobservablenormal22_array_30 = []
N_totalrecoverablenormal22_array_30 = []
N_totalnormal22_array_100 = []
N_totalobservablenormal22_array_100 = []
N_totalrecoverablenormal22_array_100 = []
N_totalnormal22_array_1000 = []
N_totalobservablenormal22_array_1000 = []
N_totalrecoverablenormal22_array_1000 = []
N_totalnormal195_array = []
N_totalobservablenormal195_array = []
N_totalrecoverablenormal195_array = []
N_totalnormal195_array_03 = []
N_totalobservablenormal195_array_03 = []
N_totalrecoverablenormal195_array_03 = []
N_totalnormal195_array_1 = []
N_totalobservablenormal195_array_1 = []
N_totalrecoverablenormal195_array_1 = []
N_totalnormal195_array_10 = []
N_totalobservablenormal195_array_10 = []
N_totalrecoverablenormal195_array_10 = []
N_totalnormal195_array_30 = []
N_totalobservablenormal195_array_30 = []
N_totalrecoverablenormal195_array_30 = []
N_totalnormal195_array_100 = []
N_totalobservablenormal195_array_100 = []
N_totalrecoverablenormal195_array_100 = []
N_totalnormal195_array_1000 = []
N_totalobservablenormal195_array_1000 = []
N_totalrecoverablenormal195_array_1000 = []
N_totalfast_array = []
N_totalobservablefast_array = []
N_totalrecoverablefast_array = []
N_totalfast_array_03 = []
N_totalobservablefast_array_03 = []
N_totalrecoverablefast_array_03 = []
N_totalfast_array_1 = []
N_totalobservablefast_array_1 = []
N_totalrecoverablefast_array_1 = []
N_totalfast_array_10 = []
N_totalobservablefast_array_10 = []
N_totalrecoverablefast_array_10 = []
N_totalfast_array_30 = []
N_totalobservablefast_array_30 = []
N_totalrecoverablefast_array_30 = []
N_totalfast_array_100 = []
N_totalobservablefast_array_100 = []
N_totalrecoverablefast_array_100 = []
N_totalfast_array_1000 = []
N_totalobservablefast_array_1000 = []
N_totalrecoverablefast_array_1000 = []
N_totalfast22_array = []
N_totalobservablefast22_array = []
N_totalrecoverablefast22_array = []
N_totalfast22_array_03 = []
N_totalobservablefast22_array_03 = []
N_totalrecoverablefast22_array_03 = []
N_totalfast22_array_1 = []
N_totalobservablefast22_array_1 = []
N_totalrecoverablefast22_array_1 = []
N_totalfast22_array_10 = []
N_totalobservablefast22_array_10 = []
N_totalrecoverablefast22_array_10 = []
N_totalfast22_array_30 = []
N_totalobservablefast22_array_30 = []
N_totalrecoverablefast22_array_30 = []
N_totalfast22_array_100 = []
N_totalobservablefast22_array_100 = []
N_totalrecoverablefast22_array_100 = []
N_totalfast22_array_1000 = []
N_totalobservablefast22_array_1000 = []
N_totalrecoverablefast22_array_1000 = []
N_totalfast195_array = []
N_totalobservablefast195_array = []
N_totalrecoverablefast195_array = []
N_totalfast195_array_03 = []
N_totalobservablefast195_array_03 = []
N_totalrecoverablefast195_array_03 = []
N_totalfast195_array_1 = []
N_totalobservablefast195_array_1 = []
N_totalrecoverablefast195_array_1 = []
N_totalfast195_array_10 = []
N_totalobservablefast195_array_10 = []
N_totalrecoverablefast195_array_10 = []
N_totalfast195_array_30 = []
N_totalobservablefast195_array_30 = []
N_totalrecoverablefast195_array_30 = []
N_totalfast195_array_100 = []
N_totalobservablefast195_array_100 = []
N_totalrecoverablefast195_array_100 = []
N_totalfast195_array_1000 = []
N_totalobservablefast195_array_1000 = []
N_totalrecoverablefast195_array_1000 = []
N_totalobsDist_array = []
N_totalobservableobsDist_array = []
N_totalrecoverableobsDist_array = []
N_totalobsDist_array_03 = []
N_totalobservableobsDist_array_03 = []
N_totalrecoverableobsDist_array_03 = []
N_totalobsDist_array_1 = []
N_totalobservableobsDist_array_1 = []
N_totalrecoverableobsDist_array_1 = []
N_totalobsDist_array_10 = []
N_totalobservableobsDist_array_10 = []
N_totalrecoverableobsDist_array_10 = []
N_totalobsDist_array_30 = []
N_totalobservableobsDist_array_30 = []
N_totalrecoverableobsDist_array_30 = []
N_totalobsDist_array_100 = []
N_totalobservableobsDist_array_100 = []
N_totalrecoverableobsDist_array_100 = []
N_totalobsDist_array_1000 = []
N_totalobservableobsDist_array_1000 = []
N_totalrecoverableobsDist_array_1000 = []
N_totalobsDist22_array = []
N_totalobservableobsDist22_array = []
N_totalrecoverableobsDist22_array = []
N_totalobsDist22_array_03 = []
N_totalobservableobsDist22_array_03 = []
N_totalrecoverableobsDist22_array_03 = []
N_totalobsDist22_array_1 = []
N_totalobservableobsDist22_array_1 = []
N_totalrecoverableobsDist22_array_1 = []
N_totalobsDist22_array_10 = []
N_totalobservableobsDist22_array_10 = []
N_totalrecoverableobsDist22_array_10 = []
N_totalobsDist22_array_30 = []
N_totalobservableobsDist22_array_30 = []
N_totalrecoverableobsDist22_array_30 = []
N_totalobsDist22_array_100 = []
N_totalobservableobsDist22_array_100 = []
N_totalrecoverableobsDist22_array_100 = []
N_totalobsDist22_array_1000 = []
N_totalobservableobsDist22_array_1000 = []
N_totalrecoverableobsDist22_array_1000 = []
N_totalobsDist195_array = []
N_totalobservableobsDist195_array = []
N_totalrecoverableobsDist195_array = []
N_totalobsDist195_array_03 = []
N_totalobservableobsDist195_array_03 = []
N_totalrecoverableobsDist195_array_03 = []
N_totalobsDist195_array_1 = []
N_totalobservableobsDist195_array_1 = []
N_totalrecoverableobsDist195_array_1 = []
N_totalobsDist195_array_10 = []
N_totalobservableobsDist195_array_10 = []
N_totalrecoverableobsDist195_array_10 = []
N_totalobsDist195_array_30 = []
N_totalobservableobsDist195_array_30 = []
N_totalrecoverableobsDist195_array_30 = []
N_totalobsDist195_array_100 = []
N_totalobservableobsDist195_array_100 = []
N_totalrecoverableobsDist195_array_100 = []
N_totalobsDist195_array_1000 = []
N_totalobservableobsDist195_array_1000 = []
N_totalrecoverableobsDist195_array_1000 = []
colorvalue_normal = []
colorvalue_fast = []
colorvalue_obsDist = []
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
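# fitRagfb() fits a 1D power law to the binary-fraction estimates above as a
# function of primary mass (x = bin midpoints, per the comment and link);
# the fitted model fbFit is used in each loop below to weight the m1 histogram
# and estimate that file's overall binary fraction.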
fbFit= fitRagfb()
mbins = np.arange(0,10, 0.1, dtype='float')
for filenormal_ in sorted(allFiles_normal):
filename1 = filenormal_[60:]
fileid1 = filename1.strip('output_file.csv')
colorvalue1 = int(fileid1)
colorvalue_normal.append(colorvalue1)
print ("I'm starting " + fileid1)
datnormal = pd.read_csv(filenormal_, sep = ',', header=2)
##########################################################
datnormal1 = pd.read_csv(filenormal_, sep = ',', header=0, nrows=1)
N_tri1 = datnormal1["NstarsTRILEGAL"][0]
print("N_tri1 = ", N_tri1)
m1hAll01, m1b1 = np.histogram(datnormal["m1"], bins=mbins)
dm11 = np.diff(m1b1)
m1val1 = m1b1[:-1] + dm11/2.
fb1 = np.sum(m1hAll01*dm11*fbFit(m1val1))
N_mult1 = N_tri1*fb1
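# fb1 weights the m1 histogram by the bin width and the mass-dependent binary
# fraction and sums over bins; N_mult1 = N_tri1*fb1 then serves as the
# per-field normalization used below (TRILEGAL star count scaled by the
# effective binary fraction).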
##########################################################
PeriodIn1 = datnormal['p']
if len(PeriodIn1) == 0.:
continue
if N_tri1 == 0:
continue
else:
# input period -- 'p' in data file
print('length period in = ', len(PeriodIn1))
PeriodOut1 = datnormal['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean1 = datnormal['appMagMean'] #apparent magnitude, will use to make cuts for 24 (default), 22, and then Kepler's range (?? -- brighter than LSST can manage-- to 19) OR 19.5 (SNR = 10)
print('length period out = ', len(PeriodOut1))
observable1 = np.where(PeriodOut1 != -999)[0]
observable1_03 = np.where(PeriodIn1[observable1] <= 0.3)[0]
observable1_1 = np.where(PeriodIn1[observable1] <= 1)[0]
observable1_10 = np.where(PeriodIn1[observable1] <= 10)[0]
observable1_30 = np.where(PeriodIn1[observable1] <= 30)[0]
observable1_100 = np.where(PeriodIn1[observable1] <= 100)[0]
observable1_1000 = np.where(PeriodIn1[observable1] <= 1000)[0]
observable1_22 = np.where(appMagMean1[observable1] <= 22.)[0]
observable1_03_22 = np.where(appMagMean1[observable1_03] <= 22.)[0]
observable1_1_22 = np.where(appMagMean1[observable1_1] <= 22.)[0]
observable1_10_22 = np.where(appMagMean1[observable1_10] <= 22.)[0]
observable1_30_22 = np.where(appMagMean1[observable1_30] <= 22.)[0]
observable1_100_22 = np.where(appMagMean1[observable1_100] <= 22.)[0]
observable1_1000_22 = np.where(appMagMean1[observable1_1000] <= 22.)[0]
observable1_195 = np.where(appMagMean1[observable1] <= 19.5)[0]
observable1_03_195 = np.where(appMagMean1[observable1_03] <= 19.5)[0]
observable1_1_195 = np.where(appMagMean1[observable1_1] <= 19.5)[0]
observable1_10_195 = np.where(appMagMean1[observable1_10] <= 19.5)[0]
observable1_30_195 = np.where(appMagMean1[observable1_30] <= 19.5)[0]
observable1_100_195 = np.where(appMagMean1[observable1_100] <= 19.5)[0]
observable1_1000_195 = np.where(appMagMean1[observable1_1000] <= 19.5)[0]
Sigma_Period_Whole1 = abs(PeriodOut1 - PeriodIn1)/PeriodIn1
Sigma_Period_Half1 = abs(PeriodOut1 - 0.5*PeriodIn1)/(0.5*PeriodIn1)
Sigma_Period_Twice1 = abs(PeriodOut1 - 2*PeriodIn1)/(2*PeriodIn1)
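# Recovery criteria: a system counts as recovered if the LSM output period is
# within 10% of the input period, of half the input period, or of twice the
# input period (the half- and double-period aliases); 'recoverable1' below is
# the concatenation (union) of those three index sets.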
#print(type(Sigma_Period_Twice1))
#print("Sigma_Period_Twice1: ", Sigma_Period_Twice1)
#print(type(Sigma_Period_Half1))
#print("Sigma_Period_Half1: ", Sigma_Period_Half1)
#print(type(Sigma_Period_Whole1))
#print("Sigma_Period_Whole1: ", Sigma_Period_Whole1)
#recover_twice1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Twice1), Sigma_Period_Twice1 <= 0.1))[0]
recover_twice1 = np.where(Sigma_Period_Twice1 <= 0.1)[0]
recover_twice1_03 = np.where(PeriodIn1[recover_twice1] <= 0.3)[0]
recover_twice1_1 = np.where(PeriodIn1[recover_twice1] <= 1)[0]
recover_twice1_10 = np.where(PeriodIn1[recover_twice1] <= 10)[0]
recover_twice1_30 = np.where(PeriodIn1[recover_twice1] <= 30)[0]
recover_twice1_100 = np.where(PeriodIn1[recover_twice1] <= 100)[0]
recover_twice1_1000 = np.where(PeriodIn1[recover_twice1] <= 1000)[0]
#recover_half1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Half1), Sigma_Period_Half1 <= 0.1))[0]
recover_half1 = np.where(Sigma_Period_Half1 <= 0.1)[0]
recover_half1_03 = np.where(PeriodIn1[recover_half1] <= 0.3)[0]
recover_half1_1 = np.where(PeriodIn1[recover_half1] <= 1)[0]
recover_half1_10 = np.where(PeriodIn1[recover_half1] <= 10)[0]
recover_half1_30 = np.where(PeriodIn1[recover_half1] <= 30)[0]
recover_half1_100 = np.where(PeriodIn1[recover_half1] <= 100)[0]
recover_half1_1000 = np.where(PeriodIn1[recover_half1] <= 1000)[0]
#recover_whole1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Whole1), Sigma_Period_Whole1 <= 0.1))[0]
recover_whole1 = np.where(Sigma_Period_Whole1 <= 0.1)[0]
recover_whole1_03 = np.where(PeriodIn1[recover_whole1] <= 0.3)[0]
recover_whole1_1 = np.where(PeriodIn1[recover_whole1] <= 1)[0]
recover_whole1_10 = np.where(PeriodIn1[recover_whole1] <= 10)[0]
recover_whole1_30 = np.where(PeriodIn1[recover_whole1] <= 30)[0]
recover_whole1_100 = np.where(PeriodIn1[recover_whole1] <= 100)[0]
recover_whole1_1000 = np.where(PeriodIn1[recover_whole1] <= 1000)[0]
recoverable1 = np.concatenate((recover_twice1, recover_whole1, recover_half1), axis=0)
recoverable1_03 = np.concatenate((recover_twice1_03, recover_whole1_03, recover_half1_03), axis=0)
recoverable1_1 = np.concatenate((recover_twice1_1, recover_whole1_1, recover_half1_1), axis=0)
recoverable1_10 = np.concatenate((recover_twice1_10, recover_whole1_10, recover_half1_10), axis=0)
recoverable1_30 = np.concatenate((recover_twice1_30, recover_whole1_30, recover_half1_30), axis=0)
recoverable1_100 = np.concatenate((recover_twice1_100, recover_whole1_100, recover_half1_100), axis=0)
recoverable1_1000 = np.concatenate((recover_twice1_1000, recover_whole1_1000, recover_half1_1000), axis=0)
recoverable1_22 = np.where(appMagMean1[recoverable1] <= 22.)[0]
recoverable1_03_22 = np.where(appMagMean1[recoverable1_03] <= 22.)[0]
recoverable1_1_22 = np.where(appMagMean1[recoverable1_1] <= 22.)[0]
recoverable1_10_22 = np.where(appMagMean1[recoverable1_10] <= 22.)[0]
recoverable1_30_22 = np.where(appMagMean1[recoverable1_30] <= 22.)[0]
recoverable1_100_22 = np.where(appMagMean1[recoverable1_100] <= 22.)[0]
recoverable1_1000_22 = np.where(appMagMean1[recoverable1_1000] <= 22.)[0]
recoverable1_195 = np.where(appMagMean1[recoverable1] <= 19.5)[0]
recoverable1_03_195 = np.where(appMagMean1[recoverable1_03] <= 19.5)[0]
recoverable1_1_195 = np.where(appMagMean1[recoverable1_1] <= 19.5)[0]
recoverable1_10_195 = np.where(appMagMean1[recoverable1_10] <= 19.5)[0]
recoverable1_30_195 = np.where(appMagMean1[recoverable1_30] <= 19.5)[0]
recoverable1_100_195 = np.where(appMagMean1[recoverable1_100] <= 19.5)[0]
recoverable1_1000_195 = np.where(appMagMean1[recoverable1_1000] <= 19.5)[0]
P03 = np.where(PeriodIn1 <= 0.3)[0]
P1 = np.where(PeriodIn1 <= 1)[0]
P10 = np.where(PeriodIn1 <= 10)[0]
P30 = np.where(PeriodIn1 <= 30)[0]
P100 = np.where(PeriodIn1 <= 100)[0]
P1000 = np.where(PeriodIn1 <= 1000)[0]
N_all1 = float(len(PeriodIn1)) #unnormalized
N_all1_03 = float(len(P03))
N_all1_1 = float(len(P1))
N_all1_10 = float(len(P10))
N_all1_30 = float(len(P30))
N_all1_100 = float(len(P100))
N_all1_1000 = float(len(P1000))
N_all1_22 = float(len(np.where(appMagMean1 <= 22)[0])) #unnormalized
N_all1_03_22 = float(len(np.where(appMagMean1[P03] <= 22)[0]))
N_all1_1_22 = float(len(np.where(appMagMean1[P1] <= 22)[0]))
N_all1_10_22 = float(len(np.where(appMagMean1[P10] <= 22)[0]))
N_all1_30_22 = float(len(np.where(appMagMean1[P30] <= 22)[0]))
N_all1_100_22 = float(len(np.where(appMagMean1[P100] <= 22)[0]))
N_all1_1000_22 = float(len(np.where(appMagMean1[P1000] <= 22)[0]))
N_all1_195 = float(len(np.where(appMagMean1 <= 19.5)[0])) #unnormalized
N_all1_03_195 = float(len(np.where(appMagMean1[P03] <= 19.5)[0]))
N_all1_1_195 = float(len(np.where(appMagMean1[P1] <= 19.5)[0]))
N_all1_10_195 = float(len(np.where(appMagMean1[P10] <= 19.5)[0]))
N_all1_30_195 = float(len(np.where(appMagMean1[P30] <= 19.5)[0]))
N_all1_100_195 = float(len(np.where(appMagMean1[P100] <= 19.5)[0]))
N_all1_1000_195 = float(len(np.where(appMagMean1[P1000] <= 19.5)[0]))
#NORMALIZED FROM HERE vv
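# Each raw count is divided by N_all1 (the number of simulated systems in this
# file) and multiplied by N_mult1, so every file contributes its expected
# number of binaries rather than its raw sample size.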
N_all1_norm = (N_all1/N_all1)*N_mult1 #normalized
N_all1_03norm = (N_all1_03/N_all1)*N_mult1
N_all1_1norm = (N_all1_1/N_all1)*N_mult1
N_all1_10norm = (N_all1_10/N_all1)*N_mult1
N_all1_30norm = (N_all1_30/N_all1)*N_mult1
N_all1_100norm = (N_all1_100/N_all1)*N_mult1
N_all1_1000norm = (N_all1_1000/N_all1)*N_mult1
N_observable1 = (float(len(observable1))/float(N_all1))*N_mult1
N_observable1_03 = (float(len(observable1_03))/float(N_all1))*N_mult1
N_observable1_1 = (float(len(observable1_1))/float(N_all1))*N_mult1
N_observable1_10 = (float(len(observable1_10))/float(N_all1))*N_mult1
N_observable1_30 = (float(len(observable1_30))/float(N_all1))*N_mult1
N_observable1_100 = (float(len(observable1_100))/float(N_all1))*N_mult1
N_observable1_1000 = (float(len(observable1_1000))/float(N_all1))*N_mult1
N_recoverable1 = (float(len(recoverable1))/float(N_all1))*N_mult1
N_recoverable1_03 = (float(len(recoverable1_03))/float(N_all1))*N_mult1
N_recoverable1_1 = (float(len(recoverable1_1))/float(N_all1))*N_mult1
N_recoverable1_10 = (float(len(recoverable1_10))/float(N_all1))*N_mult1
N_recoverable1_30 = (float(len(recoverable1_30))/float(N_all1))*N_mult1
N_recoverable1_100 = (float(len(recoverable1_100))/float(N_all1))*N_mult1
N_recoverable1_1000 = (float(len(recoverable1_1000))/float(N_all1))*N_mult1
N_all1_norm_22 = (N_all1_22/N_all1)*N_mult1 #normalized
N_all1_03norm_22 = (N_all1_03_22/N_all1)*N_mult1
N_all1_1norm_22 = (N_all1_1_22/N_all1)*N_mult1
N_all1_10norm_22 = (N_all1_10_22/N_all1)*N_mult1
N_all1_30norm_22 = (N_all1_30_22/N_all1)*N_mult1
N_all1_100norm_22 = (N_all1_100_22/N_all1)*N_mult1
N_all1_1000norm_22 = (N_all1_1000_22/N_all1)*N_mult1
N_observable1_22 = (float(len(observable1_22))/float(N_all1))*N_mult1
N_observable1_03_22 = (float(len(observable1_03_22))/float(N_all1))*N_mult1
N_observable1_1_22 = (float(len(observable1_1_22))/float(N_all1))*N_mult1
N_observable1_10_22 = (float(len(observable1_10_22))/float(N_all1))*N_mult1
N_observable1_30_22 = (float(len(observable1_30_22))/float(N_all1))*N_mult1
N_observable1_100_22 = (float(len(observable1_100_22))/float(N_all1))*N_mult1
N_observable1_1000_22 = (float(len(observable1_1000_22))/float(N_all1))*N_mult1
N_recoverable1_22 = (float(len(recoverable1_22))/float(N_all1))*N_mult1
N_recoverable1_03_22 = (float(len(recoverable1_03_22))/float(N_all1))*N_mult1
N_recoverable1_1_22 = (float(len(recoverable1_1_22))/float(N_all1))*N_mult1
N_recoverable1_10_22 = (float(len(recoverable1_10_22))/float(N_all1))*N_mult1
N_recoverable1_30_22 = (float(len(recoverable1_30_22))/float(N_all1))*N_mult1
N_recoverable1_100_22 = (float(len(recoverable1_100_22))/float(N_all1))*N_mult1
N_recoverable1_1000_22 = (float(len(recoverable1_1000_22))/float(N_all1))*N_mult1
N_all1_norm_195 = (N_all1_195/N_all1)*N_mult1 #normalized
N_all1_03norm_195 = (N_all1_03_195/N_all1)*N_mult1
N_all1_1norm_195 = (N_all1_1_195/N_all1)*N_mult1
N_all1_10norm_195 = (N_all1_10_195/N_all1)*N_mult1
N_all1_30norm_195 = (N_all1_30_195/N_all1)*N_mult1
N_all1_100norm_195 = (N_all1_100_195/N_all1)*N_mult1
N_all1_1000norm_195 = (N_all1_1000_195/N_all1)*N_mult1
N_observable1_195 = (float(len(observable1_195))/float(N_all1))*N_mult1
N_observable1_03_195 = (float(len(observable1_03_195))/float(N_all1))*N_mult1
N_observable1_1_195 = (float(len(observable1_1_195))/float(N_all1))*N_mult1
N_observable1_10_195 = (float(len(observable1_10_195))/float(N_all1))*N_mult1
N_observable1_30_195 = (float(len(observable1_30_195))/float(N_all1))*N_mult1
N_observable1_100_195 = (float(len(observable1_100_195))/float(N_all1))*N_mult1
N_observable1_1000_195 = (float(len(observable1_1000_195))/float(N_all1))*N_mult1
N_recoverable1_195 = (float(len(recoverable1_195))/float(N_all1))*N_mult1
N_recoverable1_03_195 = (float(len(recoverable1_03_195))/float(N_all1))*N_mult1
N_recoverable1_1_195 = (float(len(recoverable1_1_195))/float(N_all1))*N_mult1
N_recoverable1_10_195 = (float(len(recoverable1_10_195))/float(N_all1))*N_mult1
N_recoverable1_30_195 = (float(len(recoverable1_30_195))/float(N_all1))*N_mult1
N_recoverable1_100_195 = (float(len(recoverable1_100_195))/float(N_all1))*N_mult1
N_recoverable1_1000_195 = (float(len(recoverable1_1000_195))/float(N_all1))*N_mult1
N_totalnormal_array.append(float(N_all1_norm))
N_totalobservablenormal_array.append(float(N_observable1))
N_totalrecoverablenormal_array.append(float(N_recoverable1))
N_totalnormal_array_03.append(float(N_all1_03norm))
N_totalobservablenormal_array_03.append(float(N_observable1_03))
N_totalrecoverablenormal_array_03.append(float(N_recoverable1_03))
N_totalnormal_array_1.append(float(N_all1_1norm))
N_totalobservablenormal_array_1.append(float(N_observable1_1))
N_totalrecoverablenormal_array_1.append(float(N_recoverable1_1))
N_totalnormal_array_10.append(float(N_all1_10norm))
N_totalobservablenormal_array_10.append(float(N_observable1_10))
N_totalrecoverablenormal_array_10.append(float(N_recoverable1_10))
N_totalnormal_array_30.append(float(N_all1_30norm))
N_totalobservablenormal_array_30.append(float(N_observable1_30))
N_totalrecoverablenormal_array_30.append(float(N_recoverable1_30))
N_totalnormal_array_100.append(float(N_all1_100norm))
N_totalobservablenormal_array_100.append(float(N_observable1_100))
N_totalrecoverablenormal_array_100.append(float(N_recoverable1_100))
N_totalnormal_array_1000.append(float(N_all1_1000norm))
N_totalobservablenormal_array_1000.append(float(N_observable1_1000))
N_totalrecoverablenormal_array_1000.append(float(N_recoverable1_1000))
N_totalnormal22_array.append(float(N_all1_norm_22))
N_totalobservablenormal22_array.append(float(N_observable1_22))
N_totalrecoverablenormal22_array.append(float(N_recoverable1_22))
N_totalnormal22_array_03.append(float(N_all1_03norm_22))
N_totalobservablenormal22_array_03.append(float(N_observable1_03_22))
N_totalrecoverablenormal22_array_03.append(float(N_recoverable1_03_22))
N_totalnormal22_array_1.append(float(N_all1_1norm_22))
N_totalobservablenormal22_array_1.append(float(N_observable1_1_22))
N_totalrecoverablenormal22_array_1.append(float(N_recoverable1_1_22))
N_totalnormal22_array_10.append(float(N_all1_10norm_22))
N_totalobservablenormal22_array_10.append(float(N_observable1_10_22))
N_totalrecoverablenormal22_array_10.append(float(N_recoverable1_10_22))
N_totalnormal22_array_30.append(float(N_all1_30norm_22))
N_totalobservablenormal22_array_30.append(float(N_observable1_30_22))
N_totalrecoverablenormal22_array_30.append(float(N_recoverable1_30_22))
N_totalnormal22_array_100.append(float(N_all1_100norm_22))
N_totalobservablenormal22_array_100.append(float(N_observable1_100_22))
N_totalrecoverablenormal22_array_100.append(float(N_recoverable1_100_22))
N_totalnormal22_array_1000.append(float(N_all1_1000norm_22))
N_totalobservablenormal22_array_1000.append(float(N_observable1_1000_22))
N_totalrecoverablenormal22_array_1000.append(float(N_recoverable1_1000_22))
N_totalnormal195_array.append(float(N_all1_norm_195))
N_totalobservablenormal195_array.append(float(N_observable1_195))
N_totalrecoverablenormal195_array.append(float(N_recoverable1_195))
N_totalnormal195_array_03.append(float(N_all1_03norm_195))
N_totalobservablenormal195_array_03.append(float(N_observable1_03_195))
N_totalrecoverablenormal195_array_03.append(float(N_recoverable1_03_195))
N_totalnormal195_array_1.append(float(N_all1_1norm_195))
N_totalobservablenormal195_array_1.append(float(N_observable1_1_195))
N_totalrecoverablenormal195_array_1.append(float(N_recoverable1_1_195))
N_totalnormal195_array_10.append(float(N_all1_10norm_195))
N_totalobservablenormal195_array_10.append(float(N_observable1_10_195))
N_totalrecoverablenormal195_array_10.append(float(N_recoverable1_10_195))
N_totalnormal195_array_30.append(float(N_all1_30norm_195))
N_totalobservablenormal195_array_30.append(float(N_observable1_30_195))
N_totalrecoverablenormal195_array_30.append(float(N_recoverable1_30_195))
N_totalnormal195_array_100.append(float(N_all1_100norm_195))
N_totalobservablenormal195_array_100.append(float(N_observable1_100_195))
N_totalrecoverablenormal195_array_100.append(float(N_recoverable1_100_195))
N_totalnormal195_array_1000.append(float(N_all1_1000norm_195))
N_totalobservablenormal195_array_1000.append(float(N_observable1_1000_195))
N_totalrecoverablenormal195_array_1000.append(float(N_recoverable1_1000_195))
N_totalnormal = np.sum(N_totalnormal_array)
N_totalnormal_03 = np.sum(N_totalnormal_array_03)
N_totalnormal_1 = np.sum(N_totalnormal_array_1)
N_totalnormal_10 = np.sum(N_totalnormal_array_10)
N_totalnormal_30 = np.sum(N_totalnormal_array_30)
N_totalnormal_100 = np.sum(N_totalnormal_array_100)
N_totalnormal_1000 = np.sum(N_totalnormal_array_1000)
N_totalobservablenormal = np.sum(N_totalobservablenormal_array)
N_totalobservablenormal_03 = np.sum(N_totalobservablenormal_array_03)
N_totalobservablenormal_1 = np.sum(N_totalobservablenormal_array_1)
N_totalobservablenormal_10 = np.sum(N_totalobservablenormal_array_10)
N_totalobservablenormal_30 = np.sum(N_totalobservablenormal_array_30)
N_totalobservablenormal_100 = np.sum(N_totalobservablenormal_array_100)
N_totalobservablenormal_1000 = np.sum(N_totalobservablenormal_array_1000)
N_totalrecoverablenormal = np.sum(N_totalrecoverablenormal_array)
N_totalrecoverablenormal_03 = np.sum(N_totalrecoverablenormal_array_03)
N_totalrecoverablenormal_1 = np.sum(N_totalrecoverablenormal_array_1)
N_totalrecoverablenormal_10 = np.sum(N_totalrecoverablenormal_array_10)
N_totalrecoverablenormal_30 = np.sum(N_totalrecoverablenormal_array_30)
N_totalrecoverablenormal_100 = np.sum(N_totalrecoverablenormal_array_100)
N_totalrecoverablenormal_1000 = np.sum(N_totalrecoverablenormal_array_1000)
N_totalnormal22 = np.sum(N_totalnormal22_array)
N_totalnormal22_03 = np.sum(N_totalnormal22_array_03)
N_totalnormal22_1 = np.sum(N_totalnormal22_array_1)
N_totalnormal22_10 = np.sum(N_totalnormal22_array_10)
N_totalnormal22_30 = np.sum(N_totalnormal22_array_30)
N_totalnormal22_100 = np.sum(N_totalnormal22_array_100)
N_totalnormal22_1000 = np.sum(N_totalnormal22_array_1000)
N_totalobservablenormal22 = np.sum(N_totalobservablenormal22_array)
N_totalobservablenormal22_03 = np.sum(N_totalobservablenormal22_array_03)
N_totalobservablenormal22_1 = np.sum(N_totalobservablenormal22_array_1)
N_totalobservablenormal22_10 = np.sum(N_totalobservablenormal22_array_10)
N_totalobservablenormal22_30 = np.sum(N_totalobservablenormal22_array_30)
N_totalobservablenormal22_100 = np.sum(N_totalobservablenormal22_array_100)
N_totalobservablenormal22_1000 = np.sum(N_totalobservablenormal22_array_1000)
N_totalrecoverablenormal22 = np.sum(N_totalrecoverablenormal22_array)
N_totalrecoverablenormal22_03 = np.sum(N_totalrecoverablenormal22_array_03)
N_totalrecoverablenormal22_1 = np.sum(N_totalrecoverablenormal22_array_1)
N_totalrecoverablenormal22_10 = np.sum(N_totalrecoverablenormal22_array_10)
N_totalrecoverablenormal22_30 = np.sum(N_totalrecoverablenormal22_array_30)
N_totalrecoverablenormal22_100 = np.sum(N_totalrecoverablenormal22_array_100)
N_totalrecoverablenormal22_1000 = np.sum(N_totalrecoverablenormal22_array_1000)
N_totalnormal195 = np.sum(N_totalnormal195_array)
N_totalnormal195_03 = np.sum(N_totalnormal195_array_03)
N_totalnormal195_1 = np.sum(N_totalnormal195_array_1)
N_totalnormal195_10 = np.sum(N_totalnormal195_array_10)
N_totalnormal195_30 = np.sum(N_totalnormal195_array_30)
N_totalnormal195_100 = np.sum(N_totalnormal195_array_100)
N_totalnormal195_1000 = np.sum(N_totalnormal195_array_1000)
N_totalobservablenormal195 = np.sum(N_totalobservablenormal195_array)
N_totalobservablenormal195_03 = np.sum(N_totalobservablenormal195_array_03)
N_totalobservablenormal195_1 = np.sum(N_totalobservablenormal195_array_1)
N_totalobservablenormal195_10 = np.sum(N_totalobservablenormal195_array_10)
N_totalobservablenormal195_30 = np.sum(N_totalobservablenormal195_array_30)
N_totalobservablenormal195_100 = np.sum(N_totalobservablenormal195_array_100)
N_totalobservablenormal195_1000 = np.sum(N_totalobservablenormal195_array_1000)
N_totalrecoverablenormal195 = np.sum(N_totalrecoverablenormal195_array)
N_totalrecoverablenormal195_03 = np.sum(N_totalrecoverablenormal195_array_03)
N_totalrecoverablenormal195_1 = np.sum(N_totalrecoverablenormal195_array_1)
N_totalrecoverablenormal195_10 = np.sum(N_totalrecoverablenormal195_array_10)
N_totalrecoverablenormal195_30 = np.sum(N_totalrecoverablenormal195_array_30)
N_totalrecoverablenormal195_100 = np.sum(N_totalrecoverablenormal195_array_100)
N_totalrecoverablenormal195_1000 = np.sum(N_totalrecoverablenormal195_array_1000)
print("N_totalnormal = ", N_totalnormal, "N_totalobservablenormal = ", N_totalobservablenormal, "N_totalrecoverablenormal = ", N_totalrecoverablenormal)
wholerecoverypercent_normal = (N_totalrecoverablenormal/N_totalobservablenormal)*100
wholerecoverypercent_normal_03 = (N_totalrecoverablenormal_03/N_totalobservablenormal_03)*100
wholerecoverypercent_normal_1 = (N_totalrecoverablenormal_1/N_totalobservablenormal_1)*100
wholerecoverypercent_normal_10 = (N_totalrecoverablenormal_10/N_totalobservablenormal_10)*100
wholerecoverypercent_normal_30 = (N_totalrecoverablenormal_30/N_totalobservablenormal_30)*100
wholerecoverypercent_normal_100 = (N_totalrecoverablenormal_100/N_totalobservablenormal_100)*100
wholerecoverypercent_normal_1000 = (N_totalrecoverablenormal_1000/N_totalobservablenormal_1000)*100
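# The sigma* quantities below appear to be sqrt(N) (Poisson-style) counting
# uncertainties on the recoverable counts, expressed as a percentage of the
# corresponding observable counts, to accompany the recovery percentages above.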
sigmanormal = ((N_totalrecoverablenormal**(1/2))/N_totalobservablenormal)*100
sigmanormal_03 = ((N_totalrecoverablenormal_03**(1/2))/N_totalobservablenormal_03)*100
sigmanormal_1 = ((N_totalrecoverablenormal_1**(1/2))/N_totalobservablenormal_1)*100
sigmanormal_10 = ((N_totalrecoverablenormal_10**(1/2))/N_totalobservablenormal_10)*100
sigmanormal_30 = ((N_totalrecoverablenormal_30**(1/2))/N_totalobservablenormal_30)*100
sigmanormal_100 = ((N_totalrecoverablenormal_100**(1/2))/N_totalobservablenormal_100)*100
sigmanormal_1000 = ((N_totalrecoverablenormal_1000**(1/2))/N_totalobservablenormal_1000)*100
print("wholerecoverypercent_normal = ", wholerecoverypercent_normal, "wholerecoverypercent_normal_03 = ", wholerecoverypercent_normal_03, "wholerecoverypercent_normal_1 = ", wholerecoverypercent_normal_1, "wholerecoverypercent_normal_10 = ", wholerecoverypercent_normal_10, "wholerecoverypercent_normal_30 = ", wholerecoverypercent_normal_30, "wholerecoverypercent_normal_100 = ", wholerecoverypercent_normal_100, "wholerecoverypercent_normal_1000 = ", wholerecoverypercent_normal_1000)
print("sigmanormal = ", sigmanormal, "sigmanormal_03 = ", sigmanormal_03, "sigmanormal_1 = ", sigmanormal_1, "sigmanormal_10 = ", sigmanormal_10, "sigmanormal_30 = ", sigmanormal_30, "sigmanormal_100 = ", sigmanormal_100, "sigmanormal_1000 = ", sigmanormal_1000)
overallrecoverypercent_normal = (N_totalrecoverablenormal/N_totalnormal)*100
overallrecoverypercent_normal_03 = (N_totalrecoverablenormal_03/N_totalnormal_03)*100
overallrecoverypercent_normal_1 = (N_totalrecoverablenormal_1/N_totalnormal_1)*100
overallrecoverypercent_normal_10 = (N_totalrecoverablenormal_10/N_totalnormal_10)*100
overallrecoverypercent_normal_30 = (N_totalrecoverablenormal_30/N_totalnormal_30)*100
overallrecoverypercent_normal_100 = (N_totalrecoverablenormal_100/N_totalnormal_100)*100
overallrecoverypercent_normal_1000 = (N_totalrecoverablenormal_1000/N_totalnormal_1000)*100
overallsigmanormal = ((N_totalrecoverablenormal**(1/2))/N_totalnormal)*100
overallsigmanormal_03 = ((N_totalrecoverablenormal_03**(1/2))/N_totalnormal_03)*100
overallsigmanormal_1 = ((N_totalrecoverablenormal_1**(1/2))/N_totalnormal_1)*100
overallsigmanormal_10 = ((N_totalrecoverablenormal_10**(1/2))/N_totalnormal_10)*100
overallsigmanormal_30 = ((N_totalrecoverablenormal_30**(1/2))/N_totalnormal_30)*100
overallsigmanormal_100 = ((N_totalrecoverablenormal_100**(1/2))/N_totalnormal_100)*100
overallsigmanormal_1000 = ((N_totalrecoverablenormal_1000**(1/2))/N_totalnormal_1000)*100
print("overallrecoverypercent_normal = ", overallrecoverypercent_normal, "overallrecoverypercent_normal_03 = ", overallrecoverypercent_normal_03, "overallrecoverypercent_normal_1 = ", overallrecoverypercent_normal_1, "overallrecoverypercent_normal_10 = ", overallrecoverypercent_normal_10, "overallrecoverypercent_normal_30 = ", overallrecoverypercent_normal_30, "overallrecoverypercent_normal_100 = ", overallrecoverypercent_normal_100, "overallrecoverypercent_normal_1000 = ", overallrecoverypercent_normal_1000)
print("overallsigmanormal = ", overallsigmanormal, "overallsigmanormal_03 = ", overallsigmanormal_03, "overallsigmanormal_1 = ", overallsigmanormal_1, "overallsigmanormal_10 = ", overallsigmanormal_10, "overallsigmanormal_30 = ", overallsigmanormal_30, "overallsigmanormal_100 = ", overallsigmanormal_100, "overallsigmanormal_1000 = ", overallsigmanormal_1000)
wholerecoverypercent_normal22 = (N_totalrecoverablenormal22/N_totalobservablenormal22)*100
wholerecoverypercent_normal22_03 = (N_totalrecoverablenormal22_03/N_totalobservablenormal22_03)*100
wholerecoverypercent_normal22_1 = (N_totalrecoverablenormal22_1/N_totalobservablenormal22_1)*100
wholerecoverypercent_normal22_10 = (N_totalrecoverablenormal22_10/N_totalobservablenormal22_10)*100
wholerecoverypercent_normal22_30 = (N_totalrecoverablenormal22_30/N_totalobservablenormal22_30)*100
wholerecoverypercent_normal22_100 = (N_totalrecoverablenormal22_100/N_totalobservablenormal22_100)*100
wholerecoverypercent_normal22_1000 = (N_totalrecoverablenormal22_1000/N_totalobservablenormal22_1000)*100
sigmanormal22 = ((N_totalrecoverablenormal22**(1/2))/N_totalobservablenormal22)*100
sigmanormal22_03 = ((N_totalrecoverablenormal22_03**(1/2))/N_totalobservablenormal22_03)*100
sigmanormal22_1 = ((N_totalrecoverablenormal22_1**(1/2))/N_totalobservablenormal22_1)*100
sigmanormal22_10 = ((N_totalrecoverablenormal22_10**(1/2))/N_totalobservablenormal22_10)*100
sigmanormal22_30 = ((N_totalrecoverablenormal22_30**(1/2))/N_totalobservablenormal22_30)*100
sigmanormal22_100 = ((N_totalrecoverablenormal22_100**(1/2))/N_totalobservablenormal22_100)*100
sigmanormal22_1000 = ((N_totalrecoverablenormal22_1000**(1/2))/N_totalobservablenormal22_1000)*100
print("wholerecoverypercent_normal22 = ", wholerecoverypercent_normal22, "wholerecoverypercent_normal22_03 = ", wholerecoverypercent_normal22_03, "wholerecoverypercent_normal22_1 = ", wholerecoverypercent_normal22_1, "wholerecoverypercent_normal22_10 = ", wholerecoverypercent_normal22_10, "wholerecoverypercent_normal22_30 = ", wholerecoverypercent_normal22_30, "wholerecoverypercent_normal22_100 = ", wholerecoverypercent_normal22_100, "wholerecoverypercent_normal22_1000 = ", wholerecoverypercent_normal22_1000)
print("sigmanormal22 = ", sigmanormal22, "sigmanormal22_03 = ", sigmanormal22_03, "sigmanormal22_1 = ", sigmanormal22_1, "sigmanormal22_10 = ", sigmanormal22_10, "sigmanormal22_30 = ", sigmanormal22_30, "sigmanormal22_100 = ", sigmanormal22_100, "sigmanormal22_1000 = ", sigmanormal22_1000)
overallrecoverypercent_normal22 = (N_totalrecoverablenormal22/N_totalnormal22)*100
overallrecoverypercent_normal22_03 = (N_totalrecoverablenormal22_03/N_totalnormal22_03)*100
overallrecoverypercent_normal22_1 = (N_totalrecoverablenormal22_1/N_totalnormal22_1)*100
overallrecoverypercent_normal22_10 = (N_totalrecoverablenormal22_10/N_totalnormal22_10)*100
overallrecoverypercent_normal22_30 = (N_totalrecoverablenormal22_30/N_totalnormal22_30)*100
overallrecoverypercent_normal22_100 = (N_totalrecoverablenormal22_100/N_totalnormal22_100)*100
overallrecoverypercent_normal22_1000 = (N_totalrecoverablenormal22_1000/N_totalnormal22_1000)*100
overallsigmanormal22 = ((N_totalrecoverablenormal22**(1/2))/N_totalnormal22)*100
overallsigmanormal22_03 = ((N_totalrecoverablenormal22_03**(1/2))/N_totalnormal22_03)*100
overallsigmanormal22_1 = ((N_totalrecoverablenormal22_1**(1/2))/N_totalnormal22_1)*100
overallsigmanormal22_10 = ((N_totalrecoverablenormal22_10**(1/2))/N_totalnormal22_10)*100
overallsigmanormal22_30 = ((N_totalrecoverablenormal22_30**(1/2))/N_totalnormal22_30)*100
overallsigmanormal22_100 = ((N_totalrecoverablenormal22_100**(1/2))/N_totalnormal22_100)*100
overallsigmanormal22_1000 = ((N_totalrecoverablenormal22_1000**(1/2))/N_totalnormal22_1000)*100
print("overallrecoverypercent_normal22 = ", overallrecoverypercent_normal22, "overallrecoverypercent_normal22_03 = ", overallrecoverypercent_normal22_03, "overallrecoverypercent_normal22_1 = ", overallrecoverypercent_normal22_1, "overallrecoverypercent_normal22_10 = ", overallrecoverypercent_normal22_10, "overallrecoverypercent_normal22_30 = ", overallrecoverypercent_normal22_30, "overallrecoverypercent_normal22_100 = ", overallrecoverypercent_normal22_100, "overallrecoverypercent_normal22_1000 = ", overallrecoverypercent_normal22_1000)
print("overallsigmanormal22 = ", overallsigmanormal22, "overallsigmanormal22_03 = ", overallsigmanormal22_03, "overallsigmanormal22_1 = ", overallsigmanormal22_1, "overallsigmanormal22_10 = ", overallsigmanormal22_10, "overallsigmanormal22_30 = ", overallsigmanormal22_30, "overallsigmanormal22_100 = ", overallsigmanormal22_100, "overallsigmanormal22_1000 = ", overallsigmanormal22_1000)
wholerecoverypercent_normal195 = (N_totalrecoverablenormal195/N_totalobservablenormal195)*100
wholerecoverypercent_normal195_03 = (N_totalrecoverablenormal195_03/N_totalobservablenormal195_03)*100
wholerecoverypercent_normal195_1 = (N_totalrecoverablenormal195_1/N_totalobservablenormal195_1)*100
wholerecoverypercent_normal195_10 = (N_totalrecoverablenormal195_10/N_totalobservablenormal195_10)*100
wholerecoverypercent_normal195_30 = (N_totalrecoverablenormal195_30/N_totalobservablenormal195_30)*100
wholerecoverypercent_normal195_100 = (N_totalrecoverablenormal195_100/N_totalobservablenormal195_100)*100
wholerecoverypercent_normal195_1000 = (N_totalrecoverablenormal195_1000/N_totalobservablenormal195_1000)*100
sigmanormal195 = ((N_totalrecoverablenormal195**(1/2))/N_totalobservablenormal195)*100
sigmanormal195_03 = ((N_totalrecoverablenormal195_03**(1/2))/N_totalobservablenormal195_03)*100
sigmanormal195_1 = ((N_totalrecoverablenormal195_1**(1/2))/N_totalobservablenormal195_1)*100
sigmanormal195_10 = ((N_totalrecoverablenormal195_10**(1/2))/N_totalobservablenormal195_10)*100
sigmanormal195_30 = ((N_totalrecoverablenormal195_30**(1/2))/N_totalobservablenormal195_30)*100
sigmanormal195_100 = ((N_totalrecoverablenormal195_100**(1/2))/N_totalobservablenormal195_100)*100
sigmanormal195_1000 = ((N_totalrecoverablenormal195_1000**(1/2))/N_totalobservablenormal195_1000)*100
print("wholerecoverypercent_normal195 = ", wholerecoverypercent_normal195, "wholerecoverypercent_normal195_03 = ", wholerecoverypercent_normal195_03, "wholerecoverypercent_normal195_1 = ", wholerecoverypercent_normal195_1, "wholerecoverypercent_normal195_10 = ", wholerecoverypercent_normal195_10, "wholerecoverypercent_normal195_30 = ", wholerecoverypercent_normal195_30, "wholerecoverypercent_normal195_100 = ", wholerecoverypercent_normal195_100, "wholerecoverypercent_normal195_1000 = ", wholerecoverypercent_normal195_1000)
print("sigmanormal195 = ", sigmanormal195, "sigmanormal195_03 = ", sigmanormal195_03, "sigmanormal195_1 = ", sigmanormal195_1, "sigmanormal195_10 = ", sigmanormal195_10, "sigmanormal195_30 = ", sigmanormal195_30, "sigmanormal195_100 = ", sigmanormal195_100, "sigmanormal195_1000 = ", sigmanormal195_1000)
overallrecoverypercent_normal195 = (N_totalrecoverablenormal195/N_totalnormal195)*100
overallrecoverypercent_normal195_03 = (N_totalrecoverablenormal195_03/N_totalnormal195_03)*100
overallrecoverypercent_normal195_1 = (N_totalrecoverablenormal195_1/N_totalnormal195_1)*100
overallrecoverypercent_normal195_10 = (N_totalrecoverablenormal195_10/N_totalnormal195_10)*100
overallrecoverypercent_normal195_30 = (N_totalrecoverablenormal195_30/N_totalnormal195_30)*100
overallrecoverypercent_normal195_100 = (N_totalrecoverablenormal195_100/N_totalnormal195_100)*100
overallrecoverypercent_normal195_1000 = (N_totalrecoverablenormal195_1000/N_totalnormal195_1000)*100
overallsigmanormal195 = ((N_totalrecoverablenormal195**(1/2))/N_totalnormal195)*100
overallsigmanormal195_03 = ((N_totalrecoverablenormal195_03**(1/2))/N_totalnormal195_03)*100
overallsigmanormal195_1 = ((N_totalrecoverablenormal195_1**(1/2))/N_totalnormal195_1)*100
overallsigmanormal195_10 = ((N_totalrecoverablenormal195_10**(1/2))/N_totalnormal195_10)*100
overallsigmanormal195_30 = ((N_totalrecoverablenormal195_30**(1/2))/N_totalnormal195_30)*100
overallsigmanormal195_100 = ((N_totalrecoverablenormal195_100**(1/2))/N_totalnormal195_100)*100
overallsigmanormal195_1000 = ((N_totalrecoverablenormal195_1000**(1/2))/N_totalnormal195_1000)*100
print("overallrecoverypercent_normal195 = ", overallrecoverypercent_normal195, "overallrecoverypercent_normal195_03 = ", overallrecoverypercent_normal195_03, "overallrecoverypercent_normal195_1 = ", overallrecoverypercent_normal195_1, "overallrecoverypercent_normal195_10 = ", overallrecoverypercent_normal195_10, "overallrecoverypercent_normal195_30 = ", overallrecoverypercent_normal195_30, "overallrecoverypercent_normal195_100 = ", overallrecoverypercent_normal195_100, "overallrecoverypercent_normal195_1000 = ", overallrecoverypercent_normal195_1000)
print("overallsigmanormal195 = ", overallsigmanormal195, "overallsigmanormal195_03 = ", overallsigmanormal195_03, "overallsigmanormal195_1 = ", overallsigmanormal195_1, "overallsigmanormal195_10 = ", overallsigmanormal195_10, "overallsigmanormal195_30 = ", overallsigmanormal195_30, "overallsigmanormal195_100 = ", overallsigmanormal195_100, "overallsigmanormal195_1000 = ", overallsigmanormal195_1000)
print("binarypercent_22 = ", (N_totalnormal22/N_totalnormal)*100, "+/-", ((N_totalnormal22**(1/2))/N_totalnormal)*100)
print("binarypercent_195 = ", (N_totalnormal195/N_totalnormal)*100, "+/-", ((N_totalnormal195**(1/2))/N_totalnormal)*100)
print("binarypercent_03 = ", (N_totalnormal_03/N_totalnormal)*100, "+/-", ((N_totalnormal_03**(1/2))/N_totalnormal)*100)
print("binarypercent_1 = ", (N_totalnormal_1/N_totalnormal)*100, "+/-", ((N_totalnormal_1**(1/2))/N_totalnormal)*100)
print("binarypercent_10 = ", (N_totalnormal_10/N_totalnormal)*100, "+/-", ((N_totalnormal_10**(1/2))/N_totalnormal)*100)
print("binarypercent_30 = ", (N_totalnormal_30/N_totalnormal)*100, "+/-", ((N_totalnormal_30**(1/2))/N_totalnormal)*100)
print("binarypercent_100 = ", (N_totalnormal_100/N_totalnormal)*100, "+/-", ((N_totalnormal_100**(1/2))/N_totalnormal)*100)
print("binarypercent_1000 = ", (N_totalnormal_1000/N_totalnormal)*100, "+/-", ((N_totalnormal_1000**(1/2))/N_totalnormal)*100)
print("observablepercent_03 = ", (N_totalobservablenormal_03/N_totalnormal_03)*100, "+/-", ((N_totalobservablenormal_03**(1/2))/N_totalnormal_03)*100)
print("observablepercent_1 = ", (N_totalobservablenormal_1/N_totalnormal_1)*100, "+/-", ((N_totalobservablenormal_1**(1/2))/N_totalnormal_1)*100)
print("observablepercent_10 = ", (N_totalobservablenormal_10/N_totalnormal_10)*100, "+/-", ((N_totalobservablenormal_10**(1/2))/N_totalnormal_10)*100)
print("observablepercent_30 = ", (N_totalobservablenormal_30/N_totalnormal_30)*100, "+/-", ((N_totalobservablenormal_30**(1/2))/N_totalnormal_30)*100)
print("observablepercent_100 = ", (N_totalobservablenormal_100/N_totalnormal_100)*100, "+/-", ((N_totalobservablenormal_100**(1/2))/N_totalnormal_100)*100)
print("observablepercent_1000 = ", (N_totalobservablenormal_1000/N_totalnormal_1000)*100, "+/-", ((N_totalobservablenormal_1000**(1/2))/N_totalnormal_1000)*100)
print("observablepercent = ", (N_totalobservablenormal/N_totalnormal)*100, "+/-", ((N_totalobservablenormal**(1/2))/N_totalnormal)*100)
print("observablepercent22 = ", (N_totalobservablenormal22/N_totalnormal22)*100, "+/-", ((N_totalobservablenormal22**(1/2))/N_totalnormal22)*100)
print("observablepercent195 = ", (N_totalobservablenormal195/N_totalnormal195)*100, "+/-", ((N_totalobservablenormal195**(1/2))/N_totalnormal195)*100)
for filefast_ in sorted(allFiles_fast):
filename2 = filefast_[69:] #when file path no longer has /old in it, will be filefast_[65:]
fileid2 = filename2.strip('output_file.csv')
colorvalue2 = int(fileid2)
colorvalue_fast.append(colorvalue2)
print ("I'm starting " + fileid2)
datfast = pd.read_csv(filefast_, sep = ',', header=2)
##########################################################
datfast1 = pd.read_csv(filefast_, sep = ',', header=0, nrows=1)
N_tri2 = datfast1["NstarsTRILEGAL"][0]
print("N_tri2 = ", N_tri2)
m1hAll02, m1b2 = np.histogram(datfast["m1"], bins=mbins)
dm12 = np.diff(m1b2)
m1val2 = m1b2[:-1] + dm12/2.
fb2 = np.sum(m1hAll02*dm12*fbFit(m1val2))
N_mult2 = N_tri2*fb2
##########################################################
PeriodIn2 = datfast['p'] # input period -- 'p' in data file
if len(PeriodIn2) == 0.:
continue
if N_tri2 == 0:
continue
else:
PeriodOut2 = datfast['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean2 = datfast['appMagMean'] #when file path is back to fast/output_files vs. fast/old/output_files, this should be changed to appMagMean_r
observable2 = np.where(PeriodOut2 != -999)[0]
observable2_03 = np.where(PeriodIn2[observable2] <= 0.3)[0]
observable2_1 = np.where(PeriodIn2[observable2] <= 1)[0]
observable2_10 = np.where(PeriodIn2[observable2] <= 10)[0]
observable2_30 = np.where(PeriodIn2[observable2] <= 30)[0]
observable2_100 = np.where(PeriodIn2[observable2] <= 100)[0]
observable2_1000 = np.where(PeriodIn2[observable2] <= 1000)[0]
observable2_22 = np.where(appMagMean2[observable2] <= 22.)[0]
observable2_03_22 = np.where(appMagMean2[observable2_03] <= 22.)[0]
observable2_1_22 = np.where(appMagMean2[observable2_1] <= 22.)[0]
observable2_10_22 = np.where(appMagMean2[observable2_10] <= 22.)[0]
observable2_30_22 = np.where(appMagMean2[observable2_30] <= 22.)[0]
observable2_100_22 = np.where(appMagMean2[observable2_100] <= 22.)[0]
observable2_1000_22 = np.where(appMagMean2[observable2_1000] <= 22.)[0]
observable2_195 = np.where(appMagMean2[observable2] <= 19.5)[0]
observable2_03_195 = np.where(appMagMean2[observable2_03] <= 19.5)[0]
observable2_1_195 = np.where(appMagMean2[observable2_1] <= 19.5)[0]
observable2_10_195 = np.where(appMagMean2[observable2_10] <= 19.5)[0]
observable2_30_195 = np.where(appMagMean2[observable2_30] <= 19.5)[0]
observable2_100_195 = np.where(appMagMean2[observable2_100] <= 19.5)[0]
observable2_1000_195 = np.where(appMagMean2[observable2_1000] <= 19.5)[0]
Sigma_Period_Whole2 = abs(PeriodOut2 - PeriodIn2)/PeriodIn2
Sigma_Period_Half2 = abs(PeriodOut2 - 0.5*PeriodIn2)/(0.5*PeriodIn2)
Sigma_Period_Twice2 = abs(PeriodOut2 - 2*PeriodIn2)/(2*PeriodIn2)
recover_twice2 = np.where(Sigma_Period_Twice2 <= 0.1)[0]
recover_twice2_03 = np.where(PeriodIn2[recover_twice2] <= 0.3)[0]
recover_twice2_1 = np.where(PeriodIn2[recover_twice2] <= 1)[0]
recover_twice2_10 = np.where(PeriodIn2[recover_twice2] <= 10)[0]
recover_twice2_30 = np.where(PeriodIn2[recover_twice2] <= 30)[0]
recover_twice2_100 = np.where(PeriodIn2[recover_twice2] <= 100)[0]
recover_twice2_1000 = np.where(PeriodIn2[recover_twice2] <= 1000)[0]
#recover_half1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Half1), Sigma_Period_Half1 <= 0.1))[0]
recover_half2 = np.where(Sigma_Period_Half2 <= 0.1)[0]
recover_half2_03 = np.where(PeriodIn2[recover_half2] <= 0.3)[0]
recover_half2_1 = np.where(PeriodIn2[recover_half2] <= 1)[0]
recover_half2_10 = np.where(PeriodIn2[recover_half2] <= 10)[0]
recover_half2_30 = np.where(PeriodIn2[recover_half2] <= 30)[0]
recover_half2_100 = np.where(PeriodIn2[recover_half2] <= 100)[0]
recover_half2_1000 = np.where(PeriodIn2[recover_half2] <= 1000)[0]
#recover_whole1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Whole1), Sigma_Period_Whole1 <= 0.1))[0]
recover_whole2 = np.where(Sigma_Period_Whole2 <= 0.1)[0]
recover_whole2_03 = np.where(PeriodIn2[recover_whole2] <= 0.3)[0]
recover_whole2_1 = np.where(PeriodIn2[recover_whole2] <= 1)[0]
recover_whole2_10 = np.where(PeriodIn2[recover_whole2] <= 10)[0]
recover_whole2_30 = np.where(PeriodIn2[recover_whole2] <= 30)[0]
recover_whole2_100 = np.where(PeriodIn2[recover_whole2] <= 100)[0]
recover_whole2_1000 = np.where(PeriodIn2[recover_whole2] <= 1000)[0]
recoverable2 = np.concatenate((recover_twice2, recover_whole2, recover_half2), axis=0)
recoverable2_03 = np.concatenate((recover_twice2_03, recover_whole2_03, recover_half2_03), axis=0)
recoverable2_1 = np.concatenate((recover_twice2_1, recover_whole2_1, recover_half2_1), axis=0)
recoverable2_10 = np.concatenate((recover_twice2_10, recover_whole2_10, recover_half2_10), axis=0)
recoverable2_30 = np.concatenate((recover_twice2_30, recover_whole2_30, recover_half2_30), axis=0)
recoverable2_100 = np.concatenate((recover_twice2_100, recover_whole2_100, recover_half2_100), axis=0)
recoverable2_1000 = np.concatenate((recover_twice2_1000, recover_whole2_1000, recover_half2_1000), axis=0)
recoverable2_22 = np.where(appMagMean2[recoverable2] <= 22.)[0]
recoverable2_03_22 = np.where(appMagMean2[recoverable2_03] <= 22.)[0]
recoverable2_1_22 = np.where(appMagMean2[recoverable2_1] <= 22.)[0]
recoverable2_10_22 = np.where(appMagMean2[recoverable2_10] <= 22.)[0]
recoverable2_30_22 = np.where(appMagMean2[recoverable2_30] <= 22.)[0]
recoverable2_100_22 = np.where(appMagMean2[recoverable2_100] <= 22.)[0]
recoverable2_1000_22 = np.where(appMagMean2[recoverable2_1000] <= 22.)[0]
recoverable2_195 = np.where(appMagMean2[recoverable2] <= 19.5)[0]
recoverable2_03_195 = np.where(appMagMean2[recoverable2_03] <= 19.5)[0]
recoverable2_1_195 = np.where(appMagMean2[recoverable2_1] <= 19.5)[0]
recoverable2_10_195 = np.where(appMagMean2[recoverable2_10] <= 19.5)[0]
recoverable2_30_195 = np.where(appMagMean2[recoverable2_30] <= 19.5)[0]
recoverable2_100_195 = np.where(appMagMean2[recoverable2_100] <= 19.5)[0]
recoverable2_1000_195 = np.where(appMagMean2[recoverable2_1000] <= 19.5)[0]
P03 = np.where(PeriodIn2 <= 0.3)[0]
P1 = np.where(PeriodIn2 <= 1)[0]
P10 = np.where(PeriodIn2 <= 10)[0]
P30 = np.where(PeriodIn2 <= 30)[0]
P100 = np.where(PeriodIn2 <= 100)[0]
P1000 = np.where(PeriodIn2 <= 1000)[0]
N_all2 = float(len(PeriodIn2)) #unnormalized
N_all2_03 = float(len(P03))
N_all2_1 = float(len(P1))
N_all2_10 = float(len(P10))
N_all2_30 = float(len(P30))
N_all2_100 = float(len(P100))
N_all2_1000 = float(len(P1000))
N_all2_22 = float(len(np.where(appMagMean2 <= 22)[0])) #unnormalized
N_all2_03_22 = float(len(np.where(appMagMean2[P03] <= 22)[0]))
N_all2_1_22 = float(len(np.where(appMagMean2[P1] <= 22)[0]))
N_all2_10_22 = float(len(np.where(appMagMean2[P10] <= 22)[0]))
N_all2_30_22 = float(len(np.where(appMagMean2[P30] <= 22)[0]))
N_all2_100_22 = float(len(np.where(appMagMean2[P100] <= 22)[0]))
N_all2_1000_22 = float(len(np.where(appMagMean2[P1000] <= 22)[0]))
N_all2_195 = float(len(np.where(appMagMean2 <= 19.5)[0])) #unnormalized
N_all2_03_195 = float(len(np.where(appMagMean2[P03] <= 19.5)[0]))
N_all2_1_195 = float(len(np.where(appMagMean2[P1] <= 19.5)[0]))
N_all2_10_195 = float(len(np.where(appMagMean2[P10] <= 19.5)[0]))
N_all2_30_195 = float(len(np.where(appMagMean2[P30] <= 19.5)[0]))
N_all2_100_195 = float(len(np.where(appMagMean2[P100] <= 19.5)[0]))
N_all2_1000_195 = float(len(np.where(appMagMean2[P1000] <= 19.5)[0]))
#NORMALIZED FROM HERE vv
N_all2_norm = (N_all2/N_all2)*N_mult2 #normalized
N_all2_03norm = (N_all2_03/N_all2)*N_mult2
N_all2_1norm = (N_all2_1/N_all2)*N_mult2
N_all2_10norm = (N_all2_10/N_all2)*N_mult2
N_all2_30norm = (N_all2_30/N_all2)*N_mult2
N_all2_100norm = (N_all2_100/N_all2)*N_mult2
N_all2_1000norm = (N_all2_1000/N_all2)*N_mult2
N_observable2 = (float(len(observable2))/float(N_all2))*N_mult2
N_observable2_03 = (float(len(observable2_03))/float(N_all2))*N_mult2
N_observable2_1 = (float(len(observable2_1))/float(N_all2))*N_mult2
N_observable2_10 = (float(len(observable2_10))/float(N_all2))*N_mult2
N_observable2_30 = (float(len(observable2_30))/float(N_all2))*N_mult2
N_observable2_100 = (float(len(observable2_100))/float(N_all2))*N_mult2
N_observable2_1000 = (float(len(observable2_1000))/float(N_all2))*N_mult2
N_recoverable2 = (float(len(recoverable2))/float(N_all2))*N_mult2
N_recoverable2_03 = (float(len(recoverable2_03))/float(N_all2))*N_mult2
N_recoverable2_1 = (float(len(recoverable2_1))/float(N_all2))*N_mult2
N_recoverable2_10 = (float(len(recoverable2_10))/float(N_all2))*N_mult2
N_recoverable2_30 = (float(len(recoverable2_30))/float(N_all2))*N_mult2
N_recoverable2_100 = (float(len(recoverable2_100))/float(N_all2))*N_mult2
N_recoverable2_1000 = (float(len(recoverable2_1000))/float(N_all2))*N_mult2
N_all2_norm_22 = (N_all2_22/N_all2)*N_mult2 #normalized
N_all2_03norm_22 = (N_all2_03_22/N_all2)*N_mult2
N_all2_1norm_22 = (N_all2_1_22/N_all2)*N_mult2
N_all2_10norm_22 = (N_all2_10_22/N_all2)*N_mult2
N_all2_30norm_22 = (N_all2_30_22/N_all2)*N_mult2
N_all2_100norm_22 = (N_all2_100_22/N_all2)*N_mult2
N_all2_1000norm_22 = (N_all2_1000_22/N_all2)*N_mult2
N_observable2_22 = (float(len(observable2_22))/float(N_all2))*N_mult2
N_observable2_03_22 = (float(len(observable2_03_22))/float(N_all2))*N_mult2
N_observable2_1_22 = (float(len(observable2_1_22))/float(N_all2))*N_mult2
N_observable2_10_22 = (float(len(observable2_10_22))/float(N_all2))*N_mult2
N_observable2_30_22 = (float(len(observable2_30_22))/float(N_all2))*N_mult2
N_observable2_100_22 = (float(len(observable2_100_22))/float(N_all2))*N_mult2
N_observable2_1000_22 = (float(len(observable2_1000_22))/float(N_all2))*N_mult2
N_recoverable2_22 = (float(len(recoverable2_22))/float(N_all2))*N_mult2
N_recoverable2_03_22 = (float(len(recoverable2_03_22))/float(N_all2))*N_mult2
N_recoverable2_1_22 = (float(len(recoverable2_1_22))/float(N_all2))*N_mult2
N_recoverable2_10_22 = (float(len(recoverable2_10_22))/float(N_all2))*N_mult2
N_recoverable2_30_22 = (float(len(recoverable2_30_22))/float(N_all2))*N_mult2
N_recoverable2_100_22 = (float(len(recoverable2_100_22))/float(N_all2))*N_mult2
N_recoverable2_1000_22 = (float(len(recoverable2_1000_22))/float(N_all2))*N_mult2
N_all2_norm_195 = (N_all2_195/N_all2)*N_mult2 #normalized
N_all2_03norm_195 = (N_all2_03_195/N_all2)*N_mult2
N_all2_1norm_195 = (N_all2_1_195/N_all2)*N_mult2
N_all2_10norm_195 = (N_all2_10_195/N_all2)*N_mult2
N_all2_30norm_195 = (N_all2_30_195/N_all2)*N_mult2
N_all2_100norm_195 = (N_all2_100_195/N_all2)*N_mult2
N_all2_1000norm_195 = (N_all2_1000_195/N_all2)*N_mult2
N_observable2_195 = (float(len(observable2_195))/float(N_all2))*N_mult2
N_observable2_03_195 = (float(len(observable2_03_195))/float(N_all2))*N_mult2
N_observable2_1_195 = (float(len(observable2_1_195))/float(N_all2))*N_mult2
N_observable2_10_195 = (float(len(observable2_10_195))/float(N_all2))*N_mult2
N_observable2_30_195 = (float(len(observable2_30_195))/float(N_all2))*N_mult2
N_observable2_100_195 = (float(len(observable2_100_195))/float(N_all2))*N_mult2
N_observable2_1000_195 = (float(len(observable2_1000_195))/float(N_all2))*N_mult2
N_recoverable2_195 = (float(len(recoverable2_195))/float(N_all2))*N_mult2
N_recoverable2_03_195 = (float(len(recoverable2_03_195))/float(N_all2))*N_mult2
N_recoverable2_1_195 = (float(len(recoverable2_1_195))/float(N_all2))*N_mult2
N_recoverable2_10_195 = (float(len(recoverable2_10_195))/float(N_all2))*N_mult2
N_recoverable2_30_195 = (float(len(recoverable2_30_195))/float(N_all2))*N_mult2
N_recoverable2_100_195 = (float(len(recoverable2_100_195))/float(N_all2))*N_mult2
N_recoverable2_1000_195 = (float(len(recoverable2_1000_195))/float(N_all2))*N_mult2
N_totalfast_array.append(float(N_all2_norm))
N_totalobservablefast_array.append(float(N_observable2))
N_totalrecoverablefast_array.append(float(N_recoverable2))
N_totalfast_array_03.append(float(N_all2_03norm))
N_totalobservablefast_array_03.append(float(N_observable2_03))
N_totalrecoverablefast_array_03.append(float(N_recoverable2_03))
N_totalfast_array_1.append(float(N_all2_1norm))
N_totalobservablefast_array_1.append(float(N_observable2_1))
N_totalrecoverablefast_array_1.append(float(N_recoverable2_1))
N_totalfast_array_10.append(float(N_all2_10norm))
N_totalobservablefast_array_10.append(float(N_observable2_10))
N_totalrecoverablefast_array_10.append(float(N_recoverable2_10))
N_totalfast_array_30.append(float(N_all2_30norm))
N_totalobservablefast_array_30.append(float(N_observable2_30))
N_totalrecoverablefast_array_30.append(float(N_recoverable2_30))
N_totalfast_array_100.append(float(N_all2_100norm))
N_totalobservablefast_array_100.append(float(N_observable2_100))
N_totalrecoverablefast_array_100.append(float(N_recoverable2_100))
N_totalfast_array_1000.append(float(N_all2_1000norm))
N_totalobservablefast_array_1000.append(float(N_observable2_1000))
N_totalrecoverablefast_array_1000.append(float(N_recoverable2_1000))
N_totalfast22_array.append(float(N_all2_norm_22))
N_totalobservablefast22_array.append(float(N_observable2_22))
N_totalrecoverablefast22_array.append(float(N_recoverable2_22))
N_totalfast22_array_03.append(float(N_all2_03norm_22))
N_totalobservablefast22_array_03.append(float(N_observable2_03_22))
N_totalrecoverablefast22_array_03.append(float(N_recoverable2_03_22))
N_totalfast22_array_1.append(float(N_all2_1norm_22))
N_totalobservablefast22_array_1.append(float(N_observable2_1_22))
N_totalrecoverablefast22_array_1.append(float(N_recoverable2_1_22))
N_totalfast22_array_10.append(float(N_all2_10norm_22))
N_totalobservablefast22_array_10.append(float(N_observable2_10_22))
N_totalrecoverablefast22_array_10.append(float(N_recoverable2_10_22))
N_totalfast22_array_30.append(float(N_all2_30norm_22))
N_totalobservablefast22_array_30.append(float(N_observable2_30_22))
N_totalrecoverablefast22_array_30.append(float(N_recoverable2_30_22))
N_totalfast22_array_100.append(float(N_all2_100norm_22))
N_totalobservablefast22_array_100.append(float(N_observable2_100_22))
N_totalrecoverablefast22_array_100.append(float(N_recoverable2_100_22))
N_totalfast22_array_1000.append(float(N_all2_1000norm_22))
N_totalobservablefast22_array_1000.append(float(N_observable2_1000_22))
N_totalrecoverablefast22_array_1000.append(float(N_recoverable2_1000_22))
N_totalfast195_array.append(float(N_all2_norm_195))
N_totalobservablefast195_array.append(float(N_observable2_195))
N_totalrecoverablefast195_array.append(float(N_recoverable2_195))
N_totalfast195_array_03.append(float(N_all2_03norm_195))
N_totalobservablefast195_array_03.append(float(N_observable2_03_195))
N_totalrecoverablefast195_array_03.append(float(N_recoverable2_03_195))
N_totalfast195_array_1.append(float(N_all2_1norm_195))
N_totalobservablefast195_array_1.append(float(N_observable2_1_195))
N_totalrecoverablefast195_array_1.append(float(N_recoverable2_1_195))
N_totalfast195_array_10.append(float(N_all2_10norm_195))
N_totalobservablefast195_array_10.append(float(N_observable2_10_195))
N_totalrecoverablefast195_array_10.append(float(N_recoverable2_10_195))
N_totalfast195_array_30.append(float(N_all2_30norm_195))
N_totalobservablefast195_array_30.append(float(N_observable2_30_195))
N_totalrecoverablefast195_array_30.append(float(N_recoverable2_30_195))
N_totalfast195_array_100.append(float(N_all2_100norm_195))
N_totalobservablefast195_array_100.append(float(N_observable2_100_195))
N_totalrecoverablefast195_array_100.append(float(N_recoverable2_100_195))
N_totalfast195_array_1000.append(float(N_all2_1000norm_195))
N_totalobservablefast195_array_1000.append(float(N_observable2_1000_195))
N_totalrecoverablefast195_array_1000.append(float(N_recoverable2_1000_195))
N_totalfast = np.sum(N_totalfast_array)
N_totalfast_03 = np.sum(N_totalfast_array_03)
N_totalfast_1 = np.sum(N_totalfast_array_1)
N_totalfast_10 = np.sum(N_totalfast_array_10)
N_totalfast_30 = np.sum(N_totalfast_array_30)
N_totalfast_100 = np.sum(N_totalfast_array_100)
N_totalfast_1000 = np.sum(N_totalfast_array_1000)
N_totalobservablefast = np.sum(N_totalobservablefast_array)
N_totalobservablefast_03 = np.sum(N_totalobservablefast_array_03)
N_totalobservablefast_1 = np.sum(N_totalobservablefast_array_1)
N_totalobservablefast_10 = np.sum(N_totalobservablefast_array_10)
N_totalobservablefast_30 = np.sum(N_totalobservablefast_array_30)
N_totalobservablefast_100 = np.sum(N_totalobservablefast_array_100)
N_totalobservablefast_1000 = np.sum(N_totalobservablefast_array_1000)
N_totalrecoverablefast = np.sum(N_totalrecoverablefast_array)
N_totalrecoverablefast_03 = np.sum(N_totalrecoverablefast_array_03)
N_totalrecoverablefast_1 = np.sum(N_totalrecoverablefast_array_1)
N_totalrecoverablefast_10 = np.sum(N_totalrecoverablefast_array_10)
N_totalrecoverablefast_30 = np.sum(N_totalrecoverablefast_array_30)
N_totalrecoverablefast_100 = np.sum(N_totalrecoverablefast_array_100)
N_totalrecoverablefast_1000 = np.sum(N_totalrecoverablefast_array_1000)
N_totalfast22 = np.sum(N_totalfast22_array)
N_totalfast22_03 = np.sum(N_totalfast22_array_03)
N_totalfast22_1 = np.sum(N_totalfast22_array_1)
N_totalfast22_10 = np.sum(N_totalfast22_array_10)
N_totalfast22_30 = np.sum(N_totalfast22_array_30)
N_totalfast22_100 = np.sum(N_totalfast22_array_100)
N_totalfast22_1000 = np.sum(N_totalfast22_array_1000)
N_totalobservablefast22 = np.sum(N_totalobservablefast22_array)
N_totalobservablefast22_03 = np.sum(N_totalobservablefast22_array_03)
N_totalobservablefast22_1 = np.sum(N_totalobservablefast22_array_1)
N_totalobservablefast22_10 = np.sum(N_totalobservablefast22_array_10)
N_totalobservablefast22_30 = np.sum(N_totalobservablefast22_array_30)
N_totalobservablefast22_100 = np.sum(N_totalobservablefast22_array_100)
N_totalobservablefast22_1000 = np.sum(N_totalobservablefast22_array_1000)
N_totalrecoverablefast22 = np.sum(N_totalrecoverablefast22_array)
N_totalrecoverablefast22_03 = np.sum(N_totalrecoverablefast22_array_03)
N_totalrecoverablefast22_1 = np.sum(N_totalrecoverablefast22_array_1)
N_totalrecoverablefast22_10 = np.sum(N_totalrecoverablefast22_array_10)
N_totalrecoverablefast22_30 = np.sum(N_totalrecoverablefast22_array_30)
N_totalrecoverablefast22_100 = np.sum(N_totalrecoverablefast22_array_100)
N_totalrecoverablefast22_1000 = np.sum(N_totalrecoverablefast22_array_1000)
N_totalfast195 = np.sum(N_totalfast195_array)
N_totalfast195_03 = np.sum(N_totalfast195_array_03)
N_totalfast195_1 = np.sum(N_totalfast195_array_1)
N_totalfast195_10 = np.sum(N_totalfast195_array_10)
N_totalfast195_30 = np.sum(N_totalfast195_array_30)
N_totalfast195_100 = np.sum(N_totalfast195_array_100)
N_totalfast195_1000 = np.sum(N_totalfast195_array_1000)
N_totalobservablefast195 = np.sum(N_totalobservablefast195_array)
N_totalobservablefast195_03 = np.sum(N_totalobservablefast195_array_03)
N_totalobservablefast195_1 = np.sum(N_totalobservablefast195_array_1)
N_totalobservablefast195_10 = np.sum(N_totalobservablefast195_array_10)
N_totalobservablefast195_30 = np.sum(N_totalobservablefast195_array_30)
N_totalobservablefast195_100 = np.sum(N_totalobservablefast195_array_100)
N_totalobservablefast195_1000 = np.sum(N_totalobservablefast195_array_1000)
N_totalrecoverablefast195 = np.sum(N_totalrecoverablefast195_array)
N_totalrecoverablefast195_03 = np.sum(N_totalrecoverablefast195_array_03)
N_totalrecoverablefast195_1 = np.sum(N_totalrecoverablefast195_array_1)
N_totalrecoverablefast195_10 = np.sum(N_totalrecoverablefast195_array_10)
N_totalrecoverablefast195_30 = np.sum(N_totalrecoverablefast195_array_30)
N_totalrecoverablefast195_100 = np.sum(N_totalrecoverablefast195_array_100)
N_totalrecoverablefast195_1000 = np.sum(N_totalrecoverablefast195_array_1000)
print("N_totalfast = ", N_totalfast, "N_totalobservablefast = ", N_totalobservablefast, "N_totalrecoverablefast = ", N_totalrecoverablefast)
wholerecoverypercent_fast = (N_totalrecoverablefast/N_totalobservablefast)*100
wholerecoverypercent_fast_03 = (N_totalrecoverablefast_03/N_totalobservablefast_03)*100
wholerecoverypercent_fast_1 = (N_totalrecoverablefast_1/N_totalobservablefast_1)*100
wholerecoverypercent_fast_10 = (N_totalrecoverablefast_10/N_totalobservablefast_10)*100
wholerecoverypercent_fast_30 = (N_totalrecoverablefast_30/N_totalobservablefast_30)*100
wholerecoverypercent_fast_100 = (N_totalrecoverablefast_100/N_totalobservablefast_100)*100
wholerecoverypercent_fast_1000 = (N_totalrecoverablefast_1000/N_totalobservablefast_1000)*100
sigmafast = ((N_totalrecoverablefast**(1/2))/N_totalobservablefast)*100
sigmafast_03 = ((N_totalrecoverablefast_03**(1/2))/N_totalobservablefast_03)*100
sigmafast_1 = ((N_totalrecoverablefast_1**(1/2))/N_totalobservablefast_1)*100
sigmafast_10 = ((N_totalrecoverablefast_10**(1/2))/N_totalobservablefast_10)*100
sigmafast_30 = ((N_totalrecoverablefast_30**(1/2))/N_totalobservablefast_30)*100
sigmafast_100 = ((N_totalrecoverablefast_100**(1/2))/N_totalobservablefast_100)*100
sigmafast_1000 = ((N_totalrecoverablefast_1000**(1/2))/N_totalobservablefast_1000)*100
print("wholerecoverypercent_fast = ", wholerecoverypercent_fast, "wholerecoverypercent_fast_03 = ", wholerecoverypercent_fast_03, "wholerecoverypercent_fast_1 = ", wholerecoverypercent_fast_1, "wholerecoverypercent_fast_10 = ", wholerecoverypercent_fast_10, "wholerecoverypercent_fast_30 = ", wholerecoverypercent_fast_30, "wholerecoverypercent_fast_100 = ", wholerecoverypercent_fast_100, "wholerecoverypercent_fast_1000 = ", wholerecoverypercent_fast_1000)
print("sigmafast = ", sigmafast, "sigmafast_03 = ", sigmafast_03, "sigmafast_1 = ", sigmafast_1, "sigmafast_10 = ", sigmafast_10, "sigmafast_30 = ", sigmafast_30, "sigmafast_100 = ", sigmafast_100, "sigmafast_1000 = ", sigmafast_1000)
overallrecoverypercent_fast = (N_totalrecoverablefast/N_totalfast)*100
overallrecoverypercent_fast_03 = (N_totalrecoverablefast_03/N_totalfast_03)*100
overallrecoverypercent_fast_1 = (N_totalrecoverablefast_1/N_totalfast_1)*100
overallrecoverypercent_fast_10 = (N_totalrecoverablefast_10/N_totalfast_10)*100
overallrecoverypercent_fast_30 = (N_totalrecoverablefast_30/N_totalfast_30)*100
overallrecoverypercent_fast_100 = (N_totalrecoverablefast_100/N_totalfast_100)*100
overallrecoverypercent_fast_1000 = (N_totalrecoverablefast_1000/N_totalfast_1000)*100
overallsigmafast = ((N_totalrecoverablefast**(1/2))/N_totalfast)*100
overallsigmafast_03 = ((N_totalrecoverablefast_03**(1/2))/N_totalfast_03)*100
overallsigmafast_1 = ((N_totalrecoverablefast_1**(1/2))/N_totalfast_1)*100
overallsigmafast_10 = ((N_totalrecoverablefast_10**(1/2))/N_totalfast_10)*100
overallsigmafast_30 = ((N_totalrecoverablefast_30**(1/2))/N_totalfast_30)*100
overallsigmafast_100 = ((N_totalrecoverablefast_100**(1/2))/N_totalfast_100)*100
overallsigmafast_1000 = ((N_totalrecoverablefast_1000**(1/2))/N_totalfast_1000)*100
print("overallrecoverypercent_fast = ", overallrecoverypercent_fast, "overallrecoverypercent_fast_03 = ", overallrecoverypercent_fast_03, "overallrecoverypercent_fast_1 = ", overallrecoverypercent_fast_1, "overallrecoverypercent_fast_10 = ", overallrecoverypercent_fast_10, "overallrecoverypercent_fast_30 = ", overallrecoverypercent_fast_30, "overallrecoverypercent_fast_100 = ", overallrecoverypercent_fast_100, "overallrecoverypercent_fast_1000 = ", overallrecoverypercent_fast_1000)
print("overallsigmafast = ", overallsigmafast, "overallsigmafast_03 = ", overallsigmafast_03, "overallsigmafast_1 = ", overallsigmafast_1, "overallsigmafast_10 = ", overallsigmafast_10, "overallsigmafast_30 = ", overallsigmafast_30, "overallsigmafast_100 = ", overallsigmafast_100, "overallsigmafast_1000 = ", overallsigmafast_1000)
wholerecoverypercent_fast22 = (N_totalrecoverablefast22/N_totalobservablefast22)*100
wholerecoverypercent_fast22_03 = (N_totalrecoverablefast22_03/N_totalobservablefast22_03)*100
wholerecoverypercent_fast22_1 = (N_totalrecoverablefast22_1/N_totalobservablefast22_1)*100
wholerecoverypercent_fast22_10 = (N_totalrecoverablefast22_10/N_totalobservablefast22_10)*100
wholerecoverypercent_fast22_30 = (N_totalrecoverablefast22_30/N_totalobservablefast22_30)*100
wholerecoverypercent_fast22_100 = (N_totalrecoverablefast22_100/N_totalobservablefast22_100)*100
wholerecoverypercent_fast22_1000 = (N_totalrecoverablefast22_1000/N_totalobservablefast22_1000)*100
sigmafast22 = ((N_totalrecoverablefast22**(1/2))/N_totalobservablefast22)*100
sigmafast22_03 = ((N_totalrecoverablefast22_03**(1/2))/N_totalobservablefast22_03)*100
sigmafast22_1 = ((N_totalrecoverablefast22_1**(1/2))/N_totalobservablefast22_1)*100
sigmafast22_10 = ((N_totalrecoverablefast22_10**(1/2))/N_totalobservablefast22_10)*100
sigmafast22_30 = ((N_totalrecoverablefast22_30**(1/2))/N_totalobservablefast22_30)*100
sigmafast22_100 = ((N_totalrecoverablefast22_100**(1/2))/N_totalobservablefast22_100)*100
sigmafast22_1000 = ((N_totalrecoverablefast22_1000**(1/2))/N_totalobservablefast22_1000)*100
print("wholerecoverypercent_fast22 = ", wholerecoverypercent_fast22, "wholerecoverypercent_fast22_03 = ", wholerecoverypercent_fast22_03, "wholerecoverypercent_fast22_1 = ", wholerecoverypercent_fast22_1, "wholerecoverypercent_fast22_10 = ", wholerecoverypercent_fast22_10, "wholerecoverypercent_fast22_30 = ", wholerecoverypercent_fast22_30, "wholerecoverypercent_fast22_100 = ", wholerecoverypercent_fast22_100, "wholerecoverypercent_fast22_1000 = ", wholerecoverypercent_fast22_1000)
print("sigmafast22 = ", sigmafast22, "sigmafast22_03 = ", sigmafast22_03, "sigmafast22_1 = ", sigmafast22_1, "sigmafast22_10 = ", sigmafast22_10, "sigmafast22_30 = ", sigmafast22_30, "sigmafast22_100 = ", sigmafast22_100, "sigmafast22_1000 = ", sigmafast22_1000)
overallrecoverypercent_fast22 = (N_totalrecoverablefast22/N_totalfast22)*100
overallrecoverypercent_fast22_03 = (N_totalrecoverablefast22_03/N_totalfast22_03)*100
overallrecoverypercent_fast22_1 = (N_totalrecoverablefast22_1/N_totalfast22_1)*100
overallrecoverypercent_fast22_10 = (N_totalrecoverablefast22_10/N_totalfast22_10)*100
overallrecoverypercent_fast22_30 = (N_totalrecoverablefast22_30/N_totalfast22_30)*100
overallrecoverypercent_fast22_100 = (N_totalrecoverablefast22_100/N_totalfast22_100)*100
overallrecoverypercent_fast22_1000 = (N_totalrecoverablefast22_1000/N_totalfast22_1000)*100
overallsigmafast22 = ((N_totalrecoverablefast22**(1/2))/N_totalfast22)*100
overallsigmafast22_03 = ((N_totalrecoverablefast22_03**(1/2))/N_totalfast22_03)*100
overallsigmafast22_1 = ((N_totalrecoverablefast22_1**(1/2))/N_totalfast22_1)*100
overallsigmafast22_10 = ((N_totalrecoverablefast22_10**(1/2))/N_totalfast22_10)*100
overallsigmafast22_30 = ((N_totalrecoverablefast22_30**(1/2))/N_totalfast22_30)*100
overallsigmafast22_100 = ((N_totalrecoverablefast22_100**(1/2))/N_totalfast22_100)*100
overallsigmafast22_1000 = ((N_totalrecoverablefast22_1000**(1/2))/N_totalfast22_1000)*100
print("overallrecoverypercent_fast22 = ", overallrecoverypercent_fast22, "overallrecoverypercent_fast22_03 = ", overallrecoverypercent_fast22_03, "overallrecoverypercent_fast22_1 = ", overallrecoverypercent_fast22_1, "overallrecoverypercent_fast22_10 = ", overallrecoverypercent_fast22_10, "overallrecoverypercent_fast22_30 = ", overallrecoverypercent_fast22_30, "overallrecoverypercent_fast22_100 = ", overallrecoverypercent_fast22_100, "overallrecoverypercent_fast22_1000 = ", overallrecoverypercent_fast22_1000)
print("overallsigmafast22 = ", overallsigmafast22, "overallsigmafast22_03 = ", overallsigmafast22_03, "overallsigmafast22_1 = ", overallsigmafast22_1, "overallsigmafast22_10 = ", overallsigmafast22_10, "overallsigmafast22_30 = ", overallsigmafast22_30, "overallsigmafast22_100 = ", overallsigmafast22_100, "overallsigmafast22_1000 = ", overallsigmafast22_1000)
wholerecoverypercent_fast195 = (N_totalrecoverablefast195/N_totalobservablefast195)*100
wholerecoverypercent_fast195_03 = (N_totalrecoverablefast195_03/N_totalobservablefast195_03)*100
wholerecoverypercent_fast195_1 = (N_totalrecoverablefast195_1/N_totalobservablefast195_1)*100
wholerecoverypercent_fast195_10 = (N_totalrecoverablefast195_10/N_totalobservablefast195_10)*100
wholerecoverypercent_fast195_30 = (N_totalrecoverablefast195_30/N_totalobservablefast195_30)*100
wholerecoverypercent_fast195_100 = (N_totalrecoverablefast195_100/N_totalobservablefast195_100)*100
wholerecoverypercent_fast195_1000 = (N_totalrecoverablefast195_1000/N_totalobservablefast195_1000)*100
sigmafast195 = ((N_totalrecoverablefast195**(1/2))/N_totalobservablefast195)*100
sigmafast195_03 = ((N_totalrecoverablefast195_03**(1/2))/N_totalobservablefast195_03)*100
sigmafast195_1 = ((N_totalrecoverablefast195_1**(1/2))/N_totalobservablefast195_1)*100
sigmafast195_10 = ((N_totalrecoverablefast195_10**(1/2))/N_totalobservablefast195_10)*100
sigmafast195_30 = ((N_totalrecoverablefast195_30**(1/2))/N_totalobservablefast195_30)*100
sigmafast195_100 = ((N_totalrecoverablefast195_100**(1/2))/N_totalobservablefast195_100)*100
sigmafast195_1000 = ((N_totalrecoverablefast195_1000**(1/2))/N_totalobservablefast195_1000)*100
print("wholerecoverypercent_fast195 = ", wholerecoverypercent_fast195, "wholerecoverypercent_fast195_03 = ", wholerecoverypercent_fast195_03, "wholerecoverypercent_fast195_1 = ", wholerecoverypercent_fast195_1, "wholerecoverypercent_fast195_10 = ", wholerecoverypercent_fast195_10, "wholerecoverypercent_fast195_30 = ", wholerecoverypercent_fast195_30, "wholerecoverypercent_fast195_100 = ", wholerecoverypercent_fast195_100, "wholerecoverypercent_fast195_1000 = ", wholerecoverypercent_fast195_1000)
print("sigmafast195 = ", sigmafast195, "sigmafast195_03 = ", sigmafast195_03, "sigmafast195_1 = ", sigmafast195_1, "sigmafast195_10 = ", sigmafast195_10, "sigmafast195_30 = ", sigmafast195_30, "sigmafast195_100 = ", sigmafast195_100, "sigmafast195_1000 = ", sigmafast195_1000)
overallrecoverypercent_fast195 = (N_totalrecoverablefast195/N_totalfast195)*100
overallrecoverypercent_fast195_03 = (N_totalrecoverablefast195_03/N_totalfast195_03)*100
overallrecoverypercent_fast195_1 = (N_totalrecoverablefast195_1/N_totalfast195_1)*100
overallrecoverypercent_fast195_10 = (N_totalrecoverablefast195_10/N_totalfast195_10)*100
overallrecoverypercent_fast195_30 = (N_totalrecoverablefast195_30/N_totalfast195_30)*100
overallrecoverypercent_fast195_100 = (N_totalrecoverablefast195_100/N_totalfast195_100)*100
overallrecoverypercent_fast195_1000 = (N_totalrecoverablefast195_1000/N_totalfast195_1000)*100
overallsigmafast195 = ((N_totalrecoverablefast195**(1/2))/N_totalfast195)*100
overallsigmafast195_03 = ((N_totalrecoverablefast195_03**(1/2))/N_totalfast195_03)*100
overallsigmafast195_1 = ((N_totalrecoverablefast195_1**(1/2))/N_totalfast195_1)*100
overallsigmafast195_10 = ((N_totalrecoverablefast195_10**(1/2))/N_totalfast195_10)*100
overallsigmafast195_30 = ((N_totalrecoverablefast195_30**(1/2))/N_totalfast195_30)*100
overallsigmafast195_100 = ((N_totalrecoverablefast195_100**(1/2))/N_totalfast195_100)*100
overallsigmafast195_1000 = ((N_totalrecoverablefast195_1000**(1/2))/N_totalfast195_1000)*100
print("overallrecoverypercent_fast195 = ", overallrecoverypercent_fast195, "overallrecoverypercent_fast195_03 = ", overallrecoverypercent_fast195_03, "overallrecoverypercent_fast195_1 = ", overallrecoverypercent_fast195_1, "overallrecoverypercent_fast195_10 = ", overallrecoverypercent_fast195_10, "overallrecoverypercent_fast195_30 = ", overallrecoverypercent_fast195_30, "overallrecoverypercent_fast195_100 = ", overallrecoverypercent_fast195_100, "overallrecoverypercent_fast195_1000 = ", overallrecoverypercent_fast195_1000)
print("overallsigmafast195 = ", overallsigmafast195, "overallsigmafast195_03 = ", overallsigmafast195_03, "overallsigmafast195_1 = ", overallsigmafast195_1, "overallsigmafast195_10 = ", overallsigmafast195_10, "overallsigmafast195_30 = ", overallsigmafast195_30, "overallsigmafast195_100 = ", overallsigmafast195_100, "overallsigmafast195_1000 = ", overallsigmafast195_1000)
for fileobsDist_ in sorted(allFiles_obsDist):
filename3 = fileobsDist_[77:] #when file path no longer has /old in it, will be fileobsDist_[73:]
fileid3 = filename3.strip('output_file.csv')
colorvalue3 = int(fileid3)
colorvalue_obsDist.append(colorvalue3)
print ("I'm starting " + fileid3)
datobsDist = pd.read_csv(fileobsDist_, sep = ',', header=2)
##########################################################
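# Per-field normalization: read NstarsTRILEGAL from the file header, build a binary-fraction
# weight from the m1 histogram and fbFit, and combine them into N_mult3, which rescales the
# raw index counts below to expected source counts.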
datobsDist1 = pd.read_csv(fileobsDist_, sep = ',', header=0, nrows=1)
N_tri3 = datobsDist1["NstarsTRILEGAL"][0]
print("N_tri3 = ", N_tri3)
m1hAll03, m1b3 = np.histogram(datobsDist["m1"], bins=mbins)
dm13 = np.diff(m1b3)
m1val3 = m1b3[:-1] + dm13/2.
fb3 = np.sum(m1hAll03*dm13*fbFit(m1val3))
N_mult3 = N_tri3*fb3
##########################################################
PeriodIn3 = datobsDist['p'] # input period -- 'p' in data file
if len(PeriodIn3) == 0:
continue
if N_tri3 == 0:
continue
else:
PeriodOut3 = datobsDist['LSM_PERIOD'] #LSM_PERIOD in data file
appMagMean3 = datobsDist['appMagMean'] #when the file path is back to fast/obsDist/output_files (vs. fast/old/obsDist/output_files), this should be changed to appMagMean_r
observable3 = np.where(PeriodOut3 != -999)[0]
observable3_03 = np.where(PeriodIn3[observable3] <= 0.3)[0]
observable3_1 = np.where(PeriodIn3[observable3] <= 1)[0]
observable3_10 = np.where(PeriodIn3[observable3] <= 10)[0]
observable3_30 = np.where(PeriodIn3[observable3] <= 30)[0]
observable3_100 = np.where(PeriodIn3[observable3] <= 100)[0]
observable3_1000 = np.where(PeriodIn3[observable3] <= 1000)[0]
observable3_22 = np.where(appMagMean3[observable3] <= 22.)[0]
observable3_03_22 = np.where(appMagMean3[observable3_03] <= 22.)[0]
observable3_1_22 = np.where(appMagMean3[observable3_1] <= 22.)[0]
observable3_10_22 = np.where(appMagMean3[observable3_10] <= 22.)[0]
observable3_30_22 = np.where(appMagMean3[observable3_30] <= 22.)[0]
observable3_100_22 = np.where(appMagMean3[observable3_100] <= 22.)[0]
observable3_1000_22 = np.where(appMagMean3[observable3_1000] <= 22.)[0]
observable3_195 = np.where(appMagMean3[observable3] <= 19.5)[0]
observable3_03_195 = np.where(appMagMean3[observable3_03] <= 19.5)[0]
observable3_1_195 = np.where(appMagMean3[observable3_1] <= 19.5)[0]
observable3_10_195 = np.where(appMagMean3[observable3_10] <= 19.5)[0]
observable3_30_195 = np.where(appMagMean3[observable3_30] <= 19.5)[0]
observable3_100_195 = np.where(appMagMean3[observable3_100] <= 19.5)[0]
observable3_1000_195 = np.where(appMagMean3[observable3_1000] <= 19.5)[0]
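# A system counts as "recovered" if the output period lies within 10% of the input period,
# or of half or twice the input period (allowing for the usual half-/double-period aliases
# of period-finding algorithms).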
Sigma_Period_Whole3 = abs(PeriodOut3 - PeriodIn3)/PeriodIn3
Sigma_Period_Half3 = abs(PeriodOut3 - 0.5*PeriodIn3)/(0.5*PeriodIn3)
Sigma_Period_Twice3 = abs(PeriodOut3 - 2*PeriodIn3)/(2*PeriodIn3)
recover_twice3 = np.where(Sigma_Period_Twice3 <= 0.1)[0]
recover_twice3_03 = np.where(PeriodIn3[recover_twice3] <= 0.3)[0]
recover_twice3_1 = np.where(PeriodIn3[recover_twice3] <= 1)[0]
recover_twice3_10 = np.where(PeriodIn3[recover_twice3] <= 10)[0]
recover_twice3_30 = np.where(PeriodIn3[recover_twice3] <= 30)[0]
recover_twice3_100 = np.where(PeriodIn3[recover_twice3] <= 100)[0]
recover_twice3_1000 = np.where(PeriodIn3[recover_twice3] <= 1000)[0]
#recover_half1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Half1), Sigma_Period_Half1 <= 0.1))[0]
recover_half3 = np.where(Sigma_Period_Half3 <= 0.1)[0]
recover_half3_03 = np.where(PeriodIn3[recover_half3] <= 0.3)[0]
recover_half3_1 = np.where(PeriodIn3[recover_half3] <= 1)[0]
recover_half3_10 = np.where(PeriodIn3[recover_half3] <= 10)[0]
recover_half3_30 = np.where(PeriodIn3[recover_half3] <= 30)[0]
recover_half3_100 = np.where(PeriodIn3[recover_half3] <= 100)[0]
recover_half3_1000 = np.where(PeriodIn3[recover_half3] <= 1000)[0]
#recover_whole1 = np.where(np.logical_and(np.isfinite(Sigma_Period_Whole1), Sigma_Period_Whole1 <= 0.1))[0]
recover_whole3 = np.where(Sigma_Period_Whole3 <= 0.1)[0]
recover_whole3_03 = np.where(PeriodIn3[recover_whole3] <= 0.3)[0]
recover_whole3_1 = np.where(PeriodIn3[recover_whole3] <= 1)[0]
recover_whole3_10 = np.where(PeriodIn3[recover_whole3] <= 10)[0]
recover_whole3_30 = np.where(PeriodIn3[recover_whole3] <= 30)[0]
recover_whole3_100 = np.where(PeriodIn3[recover_whole3] <= 100)[0]
recover_whole3_1000 = np.where(PeriodIn3[recover_whole3] <= 1000)[0]
recoverable3 = np.concatenate((recover_twice3, recover_whole3, recover_half3), axis=0)
recoverable3_03 = np.concatenate((recover_twice3_03, recover_whole3_03, recover_half3_03), axis=0)
recoverable3_1 = np.concatenate((recover_twice3_1, recover_whole3_1, recover_half3_1), axis=0)
recoverable3_10 = np.concatenate((recover_twice3_10, recover_whole3_10, recover_half3_10), axis=0)
recoverable3_30 = np.concatenate((recover_twice3_30, recover_whole3_30, recover_half3_30), axis=0)
recoverable3_100 = np.concatenate((recover_twice3_100, recover_whole3_100, recover_half3_100), axis=0)
recoverable3_1000 = np.concatenate((recover_twice3_1000, recover_whole3_1000, recover_half3_1000), axis=0)
recoverable3_22 = np.where(appMagMean3[recoverable3] <= 22.)[0]
recoverable3_03_22 = np.where(appMagMean3[recoverable3_03] <= 22.)[0]
recoverable3_1_22 = np.where(appMagMean3[recoverable3_1] <= 22.)[0]
recoverable3_10_22 = np.where(appMagMean3[recoverable3_10] <= 22.)[0]
recoverable3_30_22 = np.where(appMagMean3[recoverable3_30] <= 22.)[0]
recoverable3_100_22 = np.where(appMagMean3[recoverable3_100] <= 22.)[0]
recoverable3_1000_22 = np.where(appMagMean3[recoverable3_1000] <= 22.)[0]
recoverable3_195 = np.where(appMagMean3[recoverable3] <= 19.5)[0]
recoverable3_03_195 = np.where(appMagMean3[recoverable3_03] <= 19.5)[0]
recoverable3_1_195 = np.where(appMagMean3[recoverable3_1] <= 19.5)[0]
recoverable3_10_195 = np.where(appMagMean3[recoverable3_10] <= 19.5)[0]
recoverable3_30_195 = np.where(appMagMean3[recoverable3_30] <= 19.5)[0]
recoverable3_100_195 = np.where(appMagMean3[recoverable3_100] <= 19.5)[0]
recoverable3_1000_195 = np.where(appMagMean3[recoverable3_1000] <= 19.5)[0]
P03 = np.where(PeriodIn3 <= 0.3)[0]
P1 = np.where(PeriodIn3 <= 1)[0]
P10 = np.where(PeriodIn3 <= 10)[0]
P30 = np.where(PeriodIn3 <= 30)[0]
P100 = np.where(PeriodIn3 <= 100)[0]
P1000 = np.where(PeriodIn3 <= 1000)[0]
N_all3 = float(len(PeriodIn3)) #unnormalized
N_all3_03 = float(len(P03))
N_all3_1 = float(len(P1))
N_all3_10 = float(len(P10))
N_all3_30 = float(len(P30))
N_all3_100 = float(len(P100))
N_all3_1000 = float(len(P1000))
N_all3_22 = float(len(np.where(appMagMean3 <= 22)[0])) #unnormalized
N_all3_03_22 = float(len(np.where(appMagMean3[P03] <= 22)[0]))
N_all3_1_22 = float(len(np.where(appMagMean3[P1] <= 22)[0]))
N_all3_10_22 = float(len(np.where(appMagMean3[P10] <= 22)[0]))
N_all3_30_22 = float(len(np.where(appMagMean3[P30] <= 22)[0]))
N_all3_100_22 = float(len(np.where(appMagMean3[P100] <= 22)[0]))
N_all3_1000_22 = float(len(np.where(appMagMean3[P1000] <= 22)[0]))
N_all3_195 = float(len(np.where(appMagMean3 <= 19.5)[0])) #unnormalized
N_all3_03_195 = float(len(np.where(appMagMean3[P03] <= 19.5)[0]))
N_all3_1_195 = float(len(np.where(appMagMean3[P1] <= 19.5)[0]))
N_all3_10_195 = float(len(np.where(appMagMean3[P10] <= 19.5)[0]))
N_all3_30_195 = float(len(np.where(appMagMean3[P30] <= 19.5)[0]))
N_all3_100_195 = float(len(np.where(appMagMean3[P100] <= 19.5)[0]))
N_all3_1000_195 = float(len(np.where(appMagMean3[P1000] <= 19.5)[0]))
#NORMALIZED FROM HERE vv
N_all3_norm = (N_all3/N_all3)*N_mult3 #normalized
N_all3_03norm = (N_all3_03/N_all3)*N_mult3
N_all3_1norm = (N_all3_1/N_all3)*N_mult3
N_all3_10norm = (N_all3_10/N_all3)*N_mult3
N_all3_30norm = (N_all3_30/N_all3)*N_mult3
N_all3_100norm = (N_all3_100/N_all3)*N_mult3
N_all3_1000norm = (N_all3_1000/N_all3)*N_mult3
N_observable3 = (float(len(observable3))/float(N_all3))*N_mult3
N_observable3_03 = (float(len(observable3_03))/float(N_all3))*N_mult3
N_observable3_1 = (float(len(observable3_1))/float(N_all3))*N_mult3
N_observable3_10 = (float(len(observable3_10))/float(N_all3))*N_mult3
N_observable3_30 = (float(len(observable3_30))/float(N_all3))*N_mult3
N_observable3_100 = (float(len(observable3_100))/float(N_all3))*N_mult3
N_observable3_1000 = (float(len(observable3_1000))/float(N_all3))*N_mult3
N_recoverable3 = (float(len(recoverable3))/float(N_all3))*N_mult3
N_recoverable3_03 = (float(len(recoverable3_03))/float(N_all3))*N_mult3
N_recoverable3_1 = (float(len(recoverable3_1))/float(N_all3))*N_mult3
N_recoverable3_10 = (float(len(recoverable3_10))/float(N_all3))*N_mult3
N_recoverable3_30 = (float(len(recoverable3_30))/float(N_all3))*N_mult3
N_recoverable3_100 = (float(len(recoverable3_100))/float(N_all3))*N_mult3
N_recoverable3_1000 = (float(len(recoverable3_1000))/float(N_all3))*N_mult3
N_all3_norm_22 = (N_all3_22/N_all3)*N_mult3 #normalized
N_all3_03norm_22 = (N_all3_03_22/N_all3)*N_mult3
N_all3_1norm_22 = (N_all3_1_22/N_all3)*N_mult3
N_all3_10norm_22 = (N_all3_10_22/N_all3)*N_mult3
N_all3_30norm_22 = (N_all3_30_22/N_all3)*N_mult3
N_all3_100norm_22 = (N_all3_100_22/N_all3)*N_mult3
N_all3_1000norm_22 = (N_all3_1000_22/N_all3)*N_mult3
N_observable3_22 = (float(len(observable3_22))/float(N_all3))*N_mult3
N_observable3_03_22 = (float(len(observable3_03_22))/float(N_all3))*N_mult3
N_observable3_1_22 = (float(len(observable3_1_22))/float(N_all3))*N_mult3
N_observable3_10_22 = (float(len(observable3_10_22))/float(N_all3))*N_mult3
N_observable3_30_22 = (float(len(observable3_30_22))/float(N_all3))*N_mult3
N_observable3_100_22 = (float(len(observable3_100_22))/float(N_all3))*N_mult3
N_observable3_1000_22 = (float(len(observable3_1000_22))/float(N_all3))*N_mult3
N_recoverable3_22 = (float(len(recoverable3_22))/float(N_all3))*N_mult3
N_recoverable3_03_22 = (float(len(recoverable3_03_22))/float(N_all3))*N_mult3
N_recoverable3_1_22 = (float(len(recoverable3_1_22))/float(N_all3))*N_mult3
N_recoverable3_10_22 = (float(len(recoverable3_10_22))/float(N_all3))*N_mult3
N_recoverable3_30_22 = (float(len(recoverable3_30_22))/float(N_all3))*N_mult3
N_recoverable3_100_22 = (float(len(recoverable3_100_22))/float(N_all3))*N_mult3
N_recoverable3_1000_22 = (float(len(recoverable3_1000_22))/float(N_all3))*N_mult3
N_all3_norm_195 = (N_all3_195/N_all3)*N_mult3 #normalized
N_all3_03norm_195 = (N_all3_03_195/N_all3)*N_mult3
N_all3_1norm_195 = (N_all3_1_195/N_all3)*N_mult3
N_all3_10norm_195 = (N_all3_10_195/N_all3)*N_mult3
N_all3_30norm_195 = (N_all3_30_195/N_all3)*N_mult3
N_all3_100norm_195 = (N_all3_100_195/N_all3)*N_mult3
N_all3_1000norm_195 = (N_all3_1000_195/N_all3)*N_mult3
N_observable3_195 = (float(len(observable3_195))/float(N_all3))*N_mult3
N_observable3_03_195 = (float(len(observable3_03_195))/float(N_all3))*N_mult3
N_observable3_1_195 = (float(len(observable3_1_195))/float(N_all3))*N_mult3
N_observable3_10_195 = (float(len(observable3_10_195))/float(N_all3))*N_mult3
N_observable3_30_195 = (float(len(observable3_30_195))/float(N_all3))*N_mult3
N_observable3_100_195 = (float(len(observable3_100_195))/float(N_all3))*N_mult3
N_observable3_1000_195 = (float(len(observable3_1000_195))/float(N_all3))*N_mult3
N_recoverable3_195 = (float(len(recoverable3_195))/float(N_all3))*N_mult3
N_recoverable3_03_195 = (float(len(recoverable3_03_195))/float(N_all3))*N_mult3
N_recoverable3_1_195 = (float(len(recoverable3_1_195))/float(N_all3))*N_mult3
N_recoverable3_10_195 = (float(len(recoverable3_10_195))/float(N_all3))*N_mult3
N_recoverable3_30_195 = (float(len(recoverable3_30_195))/float(N_all3))*N_mult3
N_recoverable3_100_195 = (float(len(recoverable3_100_195))/float(N_all3))*N_mult3
N_recoverable3_1000_195 = (float(len(recoverable3_1000_195))/float(N_all3))*N_mult3
N_totalobsDist_array.append(float(N_all3_norm))
N_totalobservableobsDist_array.append(float(N_observable3))
N_totalrecoverableobsDist_array.append(float(N_recoverable3))
N_totalobsDist_array_03.append(float(N_all3_03norm))
N_totalobservableobsDist_array_03.append(float(N_observable3_03))
N_totalrecoverableobsDist_array_03.append(float(N_recoverable3_03))
N_totalobsDist_array_1.append(float(N_all3_1norm))
N_totalobservableobsDist_array_1.append(float(N_observable3_1))
N_totalrecoverableobsDist_array_1.append(float(N_recoverable3_1))
N_totalobsDist_array_10.append(float(N_all3_10norm))
N_totalobservableobsDist_array_10.append(float(N_observable3_10))
N_totalrecoverableobsDist_array_10.append(float(N_recoverable3_10))
N_totalobsDist_array_30.append(float(N_all3_30norm))
N_totalobservableobsDist_array_30.append(float(N_observable3_30))
N_totalrecoverableobsDist_array_30.append(float(N_recoverable3_30))
N_totalobsDist_array_100.append(float(N_all3_100norm))
N_totalobservableobsDist_array_100.append(float(N_observable3_100))
N_totalrecoverableobsDist_array_100.append(float(N_recoverable3_100))
N_totalobsDist_array_1000.append(float(N_all3_1000norm))
N_totalobservableobsDist_array_1000.append(float(N_observable3_1000))
N_totalrecoverableobsDist_array_1000.append(float(N_recoverable3_1000))
N_totalobsDist22_array.append(float(N_all3_norm_22))
N_totalobservableobsDist22_array.append(float(N_observable3_22))
N_totalrecoverableobsDist22_array.append(float(N_recoverable3_22))
N_totalobsDist22_array_03.append(float(N_all3_03norm_22))
N_totalobservableobsDist22_array_03.append(float(N_observable3_03_22))
N_totalrecoverableobsDist22_array_03.append(float(N_recoverable3_03_22))
N_totalobsDist22_array_1.append(float(N_all3_1norm_22))
N_totalobservableobsDist22_array_1.append(float(N_observable3_1_22))
N_totalrecoverableobsDist22_array_1.append(float(N_recoverable3_1_22))
N_totalobsDist22_array_10.append(float(N_all3_10norm_22))
N_totalobservableobsDist22_array_10.append(float(N_observable3_10_22))
N_totalrecoverableobsDist22_array_10.append(float(N_recoverable3_10_22))
N_totalobsDist22_array_30.append(float(N_all3_30norm_22))
N_totalobservableobsDist22_array_30.append(float(N_observable3_30_22))
N_totalrecoverableobsDist22_array_30.append(float(N_recoverable3_30_22))
N_totalobsDist22_array_100.append(float(N_all3_100norm_22))
N_totalobservableobsDist22_array_100.append(float(N_observable3_100_22))
N_totalrecoverableobsDist22_array_100.append(float(N_recoverable3_100_22))
N_totalobsDist22_array_1000.append(float(N_all3_1000norm_22))
N_totalobservableobsDist22_array_1000.append(float(N_observable3_1000_22))
N_totalrecoverableobsDist22_array_1000.append(float(N_recoverable3_1000_22))
N_totalobsDist195_array.append(float(N_all3_norm_195))
N_totalobservableobsDist195_array.append(float(N_observable3_195))
N_totalrecoverableobsDist195_array.append(float(N_recoverable3_195))
N_totalobsDist195_array_03.append(float(N_all3_03norm_195))
N_totalobservableobsDist195_array_03.append(float(N_observable3_03_195))
N_totalrecoverableobsDist195_array_03.append(float(N_recoverable3_03_195))
N_totalobsDist195_array_1.append(float(N_all3_1norm_195))
N_totalobservableobsDist195_array_1.append(float(N_observable3_1_195))
N_totalrecoverableobsDist195_array_1.append(float(N_recoverable3_1_195))
N_totalobsDist195_array_10.append(float(N_all3_10norm_195))
N_totalobservableobsDist195_array_10.append(float(N_observable3_10_195))
N_totalrecoverableobsDist195_array_10.append(float(N_recoverable3_10_195))
N_totalobsDist195_array_30.append(float(N_all3_30norm_195))
N_totalobservableobsDist195_array_30.append(float(N_observable3_30_195))
N_totalrecoverableobsDist195_array_30.append(float(N_recoverable3_30_195))
N_totalobsDist195_array_100.append(float(N_all3_100norm_195))
N_totalobservableobsDist195_array_100.append(float(N_observable3_100_195))
N_totalrecoverableobsDist195_array_100.append(float(N_recoverable3_100_195))
N_totalobsDist195_array_1000.append(float(N_all3_1000norm_195))
N_totalobservableobsDist195_array_1000.append(float(N_observable3_1000_195))
N_totalrecoverableobsDist195_array_1000.append(float(N_recoverable3_1000_195))
N_totalobsDist = np.sum(N_totalobsDist_array)
N_totalobsDist_03 = np.sum(N_totalobsDist_array_03)
N_totalobsDist_1 = np.sum(N_totalobsDist_array_1)
N_totalobsDist_10 = np.sum(N_totalobsDist_array_10)
N_totalobsDist_30 = np.sum(N_totalobsDist_array_30)
N_totalobsDist_100 = np.sum(N_totalobsDist_array_100)
N_totalobsDist_1000 = np.sum(N_totalobsDist_array_1000)
N_totalobservableobsDist = np.sum(N_totalobservableobsDist_array)
N_totalobservableobsDist_03 = np.sum(N_totalobservableobsDist_array_03)
N_totalobservableobsDist_1 = np.sum(N_totalobservableobsDist_array_1)
N_totalobservableobsDist_10 = np.sum(N_totalobservableobsDist_array_10)
N_totalobservableobsDist_30 = np.sum(N_totalobservableobsDist_array_30)
N_totalobservableobsDist_100 = np.sum(N_totalobservableobsDist_array_100)
N_totalobservableobsDist_1000 = np.sum(N_totalobservableobsDist_array_1000)
N_totalrecoverableobsDist = np.sum(N_totalrecoverableobsDist_array)
N_totalrecoverableobsDist_03 = np.sum(N_totalrecoverableobsDist_array_03)
N_totalrecoverableobsDist_1 = np.sum(N_totalrecoverableobsDist_array_1)
N_totalrecoverableobsDist_10 = np.sum(N_totalrecoverableobsDist_array_10)
N_totalrecoverableobsDist_30 = np.sum(N_totalrecoverableobsDist_array_30)
N_totalrecoverableobsDist_100 = np.sum(N_totalrecoverableobsDist_array_100)
N_totalrecoverableobsDist_1000 = np.sum(N_totalrecoverableobsDist_array_1000)
N_totalobsDist22 = np.sum(N_totalobsDist22_array)
N_totalobsDist22_03 = np.sum(N_totalobsDist22_array_03)
N_totalobsDist22_1 = np.sum(N_totalobsDist22_array_1)
N_totalobsDist22_10 = np.sum(N_totalobsDist22_array_10)
N_totalobsDist22_30 = np.sum(N_totalobsDist22_array_30)
N_totalobsDist22_100 = np.sum(N_totalobsDist22_array_100)  # api: numpy.sum
import pandas as pd
import plotly.plotly as py
from plotly.graph_objs import *
import plotly
import numpy as np
import plotly.tools as tls
plotly.tools.set_credentials_file(username='iwayankit', api_key='9syhwIKBYVUPY7uX20I9')
df = pd.read_csv(
filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
#filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data',
header=None,
sep=',')
df.columns=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid', 'class']
#df.colums=['alcohol','malic_acid','ash','alcalinity','magnesium','t_phenols','flavanoids','nonfla','proantho','color_intensity','hue','diluted','proline']
df.dropna(how="all", inplace=True) # drops the empty line at file-end
# split data table into data X and class labels y
X = df.iloc[:, 0:4].values
y = df.iloc[:, 4].values
from sklearn.preprocessing import StandardScaler
X_std = StandardScaler().fit_transform(X)
# plotting histograms
traces = []
legend = {0:False, 1:False, 2:False, 3:True}
colors = {'Iris-setosa': 'rgb(31, 119, 180)',
'Iris-versicolor': 'rgb(255, 127, 14)',
'Iris-virginica': 'rgb(44, 160, 44)'}
for col in range(4):
for key in colors:
traces.append(Histogram(x=X[y==key, col],
opacity=0.75,
xaxis='x%s' %(col+1),
marker=Marker(color=colors[key]),
name=key,
showlegend=legend[col]))
data = Data(traces)
layout = Layout(barmode='overlay',
xaxis=XAxis(domain=[0, 0.25], title='sepal length (cm)'),
xaxis2=XAxis(domain=[0.3, 0.5], title='sepal width (cm)'),
xaxis3=XAxis(domain=[0.55, 0.75], title='petal length (cm)'),
xaxis4=XAxis(domain=[0.8, 1], title='petal width (cm)'),
yaxis=YAxis(title='count'),
title='Distribution of the different Iris flower features')
fig = Figure(data=data, layout=layout)
py.plot(fig)
df.tail()
mean_vec = np.mean(X_std, axis=0)
cov_mat = (X_std - mean_vec).T.dot((X_std - mean_vec)) / (X_std.shape[0]-1)
print('Covariance matrix \n%s' %cov_mat)
print('NumPy covariance matrix: \n%s' %np.cov(X_std.T))
cor_mat1 = np.corrcoef(X_std.T)
eig_vals, eig_vecs = np.linalg.eig(cor_mat1)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
cor_mat2 = np.corrcoef(X.T)
eig_vals, eig_vecs = np.linalg.eig(cor_mat2)
print('Eigenvectors \n%s' %eig_vecs)
print('\nEigenvalues \n%s' %eig_vals)
u, s, v = np.linalg.svd(X_std.T)  # api: numpy.linalg.svd
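# A minimal follow-on sketch (not part of the original snippet): rank the eigenpairs by
# explained variance and project the standardized data onto the top two components.
# The names eig_pairs, var_exp, matrix_w and Y_pca are illustrative, not from the source.
eig_pairs = sorted(zip(np.abs(eig_vals), eig_vecs.T), key=lambda p: p[0], reverse=True)
var_exp = [val / np.sum(np.abs(eig_vals)) * 100 for val, _ in eig_pairs]  # percent of variance per PC
print('Explained variance (%%) \n%s' % var_exp)
matrix_w = np.hstack((eig_pairs[0][1].reshape(-1, 1), eig_pairs[1][1].reshape(-1, 1)))  # 4x2 projection matrix
Y_pca = X_std.dot(matrix_w)  # samples projected onto the first two principal components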
# -*- coding: utf-8 -*-
import pandas as pd
import os
import sys
import numpy as np
# Load datasets
ids = [ name for name in os.listdir(".") if os.path.isdir(name) ]
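# drop the trailing replicate letter from "...mirnaA"/"...mirnaB" directory names so the
# IDs line up with the metadata index used below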
ids_decap = [s[:-1] if s[-6:] in ["mirnaA","mirnaB"] else s for s in ids]
# Map samples
metadata = pd.read_csv(sys.argv[1]).loc[:,["ID","Source"]]
#metadata = pd.read_csv("../../../Data/NGS/TCGA_pilot/metadata.txt").loc[:,["ID","Source"]]
metadata.drop_duplicates(inplace=True)
metadata.set_index("ID",inplace=True)
metadata = metadata.loc[ids_decap,:]
id_names = list(set([s1+"-"+s2 if sum([s2 in n for n in ids])==1 else s1+"-"+s2+"1" for s1,s2 in zip(metadata.loc[:,"Source"],ids)]))
id_names.extend([s[:-1]+"2" for s in id_names if s[-7:]=="_mirna1"])
# Initialize dataframe
merged = pd.DataFrame(columns=['contig', 'position', 'variantID', 'refAllele', 'altAllele', 'refCount',
'altCount', 'totalCount', 'lowMAPQDepth', 'lowBaseQDepth', 'rawDepth',
'otherBases', 'improperPairs', 'sample', 'modif_id'])
ids = np.array(ids)  # api: numpy.array
import numpy as np
from matplotlib import pyplot as plt
import stat_tools as st
from datetime import datetime,timedelta
import ephem
from skimage.morphology import remove_small_objects
from scipy.ndimage import morphology,sobel
from scipy.ndimage.filters import maximum_filter,laplace
import mncc, geo
from scipy import interpolate, stats
import pickle
BND_RED_THRESH, BND_RBR_THRESH = 2, 0.012
DRED_THRESH, DRBR_THRESH = 150, 157
STD_RED_THRESH, STD_RBR_THRESH = 1.2, 0.012
static_mask_path='~/data/masks/'
coordinate = {'HD2C':[40.87203321,-72.87348295],'HD815_2':[40.87189059,-72.873687],\
'HD490':[40.865968816,-72.884647222], 'HD1B':[40.8575056,-72.8547344], \
'HD1A':[40.8580088,-72.8575717], 'HD1C':[40.85785,-72.8597],
'HD5A':[40.947353, -72.899617], 'HD5B':[40.948044,-72.898372],
'HD3A':[40.897122,-72.879053], 'HD3B':[40.8975,-72.877497],
'HD4A':[40.915708,-72.892406],'HD4B':[40.917275,-72.891592],
'HD2B':[40.872341,-72.874354]}
params = {'HD2C':[2821.0000,1442.8231,1421.0000,0.1700,-0.0135,-2.4368,0.3465,-0.0026,-0.0038],\
'HD815_2':[2821.0000,1424,1449.0000,0.0310,-0.0114,-0.9816,0.3462,-0.0038,-0.0030 ],\
'HD490':[2843.0000,1472.9511,1482.6685,0.1616,0.0210,-0.5859,0.3465,-0.0043,-0.0030], \
'HD1B':[2830.0007,1473.2675,1459.7203,-0.0986,-0.0106,-1.2440,0.3441,-0.0015,-0.0042], \
'HD1A':[2826.5389,1461.0000,1476.6598,-0.0097,0.0030,2.9563,0.3415,0.0004,-0.0044], \
'HD1C':[2812.7874,1475.1453,1415.0000,0.1410,-0.0126,0.4769,0.3441,0.0004,-0.0046],
'HD4A':[ 2815.9408,1411.8050,1500.0000,-0.0300,-0.0341,-1.4709,0.3555,-0.0136,0.0005 ], \
'HD4B':[ 2832.5996,1429.9573,1465.0000,-0.0340,-0.0352,0.4037,0.3468,-0.0111,-0.0003 ], \
'HD5A':[2813.7462,1472.2066,1446.3682,0.3196,-0.0200,-1.9636,0.3444,-0.0008,-0.0042], \
'HD5B':[2812.1208,1470.1824,1465.0000,-0.1228,-0.0020,-0.5258,0.3441,-0.0001,-0.0042],\
# 'HD3A':[2807.8902,1436.1619,1439.3879,-0.3942,0.0527,2.4658,0.3334,0.0129,-0.0085],\
'HD3A':[ 2826.5457,1461.8204,1465.0000,-0.4073,0.0054,1.9957,0.3571,-0.0177,0.0009 ],\
'HD3B':[ 2821.2941,1469.8294,1465.0000,0.1918,-0.0149,-0.7192,0.3619,-0.0248,0.0043 ],\
'HD2B':[2810.0000,1428.1154,1438.3745,0.1299,0.0167,2.0356,0.3480,-0.0049,-0.0025]}
deg2rad=np.pi/180
class camera:
### variables with the suffix '0' refer to the raw, undistorted image
def __init__(self, camID, max_theta=70,nx=2000,ny=2000):
self.camID=camID
self.lat, self.lon=coordinate[camID]
nx0=ny0=params[camID][0]
nr0=(nx0+ny0)/4
xstart=int(params[camID][2]-nx0/2+0.5); ystart=int(params[camID][1]-ny0/2+0.5)
self.nx0=int(nx0+0.5); self.ny0=int(ny0+0.5)
# self.cx,self.cy=params[camID][2:0:-1]
self.max_theta=max_theta
#####compute the zenith and azimuth angles for each pixel
x0,y0=np.meshgrid(np.linspace(-self.nx0//2,self.nx0//2,self.nx0),np.linspace(-self.ny0//2,self.ny0//2,self.ny0));
r0=np.sqrt(x0**2+y0**2)/nr0;
self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]
self.rotation,self.beta,self.azm=params[camID][3:6]
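# Invert the radial lens model r(theta) = c1*theta + c2*theta**3 + c3*theta**5 numerically:
# tabulate theta as a root of the quintic on a grid of normalized radii (rr), then
# interpolate to get theta0 for every pixel radius r0.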
roots=np.zeros(51)
rr=np.arange(51)/100.0
self.c1,self.c2,self.c3=params[camID][6:9]
for i,ref in enumerate(rr):
roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])
theta0 = np.interp(r0/2,rr,roots)
phi0 = np.arctan2(x0,y0) - self.rotation ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise; NOTE: pysolar uses a sub-standard definition
phi0 = phi0%(2*np.pi)
#####correction for the mis-pointing error
k = np.array((np.sin(self.azm), np.cos(self.azm)  # api: numpy.cos (snippet ends mid-expression in the source)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import xavier_initializer
from tensorflow.contrib.distributions import RelaxedOneHotCategorical as Gumbel
from utils.utils import softmax
""" Module containing shared functions and structures for DGMS """
glorotNormal = xavier_initializer(uniform=False)
initNormal = tf.random_normal_initializer(stddev=1e-3)
""" Probability functions """
def gaussianLogDensity(inputs, mu, log_var):
""" Gaussian log density """
D = tf.cast(tf.shape(inputs)[-1], tf.float32)
xc = inputs - mu
return -0.5 * (tf.reduce_sum((xc * xc) / tf.exp(log_var), axis=-1) +
tf.reduce_sum(log_var, axis=-1) + D * tf.log(2.0 * np.pi))
def gaussianLogDensity_axis(inputs, mu, log_var):
""" Gaussian log density, but with no summing along axes """
xc = inputs - mu
return -0.5 * ((xc * xc) / tf.exp(log_var) + log_var + tf.log(2.0 * np.pi))
def gaussianLogDensity_np(inputs, mu, log_var):
""" Gaussian log density, using numpy """
D = inputs.shape[-1]
xc = inputs - mu
return -0.5 * (np.sum((xc * xc) / np.exp(log_var), axis=-1) +
np.sum(log_var, axis=-1) + D * np.log(2.0 * np.pi))
def standardNormalLogDensity(inputs):
""" Standard normal log density """
mu = tf.zeros_like(inputs)
log_var = tf.log(tf.ones_like(inputs))
return gaussianLogDensity(inputs, mu, log_var)
def bernoulliLogDensity(inputs, logits):
""" Bernoulli log density """
return -tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
labels=inputs, logits=logits), axis=-1)
def multinoulliLogDensity(inputs, preds, on_priors=False):
""" Categorical log density """
if on_priors is False:
return -tf.nn.softmax_cross_entropy_with_logits(
labels=inputs, logits=preds)
else:
return tf.reduce_sum(inputs * tf.log(preds + 1e-10), axis=-1)
def multinoulliUniformLogDensity(logits, dim=-1, order=True,):
""" Uniform Categorical log density """
if order is True:
labels = tf.divide(tf.ones_like(logits),
tf.norm(tf.ones_like(logits),
ord=1, axis=dim,
keepdims=True))
return - tf.nn.softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
dim=dim)
else:
labels = tf.ones_like(logits)
return -tf.nn.softmax_cross_entropy_with_logits(labels=logits,
logits=labels,
dim=dim)
def discreteUniformKL(logits, n_size, dim=-1):
""" KL divergence for discrete/categorical probabilties
returns KL(q||p) where q is a tf tensor in logits and
p, not given, is uniform.
"""
return - tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.nn.softmax(logits),
logits=logits,
dim=dim) + tf.log(n_size)
def discreteKL(q_logits, p_true, n_size, dim=-1):
""" KL divergence for discrete/categorical probabilties
returns KL(q||p) where q is a tf tensor in logits and p is given a p_true
is a probability vector.
"""
q_prob = tf.nn.softmax(q_logits)
return - tf.nn.softmax_cross_entropy_with_logits_v2(
labels=q_prob, logits=q_logits, dim=dim) - tf.reduce_sum(
q_prob * tf.log(p_true + 1e-10), axis=dim)
def discreteUniformKL_np(logits, n_size, dim=-1):
""" KL divergence for discrete/categorical probabilties
returns KL(q||p) where q is a np array in logits and
p, not given, is uniform.
"""
return - softmax(logits) + np.log(n_size)
def discreteUniformKL_np_probs(probs, n_size, dim=-1):
""" KL divergence for discrete/categorical probabilties
returns KL(q||p) where q is a np array of probabilities and
p, not given, is uniform.
"""
return np.sum(probs * np.log(probs + 10e-10), axis=dim) + np.log(n_size)
def gumbelLogDensity(inputs, logits, temp):
""" log density of a Gumbel distribution for tf inputs"""
dist = Gumbel(temperature=temp, logits=logits)
return dist.log_prob(inputs)
def sampleNormal(mu, logvar, mc_samps):
""" return a reparameterized sample from a Gaussian distribution """
shape = tf.concat([tf.constant([mc_samps]), tf.shape(mu)], axis=-1)
eps = tf.random_normal(shape, dtype=tf.float32)
return mu + eps * tf.sqrt(tf.exp(logvar))
def sampleNormal_np(mu, logvar, mc_samps):
""" return a reparameterized sample from a Gaussian distribution """
eps = np.random.normal(size=mu.shape[0])  # api: numpy.random.normal
import os
import re
from time import time, sleep
# import h5py
from scipy.io import loadmat
import numpy as np
from PIL import Image
# from cv2 import imread, resize, INTER_CUBIC, INTER_AREA
GAN_space = "fc6"
generator = None
def load_GAN(name=GAN_space):
from Generator import Generator
generator = Generator(name=GAN_space)
return generator
#%%
# def read_image(image_fpath):
# # BGR is flipped to RGB. why BGR?:
# # Note In the case of color images, the decoded images will have the channels stored in B G R order.
# # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html
# imarr = imread(image_fpath)[:, :, ::-1]
# return imarr
#
#
# def write_images(imgs, names, path, size=None, timeout=0.5, format='bmp'):
# """
# Saves images as 24-bit bmp files to given path with given names
# :param imgs: list of images as numpy arrays with shape (w, h, c) and dtype uint8
# :param names: filenames of images **including or excluding** '.bmp'
# :param path: path to save to
# :param size: size (pixels) to resize image to; default is unchanged
# :param timeout: timeout for trying to write each image
# :return: None
# """
# for im_arr, name in zip(imgs, names):
# if size is not None and im_arr.shape[1] != size:
# if im_arr.shape[1] < size: # upsampling
# im_arr = resize(im_arr, (size, size), interpolation=INTER_CUBIC)
# else: # downsampling
# im_arr = resize(im_arr, (size, size), interpolation=INTER_AREA)
# img = Image.fromarray(im_arr)
# trying = True
# t0 = time()
# if name.rfind("."+format ) != len(name) - 4:
# name += "."+format
# while trying and time() - t0 < timeout:
# try:
# img.save(os.path.join(path, name))
# trying = False
# except IOError as e:
# if e.errno != 35:
# raise
# sleep(0.01)
def write_codes(codes, names, path, timeout=0.5):
"""
Saves codes as npy files (1 in each file) to given path with given names
:param codes: list of codes, each a numpy array (one code is saved per .npy file). NOTE only thing in a .npy file is a single code.
:param names: filenames for the codes, excluding extension; should be paired one-to-one with codes.
:param path: path to save to
:param timeout: timeout for trying to write each code
:return: None
"""
for name, code in zip(names, codes):
trying = True
t0 = time()
while trying and time() - t0 < timeout:
try:
np.save(os.path.join(path, name), code, allow_pickle=False)
trying = False
# File "/Users/wuxiao/Documents/MCO/Rotations/Kreiman Lab/scripts/Playtest6/utils_old.py", line
# 56, in write_codes
# np.save(os.path.join(path, name), code, allow_pickle=False)
# File "/usr/local/lib/python3.6/site-packages/numpy/lib/npyio.py", line 514, in save
# fid.close()
# OSError: [Errno 89] Operation canceled
except (OSError, IOError) as e:
if e.errno != 35 and e.errno != 89:
raise
sleep(0.01)
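# Hypothetical usage sketch (directory and names below are illustrative, not from the source):
# codes_example = [np.random.randn(4096) for _ in range(2)]
# write_codes(codes_example, ["gen000_000001", "gen000_000002"], "/tmp/codes")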
def savez(fpath, save_kwargs, timeout=1):
"""
wraps numpy.savez, implementing OSError tolerance within timeout
"Save several arrays into a single file in uncompressed ``.npz`` format." DUMP EVERYTHING!
"""
trying = True
t0 = time()
while trying and time() - t0 < timeout:
try:
np.savez(fpath, **save_kwargs)
except IOError as e:
if e.errno != 35:
raise
sleep(0.01)
save_scores = savez # a synonym for backwards compatibility
def load_codes(codedir, size):
""" load all the *.npy files in the `codedir`. and randomly sample # `size` of them.
make sure enough codes for requested size
"""
codefns = sorted([fn for fn in os.listdir(codedir) if '.npy' in fn])
assert size <= len(codefns), 'not enough codes (%d) to satisfy size (%d)' % (len(codefns), size)
# load codes
codes = []
for codefn in np.random.choice(codefns, size=min(len(codefns), size), replace=False):
code = np.load(os.path.join(codedir, codefn), allow_pickle=False).flatten()
codes.append(code)
codes = np.array(codes)
return codes
def load_codes2(codedir, size):
""" unlike load_codes, also returns name of load """
# make sure enough codes for requested size
codefns = sorted([fn for fn in os.listdir(codedir) if '.npy' in fn])
assert size <= len(codefns), 'not enough codes (%d) to satisfy size (%d)' % (len(codefns), size)
# load codes
codefns = list(np.random.choice(codefns, size=min(len(codefns), size), replace=False))
codes = []
for codefn in codefns:
code = np.load(os.path.join(codedir, codefn), allow_pickle=False).flatten()
codes.append(code)
codes = np.array(codes)
return codes, codefns
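# Minimal usage sketch (added for illustration; paths and shapes are hypothetical):
# round-trip a few code vectors through write_codes / load_codes2. write_codes
# stores one vector per .npy file; load_codes2 reads a random subset back
# together with the filenames it drew.
def _example_write_and_load_codes():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    codes = [np.random.randn(4096) for _ in range(5)]
    names = ['gen000_%03d' % i for i in range(5)]
    write_codes(codes, names, tmpdir)
    sampled, fns = load_codes2(tmpdir, size=3)  # sampled.shape == (3, 4096)
    return sampled, fns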
def load_codes_search(codedir, srckey, size=None):
"""Load the code files with `srckey` in its name.
:param codedir:
:param srckey: keyword to identify / filter the code. e.g. "gen298_010760.npy", "gen298_010760", "gen298"
:param size: Defaultly None. if there is too many codes, one can use this to specify the sample size
:return: codes and corresponding file names `codes, codefns`
Added @sep.19
"""
# make sure enough codes for requested size
codefns = sorted([fn for fn in os.listdir(codedir) if ('.npy' in fn) and (srckey in fn)])
    if size is not None:  # a size argument means the matching codes should be randomly subsampled
assert size <= len(codefns), 'not enough codes (%d) to satisfy size (%d)' % (len(codefns), size)
codefns = list(np.random.choice(codefns, size=min(len(codefns), size), replace=False))
# load codes by the codefns
codes = []
for codefn in codefns:
code = np.load(os.path.join(codedir, codefn), allow_pickle=False).flatten()
codes.append(code)
codes = np.array(codes)
return codes, codefns
def load_block_mat(matfpath):
attempts = 0
while True:
try:
import h5py
with h5py.File(matfpath, 'r') as f:
imgids_refs = np.array(f['stimulusID'])[0]
imgids = []
for ref in imgids_refs:
imgpath = ''.join(chr(i) for i in f[ref])
imgids.append(imgpath.split('\\')[-1])
imgids = np.array(imgids)
scores = np.array(f['tEvokedResp']) # shape = (imgs, channels)
return imgids, scores
except (KeyError, IOError, OSError): # if broken mat file or unable to access
attempts += 1
if attempts % 100 == 0:
print('%d failed attempts to read .mat file' % attempts)
sleep(0.001)
def load_block_mat_code(matfpath):
attempts = 0
while True:
try:
data = loadmat(matfpath) # need the mat file to be saved in a older version
codes = data['codes']
ids = data['ids']
imgids = []
for id in ids[0]:
imgids.append(id[0])
return imgids, codes
except (KeyError, IOError, OSError): # if broken mat file or unable to access
attempts += 1
if attempts % 100 == 0:
print('%d failed attempts to read .mat file' % attempts)
sleep(0.001)
def set_dynamic_parameters_by_file(fpath, dynamic_parameters):
try:
with open(fpath, 'r') as file:
line = 'placeholder'
while len(line) > 0:
line = file.readline()
if ':' not in line:
continue
if '#' in line:
line = line[:line.find('#')]
if len(line.split(':')) != 2:
continue
key, val = line.split(':')
key = key.strip()
val = val.strip()
try:
# if key is not in dynamic_parameter.keys(), will throw KeyError
# if val (a str literal) cannot be converted to dynamic_parameter.type, will throw ValueError
dynamic_parameters[key].set_value(val)
except (KeyError, ValueError):
continue
except IOError:
print('cannot open dynamic parameters file %s' % fpath)
def write_dynamic_parameters_to_file(fpath, dynamic_parameters):
with open(fpath, 'w') as file:
for key in sorted(list(dynamic_parameters.keys())):
file.write('%s:\t%s\t# %s\n' % (key, str(dynamic_parameters[key].value), dynamic_parameters[key].description))
# https://nedbatchelder.com/blog/200712/human_sorting.html
def tryint(s):
try:
return int(s)
except ValueError:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [tryint(c) for c in re.split('([0-9]+)', s)]
def sort_nicely(l):
""" Return the given list sorted in the way that humans expect.
"""
newl = l[:]
newl.sort(key=alphanum_key)
return newl
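# Minimal usage sketch (added for illustration): natural sorting keeps numeric
# chunks in numeric order, unlike plain lexicographic sorting.
def _example_sort_nicely():
    # sorted(...) would give ['img1', 'img10', 'img2']
    return sort_nicely(['img2', 'img10', 'img1'])  # -> ['img1', 'img2', 'img10']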
#%% Dir name manipulation (for experimental code)
def add_neuron_subdir(neuron, exp_dir):
    ''' Append a subdirectory named after the neuron, e.g. ('caffe-net', 'fc6', 30), to exp_dir (creating it if it doesn't exist) '''
if len(neuron) == 5:
subdir = '%s_%s_%04d_%d,%d' % (neuron[0], neuron[1].replace('/', '_'), neuron[2], neuron[3], neuron[4])
else:
subdir = '%s_%s_%04d' % (neuron[0], neuron[1].replace('/', '_'), neuron[2])
this_exp_dir = os.path.join(exp_dir, subdir)
for dir_ in (this_exp_dir,):
if not os.path.isdir(dir_):
os.mkdir(dir_)
return this_exp_dir
def add_trial_subdir(neuron_dir, trial_title):
''' Add trial title to the directory with neuron name on it (make the dir in case it doesn't exist) '''
trialdir = os.path.join(neuron_dir, trial_title)
if not os.path.isdir(trialdir):
os.mkdir(trialdir)
return trialdir
#%% Code Geometrical Manipulation
def simplex_interpolate(wvec, code_array):
    '''Simplex interpolation/extrapolation between several codes.
    Codes can be given as an array (each row is a code) or as a list.
    wvec: weight vector; a scalar for 2 codes, or a list / array of matching length for more codes.
    '''
if type(code_array) is list:
code_array = np.asarray(code_array)
code_n = code_array.shape[0]
if np.isscalar(wvec):
        w_vec = np.asarray([1 - wvec, wvec])  # target API: numpy.asarray
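# A self-contained sketch of the interpolation idea above, under the assumption
# that the weighted codes are simply combined linearly (this is not necessarily
# the original implementation):
def simplex_interpolate_sketch(wvec, code_array):
    '''Return the weighted combination sum_i w_i * code_array[i].'''
    code_array = np.asarray(code_array)
    if np.isscalar(wvec):
        w_vec = np.asarray([1 - wvec, wvec])  # a scalar weight blends the first two codes
    else:
        w_vec = np.asarray(wvec)
    assert w_vec.shape[0] == code_array.shape[0], 'need one weight per code'
    return np.dot(w_vec, code_array)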
# Copyright (c) 2019 <NAME>
# Universidad Carlos III de Madrid and University of Sheffield
from GPy import kern
from GPy.util import choleskies
from GPy.util import linalg
from GPy.core.parameterization.param import Param
import random
import warnings
import numpy as np
import climin
from functools import partial
import matplotlib.pyplot as plt
from matplotlib2tikz import save as tikz_save
def get_batch_scales(X_all, X):
batch_scales = []
for t, X_all_task in enumerate(X_all):
batch_scales.append(float(X_all_task.shape[0]) / float(X[t].shape[0]))
return batch_scales
def true_u_functions(X_list, Q):
u_functions = []
amplitude = (1.5-0.5)*np.random.rand(Q,3) + 0.5
freq = (3-1)*np.random.rand(Q,3) + 1
shift = 2*np.random.rand(Q,3)
for X in X_list:
u_task = np.empty((X.shape[0],Q))
for q in range(Q):
u_task[:,q,None] = 3*amplitude[q,0]*np.cos(freq[q,0]*np.pi*X + shift[q,0]*np.pi) - \
2*amplitude[q,1]*np.sin(2*freq[q,1]*np.pi*X + shift[q,1]*np.pi) + \
amplitude[q,2] * np.cos(4*freq[q, 2] * np.pi * X + shift[q, 2] * np.pi)
u_functions.append(u_task)
return u_functions
def true_f_functions(true_u, W_list, D, likelihood_list, Y_metadata):
true_f = []
f_index = Y_metadata['function_index'].flatten()
d_index = Y_metadata['d_index'].flatten()
for t, u_task in enumerate(true_u):
Ntask = u_task.shape[0]
_, num_f_task, _ = likelihood_list[t].get_metadata()
F = np.zeros((Ntask, num_f_task))
for q, W in enumerate(W_list):
for d in range(D):
if f_index[d] == t:
F[:,d_index[d],None] += np.tile(W[d].T, (Ntask, 1)) * u_task[:, q, None]
true_f.append(F)
return true_f
def mini_slices(n_samples, batch_size):
"""Yield slices of size `batch_size` that work with a container of length
`n_samples`."""
n_batches, rest = divmod(n_samples, batch_size)
if rest != 0:
n_batches += 1
return [slice(i * batch_size, (i + 1) * batch_size) for i in range(n_batches)]
def draw_mini_slices(n_samples, batch_size, with_replacement=False):
    slices = mini_slices(n_samples, batch_size)
    idxs = list(range(len(slices)))
    if with_replacement:
        while True:  # keep yielding indefinitely, matching the branch below
            yield random.choice(slices)
    else:
        while True:
            random.shuffle(idxs)  # shuffle in place; shuffling a temporary copy had no effect
            for i in idxs:
                yield slices[i]
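# Minimal usage sketch (added for illustration): mini_slices covers every sample,
# including a final short batch, and draw_mini_slices yields batches indefinitely.
def _example_mini_slices():
    slices = mini_slices(n_samples=10, batch_size=4)  # [slice(0, 4), slice(4, 8), slice(8, 12)]
    batch_iter = draw_mini_slices(10, 4)
    first = next(batch_iter)                          # a slice drawn from `slices`
    return slices, first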
def latent_functions_prior(Q, lenghtscale=None, variance=None, input_dim=None, kname=None):
    if lenghtscale is None:
        lenghtscale = np.random.rand(Q)
    if variance is None:
        variance = np.random.rand(Q)
    if kname is None:
        kname = 'rbf'
kern_list = []
for q in range(Q):
if kname=='rbf':
kern_q = kern.RBF(input_dim=input_dim, lengthscale=lenghtscale[q], variance=variance[q], name='rbf')
else:
kern_q = kern.Matern32(input_dim=input_dim, lengthscale=lenghtscale[q], variance=variance[q], name='matern')
kern_q.name = 'kern_q'+str(q)
kern_list.append(kern_q)
return kern_list
def random_W_kappas(Q,D,rank, experiment=False):
W_list = []
kappa_list = []
for q in range(Q):
p = np.random.binomial(n=1, p=0.5*np.ones((D,1)))
Ws = p*np.random.normal(loc=0.5, scale=0.5, size=(D,1)) - (p-1)*np.random.normal(loc=-0.5, scale=0.5, size=(D,1))
        W_list.append(Ws / np.sqrt(rank))  # entries should be both positive and negative
if experiment:
kappa_list.append(np.zeros(D))
else:
kappa_list.append(np.zeros(D))
return W_list, kappa_list
def ICM(input_dim, output_dim, kernel, rank, W=None, kappa=None, name='ICM'):
"""
Builds a kernel for an Intrinsic Coregionalization Model
    :param input_dim: input dimensionality (does not include the dimension of the output indices)
    :param output_dim: number of outputs
    :param kernel: kernel that will be multiplied by the coregionalization kernel (matrix B)
    :type kernel: a GPy kernel
    :param rank: rank of the coregionalization matrix W
    :type rank: integer
"""
kern_q = kernel.copy()
if kernel.input_dim != input_dim:
kernel.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
B = kern.Coregionalize(input_dim=input_dim, output_dim=output_dim, rank=rank, W=W, kappa=kappa)
B.name = name
K = kern_q.prod(B, name=name)
return K, B
def LCM(input_dim, output_dim, kernels_list, W_list, kappa_list, rank, name='B_q'):
"""
    Builds a kernel for a Linear Coregionalization Model
    :param input_dim: input dimensionality (does not include the dimension of the output indices)
    :param output_dim: number of outputs
    :param kernels_list: kernels, each multiplied by its own coregionalization kernel (matrix B_q)
    :type kernels_list: list of GPy kernels
    :param rank: rank of each coregionalization matrix W
    :type rank: integer
"""
B_q = []
K, B = ICM(input_dim, output_dim, kernels_list[0], W=W_list[0], kappa=kappa_list[0], rank=rank, name='%s%s' %(name,0))
B_q.append(B)
for q, kernel in enumerate(kernels_list[1:]):
Kq, Bq = ICM(input_dim, output_dim, kernel, W=W_list[q+1], kappa=kappa_list[q+1], rank=rank, name='%s%s' %(name,q+1))
B_q.append(Bq)
K += Kq
return K, B_q
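# Minimal usage sketch (added for illustration; Q, D, input_dim and rank are
# arbitrary choices): build Q latent RBF kernels plus random coregionalization
# weights, then combine them into a single LCM kernel over D outputs.
def _example_build_lcm_kernel(Q=2, D=3, input_dim=1, rank=1):
    kern_list = latent_functions_prior(Q, input_dim=input_dim)
    W_list, kappa_list = random_W_kappas(Q, D, rank)
    K, B_list = LCM(input_dim, D, kern_list, W_list, kappa_list, rank)
    return K, B_list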
def cross_covariance(X, Z, B, kernel_list, d):
"""
Builds the cross-covariance cov[f_d(x),u(z)] of a Multi-output GP
:param X: Input data
:param Z: Inducing Points
    :param B: list of coregionalization matrices B_q
:param kernel_list: Kernels of u_q functions
:param d: output function f_d index
:return: Kfdu
"""
N,_ = X.shape
M,Dz = Z.shape
Q = len(B)
Xdim = int(Dz/Q)
Kfdu = np.empty([N,M*Q])
for q, B_q in enumerate(B):
Kfdu[:, q * M:(q * M) + M] = B_q.W[d] * kernel_list[q].K(X, Z[:, q*Xdim:q*Xdim+Xdim])
#Kfdu[:,q*M:(q*M)+M] = B_q.W[d]*kernel_list[q].K(X,Z[:,q,None])
#Kfdu[:, q * M:(q * M) + M] = B_q.B[d,d] * kernel_list[q].K(X, Z[:,q,None])
return Kfdu
def function_covariance(X, B, kernel_list, d):
"""
Builds the cross-covariance Kfdfd = cov[f_d(x),f_d(x)] of a Multi-output GP
:param X: Input data
:param B: Coregionalization matrix
:param kernel_list: Kernels of u_q functions
:param d: output function f_d index
:return: Kfdfd
"""
N,_ = X.shape
Kfdfd = np.zeros((N, N))
for q, B_q in enumerate(B):
Kfdfd += B_q.B[d,d]*kernel_list[q].K(X,X)
return Kfdfd
def latent_funs_cov(Z, kernel_list):
"""
Builds the full-covariance cov[u(z),u(z)] of a Multi-output GP
for a Sparse approximation
:param Z: Inducing Points
:param kernel_list: Kernels of u_q functions priors
:return: Kuu
"""
Q = len(kernel_list)
M,Dz = Z.shape
Xdim = int(Dz/Q)
#Kuu = np.zeros([Q*M,Q*M])
Kuu = np.empty((Q, M, M))
Luu = np.empty((Q, M, M))
Kuui = np.empty((Q, M, M))
for q, kern in enumerate(kernel_list):
Kuu[q, :, :] = kern.K(Z[:,q*Xdim:q*Xdim+Xdim],Z[:,q*Xdim:q*Xdim+Xdim])
Luu[q, :, :] = linalg.jitchol(Kuu[q, :, :])
Kuui[q, :, :], _ = linalg.dpotri(np.asfortranarray(Luu[q, :, :]))
return Kuu, Luu, Kuui
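# Summary of the covariance structure assembled by the three helpers above
# (notation as in the code: B_q.W / B_q.B are the coregionalization parameters,
# k_q the latent kernels, Z holds one block of inducing inputs per latent u_q):
#   cross_covariance:    cov[f_d(x), u_q(z_m)] = B_q.W[d] * k_q(x, z_m)
#   function_covariance: cov[f_d(x), f_d(x')]  = sum_q B_q.B[d, d] * k_q(x, x')
#   latent_funs_cov:     cov[u_q(z), u_q(z')]  = k_q(z, z'), returned together with
#                        its Cholesky factor Luu and inverse Kuui for each q.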
def latent_funs_conditional(Z, Zold, kernel_list):
Q = len(kernel_list)
M, Dz = Z.shape
Mold, Dz_old = Zold.shape
Xdim = int(Dz/Q)
Kuu_cond = np.empty((Q, M, Mold))
for q, kern in enumerate(kernel_list):
Kuu_cond[q, :, :] = kern.K(Z[:,q*Xdim:q*Xdim+Xdim], Zold[:,q*Xdim:q*Xdim+Xdim])
return Kuu_cond
def conditional_prior(Z, Zold, kern_list_old, phi_means, phi_chols):
M, Dz = Z.shape
Mold, _ = Zold.shape
Q = len(kern_list_old)
# Algebra for q(u):
#phi_m = phi_means.copy()
phi_L = choleskies.flat_to_triang(phi_chols)
    phi_S = np.empty((Q, Mold, Mold))
    for q in range(Q):
        phi_S[q, :, :] = np.dot(phi_L[q, :, :], phi_L[q, :, :].T)
Mu = np.empty((Q, M, 1))
Kuu = np.empty((Q, M, M))
Luu = np.empty((Q, M, M))
Kuui = np.empty((Q, M, M))
Kuu_old, Luu_old, Kuui_old = latent_funs_cov(Zold, kern_list_old)
Kuu_new, _, _ = latent_funs_cov(Z, kern_list_old)
Kuu_cond = latent_funs_conditional(Z, Zold, kern_list_old)
for q, kern in enumerate(kern_list_old):
R, _ = linalg.dpotrs(np.asfortranarray(Luu_old[q, :, :]), Kuu_cond[q, :, :].T)
Auu = R.T # Kuu_cond * Kuui
Mu[q, :] = np.dot(Auu, phi_means[:, q, None])
Kuu[q, :, :] = Kuu_new[q, :, :] + np.dot(np.dot(R.T, phi_S[q, :, :]), R) - np.dot(Kuu_cond[q, :, :], R)
Luu[q, :, :] = linalg.jitchol(Kuu[q, :, :])
Kuui[q, :, :], _ = linalg.dpotri(np.asfortranarray(Luu[q, :, :]))
return Mu, Kuu, Luu, Kuui
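# What conditional_prior computes, written out (a summary of the code above, with
# A_q = Kuu_cond_q * Kuu_old_q^{-1} and the old variational posterior
# q(u_old_q) = N(phi_means_q, phi_S_q)):
#   Mu_q  = A_q phi_means_q
#   Kuu_q = Kuu_new_q + A_q phi_S_q A_q^T - Kuu_cond_q Kuu_old_q^{-1} Kuu_cond_q^T
# i.e. the moments of the new inducing outputs conditioned on the variational
# posterior over the old ones; Luu and Kuui are the Cholesky factor and inverse.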
def hyperparams_new_to_old(model_new, model_old):
model_new.update_model(False)
Q = len(model_old.kern_list)
model_new.Zold = model_old.Z.copy() + 0.
for q in range(Q):
model_new.kern_list_old[q].lengthscale = model_old.kern_list[q].lengthscale.copy() + 0.
model_new.kern_list_old[q].variance = model_old.kern_list[q].variance.copy() + 0.
#print(model_new.B_list[q].W[0,0])# = 0
#model_new.B_list[q].W[0,0] = 0.
#print(model_new.B_list[q].W[0, 0])
#print(model_new.B_list[q].W.shape)
#print(model_old.B_list[q].W.copy() + 0.)
model_new.B_list[q].W[:] = model_old.B_list[q].W + 0.
model_new.initialize_parameter()
def variational_new_to_old_online(q_new_means, q_new_chols):
#q_old_means = Param('phi', q_new_means + 0.)
#q_old_chols = Param('LO_chols', q_new_chols + 0.)
q_old_means = Param('phi', q_new_means)
q_old_chols = Param('LO_chols', q_new_chols)
return q_old_means, q_old_chols
def variational_new_to_old_offline(q_old_means, q_old_chols):
means_old = q_old_means.copy() + 0.
chols_old = q_old_chols.copy() + 0.
return means_old, chols_old
def generate_toy_U(X,Q):
arg = np.tile(X, (1,Q))
rnd = np.tile(np.random.rand(1,Q), (X.shape))
U = 2*rnd*np.sin(10*rnd*arg + np.random.randn(1)) + 2*rnd*np.cos(20*rnd*arg + np.random.randn(1))
return U
def _gradient_reduce_numpy(coreg, dL_dK, index, index2):
index, index2 = index[:,0], index2[:,0]
dL_dK_small = np.zeros_like(coreg.B)
for i in range(coreg.output_dim):
tmp1 = dL_dK[index==i]
for j in range(coreg.output_dim):
dL_dK_small[j,i] = tmp1[:,index2==j].sum()
return dL_dK_small
def _gradient_B(coreg, dL_dK, index, index2):
index, index2 = index[:,0], index2[:,0]
B = coreg.B
isqrtB = 1 / np.sqrt(B)
dL_dK_small = np.zeros_like(B)
for i in range(coreg.output_dim):
tmp1 = dL_dK[index==i]
for j in range(coreg.output_dim):
dL_dK_small[j,i] = (0.5 * isqrtB[i,j] * tmp1[:,index2==j]).sum()
return dL_dK_small
def update_gradients_diag(coreg, dL_dKdiag):
dL_dKdiag_small = np.array([dL_dKdiag_task.sum() for dL_dKdiag_task in dL_dKdiag])
    coreg.W.gradient = 2.*coreg.W*dL_dKdiag_small[:, None]  # the factor of 2 comes from differentiating W**2 on the diagonal of B = W W^T + diag(kappa)
coreg.kappa.gradient = dL_dKdiag_small
def update_gradients_full(coreg, dL_dK, X, X2=None):
index = np.asarray(X, dtype=np.int)
if X2 is None:
index2 = index
else:
index2 = np.asarray(X2, dtype=np.int)
dL_dK_small = _gradient_reduce_numpy(coreg, dL_dK, index, index2)
dkappa = np.diag(dL_dK_small).copy()
dL_dK_small += dL_dK_small.T
dW = (coreg.W[:, None, :]*dL_dK_small[:, :, None]).sum(0)
coreg.W.gradient = dW
coreg.kappa.gradient = dkappa
def update_gradients_Kmn(coreg, dL_dK, D):
dW = np.zeros((D,1))
dkappa = np.zeros((D)) # not used
for d in range(D):
dW[d,:] = dL_dK[d].sum()
coreg.W.gradient = dW
coreg.kappa.gradient = dkappa
def gradients_coreg(coreg, dL_dK, X, X2=None):
index = np.asarray(X, dtype=np.int)
if X2 is None:
index2 = index
else:
index2 = np.asarray(X2, dtype=np.int)
dK_dB = _gradient_B(coreg, dL_dK, index, index2)
dkappa = np.diag(dK_dB).copy()
dK_dB += dK_dB.T
dW = (coreg.W[:, None, :]*dK_dB[:, :, None]).sum(0)
coreg.W.gradient = dW
coreg.kappa.gradient = dkappa
def gradients_coreg_diag(coreg, dL_dKdiag, kern_q, X, X2=None):
# dL_dKdiag is (NxD)
if X2 is None:
X2 = X
N,D = dL_dKdiag.shape
matrix_sum = np.zeros((D,1))
for d in range(D):
matrix_sum[d,0] = np.sum(np.diag(kern_q.K(X, X2)) * dL_dKdiag[:,d,None])
dW = 2 * coreg.W * matrix_sum
dkappa = matrix_sum
return dW, dkappa
def vem_algorithm(model, vem_iters=None, maxIter_perVEM = None, step_rate=None ,verbose=False, optZ=True, verbose_plot=False, non_chained=True):
if vem_iters is None:
vem_iters = 5
if maxIter_perVEM is None:
#maxIter_perVEM = 25
maxIter_perVEM = 100
    model['.*.kappa'].fix()  # must always be fixed
#model.elbo = np.empty((vem_iters,1))
if model.batch_size is None:
for i in range(vem_iters):
# VARIATIONAL E-STEP
model['.*.lengthscale'].fix()
model['.*.variance'].fix()
model.Z.fix()
model['.*.W'].fix()
model.q_u_means.unfix()
model.q_u_chols.unfix()
model.optimize(messages=verbose, max_iters=maxIter_perVEM)
print('iteration ('+str(i+1)+') VE step, ELBO='+str(model.log_likelihood().flatten()))
# VARIATIONAL M-STEP
model['.*.lengthscale'].unfix()
model['.*.variance'].unfix()
if optZ:
model.Z.unfix()
if non_chained:
model['.*.W'].unfix()
model.q_u_means.fix()
model.q_u_chols.fix()
model.optimize(messages=verbose, max_iters=maxIter_perVEM)
print('iteration (' + str(i+1) + ') VM step, ELBO=' + str(model.log_likelihood().flatten()))
else:
if step_rate is None:
step_rate = 0.01
model.elbo = np.empty((maxIter_perVEM*vem_iters+2, 1))
model.elbo[0,0]=model.log_likelihood()
c_full = partial(model.callback, max_iter=maxIter_perVEM, verbose=verbose, verbose_plot=verbose_plot)
for i in range(vem_iters):
# VARIATIONAL E-STEP
model['.*.lengthscale'].fix()
model['.*.variance'].fix()
model.Z.fix()
model['.*.W'].fix()
model.q_u_means.unfix()
model.q_u_chols.unfix()
optimizer = climin.Adam(model.optimizer_array, model.stochastic_grad, step_rate=step_rate,decay_mom1=1 - 0.9, decay_mom2=1 - 0.999)
optimizer.minimize_until(c_full)
print('iteration (' + str(i + 1) + ') VE step, mini-batch ELBO=' + str(model.log_likelihood().flatten()))
            # VARIATIONAL M-STEP
model['.*.lengthscale'].unfix()
model['.*.variance'].unfix()
if optZ:
model.Z.unfix()
if non_chained:
model['.*.W'].unfix()
model.q_u_means.fix()
model.q_u_chols.fix()
optimizer = climin.Adam(model.optimizer_array, model.stochastic_grad, step_rate=step_rate,decay_mom1=1 - 0.9, decay_mom2=1 - 0.999)
optimizer.minimize_until(c_full)
print('iteration (' + str(i + 1) + ') VM step, mini-batch ELBO=' + str(model.log_likelihood().flatten()))
# Unfix everything
model.q_u_means.unfix()
model.q_u_chols.unfix()
model['.*.lengthscale'].unfix()
model['.*.variance'].unfix()
model.Z.unfix()
model['.*.W'].unfix()
return model
def plot_streaming_figures_experiment1_latex(model_list, Xtrain_list, Xtest_list, Ytrain_list, Ytest_list):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# First BATCH
m_pred_gaussian1, v_pred_gaussian1 = model_list[0].predictive_new(Xtest_list[0][0], output_function_ind=0)
m_pred_gp_upper_gaussian1 = m_pred_gaussian1 + 2 * np.sqrt(v_pred_gaussian1)
m_pred_gp_lower_gaussian1 = m_pred_gaussian1 - 2 * np.sqrt(v_pred_gaussian1)
m_pred_gaussian2, v_pred_gaussian2 = model_list[0].predictive_new(Xtest_list[0][1], output_function_ind=1)
m_pred_gp_upper_gaussian2 = m_pred_gaussian2 + 2 * np.sqrt(v_pred_gaussian2)
m_pred_gp_lower_gaussian2 = m_pred_gaussian2 - 2 * np.sqrt(v_pred_gaussian2)
fig_batch_1 = plt.figure(figsize=(12, 5))
plt.plot(Xtrain_list[0][0], Ytrain_list[0][0], 'x', color='blue', markersize=10, alpha=0.2)
plt.plot(Xtest_list[0][0], Ytest_list[0][0], 'o', color='blue', markersize=2, alpha=0.75)
plt.plot(Xtest_list[0][0], m_pred_gaussian1, 'b-', linewidth=4, alpha=0.5)
plt.plot(Xtest_list[0][0], m_pred_gp_upper_gaussian1, 'b-', linewidth=2, alpha=1)
plt.plot(Xtest_list[0][0], m_pred_gp_lower_gaussian1, 'b-', linewidth=2, alpha=1)
plt.plot(Xtrain_list[0][1], Ytrain_list[0][1], 'x', color='red', markersize=10, alpha=0.2)
plt.plot(Xtest_list[0][1], Ytest_list[0][1], 'o', color='red', markersize=2, alpha=0.75)
plt.plot(Xtest_list[0][1], m_pred_gaussian2, 'r-', linewidth=4, alpha=0.5)
plt.plot(Xtest_list[0][1], m_pred_gp_upper_gaussian2, 'r-', linewidth=2, alpha=1)
plt.plot(Xtest_list[0][1], m_pred_gp_lower_gaussian2, 'r-', linewidth=2, alpha=1)
for q in range(model_list[0].Z.shape[1]):
for m in range(model_list[0].Z[:,q].shape[0]):
plt.axvline(model_list[0].Z[m, q], color='black', alpha=0.5)
plt.title(r'Online Multi-Output Gaussian Regression (t=1)')
plt.ylabel(r'Real Outputs')
plt.xlabel(r'Real Inputs')
plt.xlim(0, 1)
#tikz_save('online_mogp_regression_batch_1.tex')
plt.show()
# -------------------------------------------------------------------------------
# Second BATCH
joint_Xtrain_for_batch_2_output0 = np.vstack((Xtrain_list[0][0], Xtrain_list[1][0]))
joint_Ytrain_for_batch_2_output0 = np.vstack((Ytrain_list[0][0], Ytrain_list[1][0]))
joint_Xtest_for_batch_2_output0 = np.vstack((Xtest_list[0][0], Xtest_list[1][0]))
joint_Ytest_for_batch_2_output0 = np.vstack((Ytest_list[0][0], Ytest_list[1][0]))
joint_Xtrain_for_batch_2_output1 = np.vstack((Xtrain_list[0][1], Xtrain_list[1][1]))
joint_Ytrain_for_batch_2_output1 = np.vstack((Ytrain_list[0][1], Ytrain_list[1][1]))
joint_Xtest_for_batch_2_output1 = np.vstack((Xtest_list[0][1], Xtest_list[1][1]))
joint_Ytest_for_batch_2_output1 = np.vstack((Ytest_list[0][1], Ytest_list[1][1]))
m_pred2_gaussian1, v_pred2_gaussian1 = model_list[1].predictive_new(np.sort(joint_Xtest_for_batch_2_output0),
output_function_ind=0)
m_pred2_gp_upper_gaussian1 = m_pred2_gaussian1 + 2 * np.sqrt(v_pred2_gaussian1)
m_pred2_gp_lower_gaussian1 = m_pred2_gaussian1 - 2 * np.sqrt(v_pred2_gaussian1)
m_pred2_gaussian2, v_pred2_gaussian2 = model_list[1].predictive_new(np.sort(joint_Xtest_for_batch_2_output1),
output_function_ind=1)
m_pred2_gp_upper_gaussian2 = m_pred2_gaussian2 + 2 * np.sqrt(v_pred2_gaussian2)
m_pred2_gp_lower_gaussian2 = m_pred2_gaussian2 - 2 * np.sqrt(v_pred2_gaussian2)
fig_batch_2 = plt.figure(figsize=(12, 5))
plt.plot(joint_Xtrain_for_batch_2_output0, joint_Ytrain_for_batch_2_output0, 'x', color='blue', markersize=10,
alpha=0.2)
plt.plot(joint_Xtest_for_batch_2_output0, joint_Ytest_for_batch_2_output0, 'o', color='blue', markersize=2,
alpha=0.75)
plt.plot(np.sort(joint_Xtest_for_batch_2_output0), m_pred2_gaussian1, 'b-', linewidth=4, alpha=0.5)
plt.plot(np.sort(joint_Xtest_for_batch_2_output0), m_pred2_gp_upper_gaussian1, 'b-', linewidth=2, alpha=1)
plt.plot(np.sort(joint_Xtest_for_batch_2_output0), m_pred2_gp_lower_gaussian1, 'b-', linewidth=2, alpha=1)
plt.plot(joint_Xtrain_for_batch_2_output1, joint_Ytrain_for_batch_2_output1, 'x', color='red', markersize=10,
alpha=0.2)
plt.plot(joint_Xtest_for_batch_2_output1, joint_Ytest_for_batch_2_output1, 'o', color='red', markersize=2,
alpha=0.75)
plt.plot(np.sort(joint_Xtest_for_batch_2_output1), m_pred2_gaussian2, 'r-', linewidth=4, alpha=0.5)
plt.plot(np.sort(joint_Xtest_for_batch_2_output1), m_pred2_gp_upper_gaussian2, 'r-', linewidth=2, alpha=1)
plt.plot(np.sort(joint_Xtest_for_batch_2_output1), m_pred2_gp_lower_gaussian2, 'r-', linewidth=2, alpha=1)
for q in range(model_list[1].Z.shape[1]):
for m in range(model_list[1].Z[:,q].shape[0]):
plt.axvline(model_list[1].Z[m, q], color='black', alpha=0.5)
plt.title(r'Online Multi-Output Gaussian Regression (t=2)')
plt.ylabel(r'Real Outputs')
plt.xlabel(r'Real Inputs')
plt.xlim(0, 1)
#tikz_save('online_mogp_regression_batch_2.tex')
plt.show()
# -------------------------------------------------------------------------------
# Third BATCH
joint_Xtrain_for_batch_3_output0 = np.vstack((joint_Xtrain_for_batch_2_output0, Xtrain_list[2][0]))
joint_Ytrain_for_batch_3_output0 = np.vstack((joint_Ytrain_for_batch_2_output0, Ytrain_list[2][0]))
joint_Xtest_for_batch_3_output0 = np.vstack((joint_Xtest_for_batch_2_output0, Xtest_list[2][0]))
joint_Ytest_for_batch_3_output0 = np.vstack((joint_Ytest_for_batch_2_output0, Ytest_list[2][0]))
joint_Xtrain_for_batch_3_output1 = np.vstack((joint_Xtrain_for_batch_2_output1, Xtrain_list[2][1]))
joint_Ytrain_for_batch_3_output1 = np.vstack((joint_Ytrain_for_batch_2_output1, Ytrain_list[2][1]))
joint_Xtest_for_batch_3_output1 = np.vstack((joint_Xtest_for_batch_2_output1, Xtest_list[2][1]))
joint_Ytest_for_batch_3_output1 = np.vstack((joint_Ytest_for_batch_2_output1, Ytest_list[2][1]))
m_pred3_gaussian1, v_pred3_gaussian1 = model_list[2].predictive_new(np.sort(joint_Xtest_for_batch_3_output0),
output_function_ind=0)
m_pred3_gp_upper_gaussian1 = m_pred3_gaussian1 + 2 * np.sqrt(v_pred3_gaussian1)
m_pred3_gp_lower_gaussian1 = m_pred3_gaussian1 - 2 * np.sqrt(v_pred3_gaussian1)
m_pred3_gaussian2, v_pred3_gaussian2 = model_list[2].predictive_new(np.sort(joint_Xtest_for_batch_3_output1),
output_function_ind=1)
m_pred3_gp_upper_gaussian2 = m_pred3_gaussian2 + 2 * np.sqrt(v_pred3_gaussian2)
m_pred3_gp_lower_gaussian2 = m_pred3_gaussian2 - 2 * np.sqrt(v_pred3_gaussian2)
fig_batch_3 = plt.figure(figsize=(12, 5))
plt.plot(joint_Xtrain_for_batch_3_output0, joint_Ytrain_for_batch_3_output0, 'x', color='blue', markersize=10,
alpha=0.2)
plt.plot(joint_Xtest_for_batch_3_output0, joint_Ytest_for_batch_3_output0, 'o', color='blue', markersize=2,
alpha=0.75)
plt.plot(np.sort(joint_Xtest_for_batch_3_output0), m_pred3_gaussian1, 'b-', linewidth=4, alpha=0.5)
plt.plot(np.sort(joint_Xtest_for_batch_3_output0), m_pred3_gp_upper_gaussian1, 'b-', linewidth=2, alpha=1)
plt.plot(np.sort(joint_Xtest_for_batch_3_output0), m_pred3_gp_lower_gaussian1, 'b-', linewidth=2, alpha=1)
plt.plot(joint_Xtrain_for_batch_3_output1, joint_Ytrain_for_batch_3_output1, 'x', color='red', markersize=10,
alpha=0.2)
plt.plot(joint_Xtest_for_batch_3_output1, joint_Ytest_for_batch_3_output1, 'o', color='red', markersize=2,
alpha=0.75)
plt.plot(np.sort(joint_Xtest_for_batch_3_output1), m_pred3_gaussian2, 'r-', linewidth=4, alpha=0.5)
plt.plot(np.sort(joint_Xtest_for_batch_3_output1), m_pred3_gp_upper_gaussian2, 'r-', linewidth=2, alpha=1)
plt.plot(np.sort(joint_Xtest_for_batch_3_output1), m_pred3_gp_lower_gaussian2, 'r-', linewidth=2, alpha=1)
for q in range(model_list[2].Z.shape[1]):
for m in range(model_list[2].Z[:,q].shape[0]):
plt.axvline(model_list[2].Z[m, q], color='black', alpha=0.5)
plt.title(r'Online Multi-Output Gaussian Regression (t=3)')
plt.ylabel(r'Real Outputs')
plt.xlabel(r'Real Inputs')
plt.xlim(0, 1)
#tikz_save('online_mogp_regression_batch_3.tex')
plt.show()
# SINGLE OUTPUT (SO)
def plot_streaming_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, q_mean_list, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
lik_noise = 1.5
max_X = 2.0
max_Y = 16.0
T = len(model_list)
for t in range(T):
# For every batch in the stream:
m_pred_new, v_pred_new = model_list[t].predictive_new(np.sort(sXtest[t],0), output_function_ind=0)
m_pred_gp_upper_new = m_pred_new + 2 * np.sqrt(v_pred_new) + lik_noise
m_pred_gp_lower_new = m_pred_new - 2 * np.sqrt(v_pred_new) - lik_noise
fig_batch = plt.figure(figsize=(12, 4))
if t>0:
for t_past in range(t):
m_pred_past, v_pred_past = model_list[t].predictive_new(np.sort(sXtest[t_past],0), output_function_ind=0)
m_pred_gp_upper_past = m_pred_past + 2 * np.sqrt(v_pred_past) + 1.0
m_pred_gp_lower_past = m_pred_past - 2 * np.sqrt(v_pred_past) - 1.0
plt.plot(sXtrain[t_past], sYtrain[t_past], 'x', color='blue', markersize=10, alpha=0.25)
plt.plot(sXtest[t_past], sYtest[t_past], 'o', color='blue', markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t_past],0), m_pred_past, 'b', linewidth=4, alpha=0.5)
plt.plot(np.sort(sXtest[t_past],0), m_pred_gp_upper_past, 'b', linewidth=1, alpha=1)
plt.plot(np.sort(sXtest[t_past],0), m_pred_gp_lower_past, 'b', linewidth=1, alpha=1)
plt.plot(sXtrain[t], sYtrain[t], 'x', color='red', markersize=10, alpha=0.2)
plt.plot(sXtest[t], sYtest[t], 'o', color='red', markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t],0), m_pred_new, 'r', linewidth=4, alpha=0.5)
plt.plot(np.sort(sXtest[t],0), m_pred_gp_upper_new, 'r', linewidth=1, alpha=1)
plt.plot(np.sort(sXtest[t],0), m_pred_gp_lower_new, 'r', linewidth=1, alpha=1)
# plt.plot(Z_points[t][:,0], q_mean_list[t], 'kx', markersize=15.0, mew=1.5)
plt.xlim(0, max_X)
plt.ylim(-max_Y, max_Y)
plt.title(r'Continual Gaussian Process Regression (t=' + str(t+1) +')')
plt.ylabel(r'Real Outputs')
plt.xlabel(r'Real Inputs')
if save:
tikz_save('so_gpr_streaming_t'+str(t+1)+'.tex')
plt.show()
# MULTI-OUTPUT (MO)
def plot_multioutput_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, q_mean_list, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
max_X = 2.0
max_Y = 13.0
T = len(model_list)
D = len(sXtrain[0])
color_list = ['salmon', 'slateblue']
new_color_list = ['red', 'blue']
# For every batch in the stream:
for t in range(T):
fig_batch = plt.figure(figsize=(12, 4))
# For every output in the model:
for d in range(D):
# For every batch in the stream:
m_pred_new, v_pred_new = model_list[t].predictive_new(np.sort(sXtest[t][d], 0), output_function_ind=d)
m_pred_gp_upper_new = m_pred_new + 2 * np.sqrt(v_pred_new) + model_list[0].likelihood.likelihoods_list[d].sigma
m_pred_gp_lower_new = m_pred_new - 2 * np.sqrt(v_pred_new) - model_list[0].likelihood.likelihoods_list[d].sigma
if t>0:
for t_past in range(t):
m_pred_past, v_pred_past = model_list[t].predictive_new(np.sort(sXtest[t_past][d], 0), output_function_ind=d)
m_pred_gp_upper_past = m_pred_past + 2 * np.sqrt(v_pred_past) + model_list[0].likelihood.likelihoods_list[d].sigma
m_pred_gp_lower_past = m_pred_past - 2 * np.sqrt(v_pred_past) - model_list[0].likelihood.likelihoods_list[d].sigma
plt.plot(sXtrain[t_past][d], sYtrain[t_past][d], 'x', color=color_list[d], markersize=10, alpha=0.25)
plt.plot(sXtest[t_past][d], sYtest[t_past][d], 'o', color=color_list[d], markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_past, color=color_list[d], linewidth=4, alpha=0.5)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_gp_upper_past, color=color_list[d], linewidth=1, alpha=1)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_gp_lower_past, color=color_list[d], linewidth=1, alpha=1)
plt.plot(sXtrain[t][d], sYtrain[t][d], 'x', color=new_color_list[d], markersize=10, alpha=0.2)
plt.plot(sXtest[t][d], sYtest[t][d], 'o', color=new_color_list[d], markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_new, color=new_color_list[d], linewidth=4, alpha=0.5)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_gp_upper_new, color=new_color_list[d], linewidth=1, alpha=1)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_gp_lower_new, color=new_color_list[d], linewidth=1, alpha=1)
plt.xlim(0, max_X)
plt.ylim(-max_Y, max_Y)
plt.title(r'Continual Multi-output Gaussian Process Regression (t=' + str(t+1) +')')
plt.ylabel(r'Real Outputs')
plt.xlabel(r'Real Inputs')
if save:
tikz_save('mo_gpr_streaming_t'+str(t+1)+'.tex')
plt.show()
# ASYNCHRONOUS MULTI-OUTPUT (MO)
def plot_asyn_multioutput_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, q_mean_list, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
max_X = 1.0
max_Y = 13.0
T = len(model_list)
D = len(sXtrain[0])
color_list = ['salmon', 'slateblue']
new_color_list = ['red', 'blue']
# For every batch in the stream:
for t in range(T):
fig_batch = plt.figure(figsize=(12, 4))
# For every output in the model:
for d in range(D):
# For every batch in the stream:
m_pred_new, v_pred_new = model_list[t].predictive_new(np.sort(sXtest[t][d], 0), output_function_ind=d)
m_pred_gp_upper_new = m_pred_new + 2 * np.sqrt(v_pred_new) + model_list[0].likelihood.likelihoods_list[d].sigma
m_pred_gp_lower_new = m_pred_new - 2 * np.sqrt(v_pred_new) - model_list[0].likelihood.likelihoods_list[d].sigma
if t>0:
for t_past in range(t):
m_pred_past, v_pred_past = model_list[t].predictive_new(np.sort(sXtest[t_past][d], 0), output_function_ind=d)
m_pred_gp_upper_past = m_pred_past + 2 * np.sqrt(v_pred_past) + model_list[0].likelihood.likelihoods_list[d].sigma
m_pred_gp_lower_past = m_pred_past - 2 * np.sqrt(v_pred_past) - model_list[0].likelihood.likelihoods_list[d].sigma
plt.plot(sXtrain[t_past][d], sYtrain[t_past][d], 'x', color=color_list[d], markersize=10, alpha=0.25)
plt.plot(sXtest[t_past][d], sYtest[t_past][d], 'o', color=color_list[d], markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_past, color=color_list[d], linewidth=4, alpha=0.5)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_gp_upper_past, color=color_list[d], linewidth=1, alpha=1)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_gp_lower_past, color=color_list[d], linewidth=1, alpha=1)
plt.plot(sXtrain[t][d], sYtrain[t][d], 'x', color=new_color_list[d], markersize=10, alpha=0.2)
plt.plot(sXtest[t][d], sYtest[t][d], 'o', color=new_color_list[d], markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_new, color=new_color_list[d], linewidth=4, alpha=0.5)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_gp_upper_new, color=new_color_list[d], linewidth=1, alpha=1)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_gp_lower_new, color=new_color_list[d], linewidth=1, alpha=1)
plt.xlim(0, max_X)
plt.ylim(-max_Y, max_Y)
plt.title(r'Continual Multi-output Gaussian Process Regression (t=' + str(t+1) +')')
plt.ylabel(r'Real Outputs')
plt.xlabel(r'Real Inputs')
if save:
tikz_save('mo_gpr_asynchronous_t'+str(t+1)+'.tex')
plt.show()
def plot_mocap_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, q_mean_list, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
max_X = 1.0
max_Y = 35.0
T = len(model_list)
D = len(sXtrain[0])
color_list = ['lightpink', 'moccasin', 'thistle']
new_color_list = ['crimson', 'orange', 'darkviolet']
# For every batch in the stream:
for t in range(T):
fig_batch = plt.figure(figsize=(12, 4))
# For every output in the model:
for d in range(D):
# For every batch in the stream:
m_pred_new, v_pred_new = model_list[t].predictive_new(np.sort(sXtest[t][d], 0), output_function_ind=d)
m_pred_gp_upper_new = m_pred_new + 2 * np.sqrt(v_pred_new) + model_list[0].likelihood.likelihoods_list[d].sigma
m_pred_gp_lower_new = m_pred_new - 2 * np.sqrt(v_pred_new) - model_list[0].likelihood.likelihoods_list[d].sigma
if t>0:
for t_past in range(t):
m_pred_past, v_pred_past = model_list[t].predictive_new(np.sort(sXtest[t_past][d], 0), output_function_ind=d)
m_pred_gp_upper_past = m_pred_past + 2 * np.sqrt(v_pred_past) + model_list[0].likelihood.likelihoods_list[d].sigma
m_pred_gp_lower_past = m_pred_past - 2 * np.sqrt(v_pred_past) - model_list[0].likelihood.likelihoods_list[d].sigma
plt.plot(sXtrain[t_past][d], sYtrain[t_past][d], 'x', color=color_list[d], markersize=10, alpha=1.0)
#plt.plot(sXtest[t_past][d], sYtest[t_past][d], 'o', color=color_list[d], markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_past, color='k', linewidth=2, alpha=1.0)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_gp_upper_past, color='k', linewidth=1, alpha=1.0)
plt.plot(np.sort(sXtest[t_past][d], 0), m_pred_gp_lower_past, color='k', linewidth=1, alpha=1.0)
plt.plot(sXtrain[t][d], sYtrain[t][d], 'x', color=new_color_list[d], markersize=10, alpha=1.0)
#plt.plot(sXtest[t][d], sYtest[t][d], 'o', color=new_color_list[d], markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_new, color='k', linewidth=2, alpha=1.0)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_gp_upper_new, color='k', linewidth=1, alpha=1.0)
plt.plot(np.sort(sXtest[t][d], 0), m_pred_gp_lower_new, color='k', linewidth=1, alpha=1.0)
plt.xlim(0, max_X)
plt.ylim(-max_Y, max_Y)
plt.title(r'MOCAP (t=' + str(t+1) +')')
plt.ylabel(r'Sensor Motion / Y axis output')
plt.xlabel(r'Time')
plt.legend(['Left Wrist Sensor', 'Right Femur Sensor', 'Mean Predictive Posterior'])
if save:
tikz_save('mocap_t'+str(t+1)+'.tex')
plt.show()
# BANANA EXPERIMENT
def plot_banana_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, max_min, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
T = len(model_list)
Ntest = 30
min_X = max_min[0]
min_Y = max_min[1]
max_X = max_min[2]
max_Y = max_min[3]
stack_X_train = sXtrain[0]
stack_Y_train = sYtrain[0]
for t in range(T):
max_ty = stack_X_train[:,1].max()
min_ty = stack_X_train[:,1].min()
max_tx = stack_X_train[:,0].max()
min_tx = stack_X_train[:,0].min()
ty = np.linspace(min_ty, max_ty, Ntest)
tx = np.linspace(min_tx, max_tx, Ntest)
TX_grid, TY_grid = np.meshgrid(tx, ty)
TX = TX_grid.reshape(Ntest ** 2, 1)
TY = TY_grid.reshape(Ntest ** 2, 1)
test_X = np.hstack((TX, TY))
# For every batch in the stream:
m_pred_new, _ = model_list[t].predictive_new(test_X, output_function_ind=0)
m_pred_new = np.exp(m_pred_new)/(1 + np.exp(m_pred_new))
fig_batch = plt.figure(figsize=[8,6])
if t > 0:
for t_past in range(t):
plt.plot(sXtrain[t_past][sYtrain[t_past][:, 0] == 1, 0], sXtrain[t_past][sYtrain[t_past][:, 0] == 1, 1], 'x', color='darkviolet', alpha=0.25)
plt.plot(sXtrain[t_past][sYtrain[t_past][:, 0] == 0, 0], sXtrain[t_past][sYtrain[t_past][:, 0] == 0, 1], 'x', color='darkorange', alpha=0.25)
plt.plot(sXtrain[t][sYtrain[t][:, 0] == 1, 0], sXtrain[t][sYtrain[t][:, 0] == 1, 1], 'x', color='darkviolet')
plt.plot(sXtrain[t][sYtrain[t][:, 0] == 0, 0], sXtrain[t][sYtrain[t][:, 0] == 0, 1], 'x', color='darkorange')
plt.contour(TX_grid, TY_grid, np.reshape(m_pred_new, (Ntest, Ntest)), linewidths=3, colors='k', levels=0.5 * np.eye(1))
plt.xlim(min_X, max_X)
plt.ylim(min_Y, max_Y)
plt.title(r'Continual Gaussian Process Classification (t=' + str(t + 1) + ')')
plt.ylabel(r'Real Input')
plt.xlabel(r'Real Input')
plt.legend(['y=1', 'y=-1'])
if save:
tikz_save('banana_t' + str(t + 1) + '.tex')
plt.show()
if t < T-1:
stack_X_train = np.vstack((stack_X_train, sXtrain[t+1]))
stack_Y_train = np.vstack((stack_Y_train, sYtrain[t+1]))
return
# CURRENCY EXPERIMENT
def plot_currency_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, q_mean_list, mean_usd, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
lik_noise = model_list[0].likelihood.likelihoods_list[0].sigma
max_X = 2.0
max_Y = 0.12
#max_Y = 16.0
T = len(model_list)
for t in range(T):
# For every batch in the stream:
m_pred_new, v_pred_new = model_list[t].predictive_new(np.sort(sXtest[t],0), output_function_ind=0)
m_pred_gp_upper_new = m_pred_new + 2 * np.sqrt(v_pred_new) + lik_noise
m_pred_gp_lower_new = m_pred_new - 2 * np.sqrt(v_pred_new) - lik_noise
fig_batch = plt.figure(figsize=(12, 4))
if t>0:
for t_past in range(t):
m_pred_past, v_pred_past = model_list[t].predictive_new(np.sort(sXtest[t_past],0), output_function_ind=0)
m_pred_gp_upper_past = m_pred_past + 2 * np.sqrt(v_pred_past) + lik_noise
m_pred_gp_lower_past = m_pred_past - 2 * np.sqrt(v_pred_past) - lik_noise
plt.plot(sXtrain[t_past][sXtrain[t_past][:,0].argsort()], sYtrain[t_past][sXtrain[t_past][:,0].argsort()]+mean_usd, '-', color='steelblue', linewidth=2, markersize=10, alpha=1.0)
#plt.plot(sXtest[t_past][sXtest[t_past][:,0].argsort()], sYtest[t_past][sXtest[t_past][:,0].argsort()], '-', color='blue', markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t_past],0), m_pred_past+mean_usd, '-k', linewidth=2, alpha=1.0)
plt.plot(np.sort(sXtest[t_past],0), m_pred_gp_upper_past, '-k', linewidth=1, alpha=1.0)
plt.plot(np.sort(sXtest[t_past],0), m_pred_gp_lower_past, '-k', linewidth=1, alpha=1.0)
plt.plot(sXtrain[t][sXtrain[t][:,0].argsort()], sYtrain[t][sXtrain[t][:,0].argsort()]+mean_usd, '-', color='mediumturquoise', linewidth=2, markersize=10, alpha=1.0)
#plt.plot(sXtest[t][sXtest[t][:,0].argsort()], sYtest[t][sXtest[t][:,0].argsort()], '-', color='red', markersize=2, alpha=0.75)
plt.plot(np.sort(sXtest[t],0), m_pred_new+mean_usd, '-k', linewidth=2, alpha=1.0)
plt.plot(np.sort(sXtest[t],0), m_pred_gp_upper_new, '-k', linewidth=1, alpha=1.0)
plt.plot(np.sort(sXtest[t],0), m_pred_gp_lower_new, '-k', linewidth=1, alpha=1.0)
plt.xlim(0, max_X)
plt.ylim(-max_Y, max_Y)
plt.title(r'Dollar Exchange Rate (t=' + str(t+1) +')')
plt.ylabel(r'USD/EUR')
plt.xlabel(r'Real Inputs')
if save:
tikz_save('currency_t'+str(t+1)+'.tex')
plt.show()
# BANANA MSE AND NLPD
def banana_metrics(model_list, sXtest, sYtest):
T = len(model_list)
for t in range(T):
m_pred_test, _ = model_list[t].predictive_new(sXtest[t], output_function_ind=0)
m_pred_test = np.exp(m_pred_test) / (1 + np.exp(m_pred_test))
m_pred_test[m_pred_test[:,0]<0.5, 0] = 0.0
m_pred_test[m_pred_test[:,0]>=0.5, 0] = 1.0
errors = sYtest[t] - m_pred_test
errors = np.abs(errors)
        mse = np.sum(errors, axis=0) / errors.shape[0]  # mean absolute error of the 0/1 predictions, i.e. the classification error rate
print(mse)
return
def plot_eb2_latex(model_list, sXtrain, sXtest, sYtrain, sYtest, Z_points, q_mean_list, save=False):
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
max_X = 1.0
max_Y = 35.0
T = len(model_list)
D = len(sXtrain[0])
color_list = ['lightpink', 'moccasin', 'thistle']
new_color_list = ['crimson', 'orange', 'darkviolet']
# For every batch in the stream:
for t in range(T):
# For every output in the model:
for d in range(D):
fig_batch = plt.figure(figsize=(12, 4))
if d == 0:
# For every batch in the stream:
m_pred_new, v_pred_new = model_list[t].predictive_new(np.sort(sXtest[t][d], 0), output_function_ind=0)
m_pred_gp_upper_new = m_pred_new + 2 * np.sqrt(v_pred_new)
m_pred_gp_lower_new = m_pred_new - 2 * np.sqrt(v_pred_new)
                m_pred_new = np.exp(m_pred_new) / (1 + np.exp(m_pred_new))  # target API: numpy.exp
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
''' Test for volumeutils module '''
from __future__ import with_statement
from ..py3k import BytesIO, asbytes, ZEROB
import tempfile
import warnings
import numpy as np
from ..tmpdirs import InTemporaryDirectory
from ..volumeutils import (array_from_file,
array_to_file,
allopen, # for backwards compatibility
BinOpener,
calculate_scale,
can_cast,
write_zeros,
apply_read_scaling,
_inter_type,
working_type,
best_write_scale_ftype,
better_float_of,
int_scinter_ftype,
make_dt_codes,
native_code,
shape_zoom_affine,
rec2dict)
from ..casting import (floor_log2, type_info, best_float, OK_FLOATS)
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_equal, assert_raises
from ..testing import assert_dt_equal
#: convenience variables for numpy types
FLOAT_TYPES = np.sctypes['float']
CFLOAT_TYPES = np.sctypes['complex'] + FLOAT_TYPES
IUINT_TYPES = np.sctypes['int'] + np.sctypes['uint']
NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES
def test_array_from_file():
shape = (2,3,4)
dtype = np.dtype(np.float32)
in_arr = np.arange(24, dtype=dtype).reshape(shape)
# Check on string buffers
offset = 0
assert_true(buf_chk(in_arr, BytesIO(), None, offset))
offset = 10
assert_true(buf_chk(in_arr, BytesIO(), None, offset))
# check on real file
fname = 'test.bin'
with InTemporaryDirectory():
# fortran ordered
out_buf = open(fname, 'wb')
in_buf = open(fname, 'rb')
assert_true(buf_chk(in_arr, out_buf, in_buf, offset))
# Drop offset to check that shape's not coming from file length
out_buf.seek(0)
in_buf.seek(0)
offset = 5
assert_true(buf_chk(in_arr, out_buf, in_buf, offset))
del out_buf, in_buf
# Make sure empty shape, and zero length, give empty arrays
arr = array_from_file((), np.dtype('f8'), BytesIO())
assert_equal(len(arr), 0)
arr = array_from_file((0,), np.dtype('f8'), BytesIO())
assert_equal(len(arr), 0)
# Check error from small file
assert_raises(IOError, array_from_file,
shape, dtype, BytesIO())
# check on real file
fd, fname = tempfile.mkstemp()
with InTemporaryDirectory():
open(fname, 'wb').write(asbytes('1'))
in_buf = open(fname, 'rb')
# For windows this will raise a WindowsError from mmap, Unices
# appear to raise an IOError
assert_raises(Exception, array_from_file,
shape, dtype, in_buf)
del in_buf
def buf_chk(in_arr, out_buf, in_buf, offset):
''' Write contents of in_arr into fileobj, read back, check same '''
instr = asbytes(' ') * offset + in_arr.tostring(order='F')
out_buf.write(instr)
out_buf.flush()
if in_buf is None: # we're using in_buf from out_buf
out_buf.seek(0)
in_buf = out_buf
arr = array_from_file(
in_arr.shape,
in_arr.dtype,
in_buf,
offset)
return np.allclose(in_arr, arr)
def test_array_to_file():
arr = np.arange(10).reshape(5,2)
str_io = BytesIO()
for tp in (np.uint64, np.float, np.complex):
dt = np.dtype(tp)
for code in '<>':
ndt = dt.newbyteorder(code)
for allow_intercept in (True, False):
scale, intercept, mn, mx = calculate_scale(arr,
ndt,
allow_intercept)
data_back = write_return(arr, str_io, ndt,
0, intercept, scale)
assert_array_almost_equal(arr, data_back)
def test_a2f_intercept_scale():
arr = np.array([0.0, 1.0, 2.0])
str_io = BytesIO()
# intercept
data_back = write_return(arr, str_io, np.float64, 0, 1.0)
assert_array_equal(data_back, arr-1)
# scaling
data_back = write_return(arr, str_io, np.float64, 0, 1.0, 2.0)
assert_array_equal(data_back, (arr-1) / 2.0)
def test_a2f_upscale():
# Test working type scales with needed range
info = type_info(np.float32)
# Test values discovered from stress testing. The largish value (2**115)
# overflows to inf after the intercept is subtracted, using float32 as the
# working precision. The difference between inf and this value is lost.
arr = np.array([[info['min'], 2**115, info['max']]], dtype=np.float32)
slope = np.float32(2**121)
inter = info['min']
str_io = BytesIO()
# We need to provide mn, mx for function to be able to calculate upcasting
array_to_file(arr, str_io, np.uint8, intercept=inter, divslope=slope,
mn = info['min'], mx = info['max'])
raw = array_from_file(arr.shape, np.uint8, str_io)
back = apply_read_scaling(raw, slope, inter)
top = back - arr
score = np.abs(top / arr)
assert_true(np.all(score < 10))
def test_a2f_min_max():
str_io = BytesIO()
for in_dt in (np.float32, np.int8):
for out_dt in (np.float32, np.int8):
arr = np.arange(4, dtype=in_dt)
# min thresholding
data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1)
assert_array_equal(data_back, [1, 1, 2, 3])
# max thresholding
data_back = write_return(arr, str_io, out_dt, 0, 0, 1, None, 2)
assert_array_equal(data_back, [0, 1, 2, 2])
# min max thresholding
data_back = write_return(arr, str_io, out_dt, 0, 0, 1, 1, 2)
assert_array_equal(data_back, [1, 1, 2, 2])
# Check that works OK with scaling and intercept
arr = np.arange(4, dtype=np.float32)
data_back = write_return(arr, str_io, np.int, 0, -1, 0.5, 1, 2)
assert_array_equal(data_back * 0.5 - 1, [1, 1, 2, 2])
# Even when scaling is negative
data_back = write_return(arr, str_io, np.int, 0, 1, -0.5, 1, 2)
assert_array_equal(data_back * -0.5 + 1, [1, 1, 2, 2])
def test_a2f_order():
ndt = np.dtype(np.float)
arr = np.array([0.0, 1.0, 2.0])
str_io = BytesIO()
# order makes no difference in 1D case
data_back = write_return(arr, str_io, ndt, order='C')
assert_array_equal(data_back, [0.0, 1.0, 2.0])
# but does in the 2D case
arr = np.array([[0.0, 1.0],[2.0, 3.0]])
data_back = write_return(arr, str_io, ndt, order='F')
assert_array_equal(data_back, arr)
data_back = write_return(arr, str_io, ndt, order='C')
assert_array_equal(data_back, arr.T)
def test_a2f_nan2zero():
ndt = np.dtype(np.float)
str_io = BytesIO()
# nans set to 0 for integer output case, not float
arr = np.array([[np.nan, 0],[0, np.nan]])
data_back = write_return(arr, str_io, ndt) # float, thus no effect
assert_array_equal(data_back, arr)
    # True is the default, but just to show it's possible
data_back = write_return(arr, str_io, ndt, nan2zero=True)
assert_array_equal(data_back, arr)
data_back = write_return(arr, str_io,
np.dtype(np.int64), nan2zero=True)
assert_array_equal(data_back, [[0, 0],[0, 0]])
# otherwise things get a bit weird; tidied here
# How weird? Look at arr.astype(np.int64)
data_back = write_return(arr, str_io,
np.dtype(np.int64), nan2zero=False)
assert_array_equal(data_back, arr.astype(np.int64))
def test_a2f_offset():
# check that non-zero file offset works
arr = np.array([[0.0, 1.0],[2.0, 3.0]])
str_io = BytesIO()
str_io.write(asbytes('a') * 42)
array_to_file(arr, str_io, np.float, 42)
data_back = array_from_file(arr.shape, np.float, str_io, 42)
assert_array_equal(data_back, arr.astype(np.float))
# And that offset=None respected
str_io.truncate(22)
str_io.seek(22)
array_to_file(arr, str_io, np.float, None)
data_back = array_from_file(arr.shape, np.float, str_io, 22)
assert_array_equal(data_back, arr.astype(np.float))
def test_a2f_dtype_default():
# that default dtype is input dtype
arr = np.array([[0.0, 1.0],[2.0, 3.0]])
str_io = BytesIO()
array_to_file(arr.astype(np.int16), str_io)
data_back = array_from_file(arr.shape, np.int16, str_io)
assert_array_equal(data_back, arr.astype(np.int16))
def test_a2f_zeros():
# Check that, if there is no valid data, we get zeros
arr = np.array([[0.0, 1.0],[2.0, 3.0]])
str_io = BytesIO()
# With slope=None signal
array_to_file(arr + np.inf, str_io, np.int32, 0, 0.0, None)
data_back = array_from_file(arr.shape, np.int32, str_io)
assert_array_equal(data_back, np.zeros(arr.shape))
# With mn, mx = 0 signal
array_to_file(arr, str_io, np.int32, 0, 0.0, 1.0, 0, 0)
data_back = array_from_file(arr.shape, np.int32, str_io)
assert_array_equal(data_back, np.zeros(arr.shape))
# With mx < mn signal
array_to_file(arr, str_io, np.int32, 0, 0.0, 1.0, 4, 2)
data_back = array_from_file(arr.shape, np.int32, str_io)
assert_array_equal(data_back, np.zeros(arr.shape))
def test_a2f_big_scalers():
# Check that clip works even for overflowing scalers / data
info = type_info(np.float32)
arr = np.array([info['min'], np.nan, info['max']], dtype=np.float32)
str_io = BytesIO()
# Intercept causes overflow - does routine scale correctly?
array_to_file(arr, str_io, np.int8, intercept=np.float32(2**120))
data_back = array_from_file(arr.shape, np.int8, str_io)
assert_array_equal(data_back, [-128, 0, 127])
# Scales also if mx, mn specified?
str_io.seek(0)
array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'],
intercept=np.float32(2**120))
data_back = array_from_file(arr.shape, np.int8, str_io)
assert_array_equal(data_back, [-128, 0, 127])
# And if slope causes overflow?
str_io.seek(0)
array_to_file(arr, str_io, np.int8, divslope=np.float32(0.5))
data_back = array_from_file(arr.shape, np.int8, str_io)
assert_array_equal(data_back, [-128, 0, 127])
# with mn, mx specified?
str_io.seek(0)
array_to_file(arr, str_io, np.int8, mn=info['min'], mx=info['max'],
divslope=np.float32(0.5))
data_back = array_from_file(arr.shape, np.int8, str_io)
assert_array_equal(data_back, [-128, 0, 127])
def write_return(data, fileobj, out_dtype, *args, **kwargs):
fileobj.truncate(0)
array_to_file(data, fileobj, out_dtype, *args, **kwargs)
data = array_from_file(data.shape, out_dtype, fileobj)
return data
def test_apply_scaling():
# Null scaling, same array returned
arr = np.zeros((3,), dtype=np.int16)
assert_true(apply_read_scaling(arr) is arr)
assert_true(apply_read_scaling(arr, np.float64(1.0)) is arr)
assert_true(apply_read_scaling(arr, inter=np.float64(0)) is arr)
f32, f64 = np.float32, np.float64
f32_arr = np.zeros((1,), dtype=f32)
i16_arr = np.zeros((1,), dtype=np.int16)
# Check float upcast (not the normal numpy scalar rule)
# This is the normal rule - no upcast from scalar
assert_equal((f32_arr * f64(1)).dtype, np.float32)
assert_equal((f32_arr + f64(1)).dtype, np.float32)
# The function does upcast though
ret = apply_read_scaling(np.float32(0), np.float64(2))
assert_equal(ret.dtype, np.float64)
ret = apply_read_scaling(np.float32(0), inter=np.float64(2))
assert_equal(ret.dtype, np.float64)
# Check integer inf upcast
big = f32(type_info(f32)['max'])
# Normally this would not upcast
assert_equal((i16_arr * big).dtype, np.float32)
# An equivalent case is a little hard to find for the intercept
nmant_32 = type_info(np.float32)['nmant']
big_delta = np.float32(2**(floor_log2(big)-nmant_32))
assert_equal((i16_arr * big_delta + big).dtype, np.float32)
# Upcasting does occur with this routine
assert_equal(apply_read_scaling(i16_arr, big).dtype, np.float64)
assert_equal(apply_read_scaling(i16_arr, big_delta, big).dtype, np.float64)
# If float32 passed, no overflow, float32 returned
assert_equal(apply_read_scaling(np.int8(0), f32(-1.0), f32(0.0)).dtype,
np.float32)
# float64 passed, float64 returned
assert_equal(apply_read_scaling(np.int8(0), -1.0, 0.0).dtype, np.float64)
# float32 passed, overflow, float64 returned
assert_equal(apply_read_scaling(np.int8(0), f32(1e38), f32(0.0)).dtype,
np.float64)
assert_equal(apply_read_scaling(np.int8(0), f32(-1e38), f32(0.0)).dtype,
np.float64)
# Test that integer casting during read scaling works
assert_dt_equal(apply_read_scaling(i16_arr, 1.0, 1.0).dtype, np.int32)
assert_dt_equal(apply_read_scaling(
np.zeros((1,), dtype=np.int32), 1.0, 1.0).dtype, np.int64)
assert_dt_equal(apply_read_scaling(
np.zeros((1,), dtype=np.int64), 1.0, 1.0).dtype, best_float())
def test__inter_type():
# Test routine to get intercept type
bf = best_float()
for in_type, inter, out_type, exp_out in (
(np.int8, 0, None, np.int8),
(np.int8, 0, np.int8, np.int8),
(np.int8, 1, None, np.int16),
(np.int8, 1, np.int8, bf),
(np.int8, 1, np.int16, np.int16),
(np.uint8, 0, None, np.uint8),
(np.uint8, 1, None, np.uint16),
(np.uint8, -1, None, np.int16),
(np.int16, 1, None, np.int32),
(np.uint16, 0, None, np.uint16),
(np.uint16, 1, None, np.uint32),
(np.int32, 1, None, np.int64),
(np.uint32, 1, None, np.uint64),
(np.int64, 1, None, bf),
(np.uint64, 1, None, bf),
):
assert_dt_equal(_inter_type(in_type, inter, out_type), exp_out)
# Check that casting is as expected
A = np.zeros((1,), dtype=in_type)
B = np.array([inter], dtype=exp_out)
ApBt = (A + B).dtype.type
assert_dt_equal(ApBt, exp_out)
def test_int_scinter():
# Finding float type needed for applying scale, offset to ints
assert_equal(int_scinter_ftype(np.int8, 1.0, 0.0), np.float32)
assert_equal(int_scinter_ftype(np.int8, -1.0, 0.0), np.float32)
assert_equal(int_scinter_ftype(np.int8, 1e38, 0.0), np.float64)
assert_equal(int_scinter_ftype(np.int8, -1e38, 0.0), np.float64)
def test_working_type():
# Which type do input types with slope and inter cast to in numpy?
# Wrapper function because we need to use the dtype str for comparison. We
# need this because of the very confusing np.int32 != np.intp (on 32 bit).
def wt(*args, **kwargs):
return np.dtype(working_type(*args, **kwargs)).str
d1 = np.atleast_1d
for in_type in NUMERIC_TYPES:
in_ts = np.dtype(in_type).str
assert_equal(wt(in_type), in_ts)
assert_equal(wt(in_type, 1, 0), in_ts)
assert_equal(wt(in_type, 1.0, 0.0), in_ts)
in_val = d1(in_type(0))
for slope_type in NUMERIC_TYPES:
sl_val = slope_type(1) # no scaling, regardless of type
assert_equal(wt(in_type, sl_val, 0.0), in_ts)
sl_val = slope_type(2) # actual scaling
out_val = in_val / d1(sl_val)
assert_equal(wt(in_type, sl_val), out_val.dtype.str)
for inter_type in NUMERIC_TYPES:
i_val = inter_type(0) # no scaling, regardless of type
assert_equal(wt(in_type, 1, i_val), in_ts)
i_val = inter_type(1) # actual scaling
out_val = in_val - d1(i_val)
assert_equal(wt(in_type, 1, i_val), out_val.dtype.str)
# Combine scaling and intercept
out_val = (in_val - d1(i_val)) / d1(sl_val)
assert_equal(wt(in_type, sl_val, i_val), out_val.dtype.str)
# Confirm that type codes and dtypes work as well
f32s = np.dtype(np.float32).str
assert_equal(wt('f4', 1, 0), f32s)
assert_equal(wt(np.dtype('f4'), 1, 0), f32s)
def test_better_float():
# Better float function
def check_against(f1, f2):
return f1 if FLOAT_TYPES.index(f1) >= FLOAT_TYPES.index(f2) else f2
for first in FLOAT_TYPES:
for other in IUINT_TYPES + np.sctypes['complex']:
assert_equal(better_float_of(first, other), first)
assert_equal(better_float_of(other, first), first)
for other2 in IUINT_TYPES + np.sctypes['complex']:
assert_equal(better_float_of(other, other2), np.float32)
assert_equal(better_float_of(other, other2, np.float64),
np.float64)
for second in FLOAT_TYPES:
assert_equal(better_float_of(first, second),
check_against(first, second))
# Check codes and dtypes work
assert_equal(better_float_of('f4', 'f8', 'f4'), np.float64)
assert_equal(better_float_of('i4', 'i8', 'f8'), np.float64)
def test_best_write_scale_ftype():
# Test best write scaling type
# Types return better of (default, array type) unless scale overflows.
# Return float type cannot be less capable than the input array type
for dtt in IUINT_TYPES + FLOAT_TYPES:
arr = np.arange(10, dtype=dtt)
assert_equal(best_write_scale_ftype(arr, 1, 0),
better_float_of(dtt, np.float32))
assert_equal(best_write_scale_ftype(arr, 1, 0, np.float64),
better_float_of(dtt, np.float64))
assert_equal(best_write_scale_ftype(arr, np.float32(2), 0),
better_float_of(dtt, np.float32))
assert_equal(best_write_scale_ftype(arr, 1, np.float32(1)),
better_float_of(dtt, np.float32))
# Overflowing ints with scaling results in upcast
best_vals = ((np.float32, np.float64),)
if np.longdouble in OK_FLOATS:
best_vals += ((np.float64, np.longdouble),)
for lower_t, higher_t in best_vals:
# Information on this float
L_info = type_info(lower_t)
t_max = L_info['max']
nmant = L_info['nmant'] # number of significand digits
big_delta = lower_t(2**(floor_log2(t_max) - nmant)) # delta below max
# Even large values that don't overflow don't change output
arr = np.array([0, t_max], dtype=lower_t)
assert_equal(best_write_scale_ftype(arr, 1, 0), lower_t)
# Scaling > 1 reduces output values, so no upcast needed
assert_equal(best_write_scale_ftype(arr, lower_t(1.01), 0), lower_t)
# Scaling < 1 increases values, so upcast may be needed (and is here)
assert_equal(best_write_scale_ftype(arr, lower_t(0.99), 0), higher_t)
# Large minus offset on large array can cause upcast
assert_equal(best_write_scale_ftype(arr, 1, -big_delta/2.01), lower_t)
assert_equal(best_write_scale_ftype(arr, 1, -big_delta/2.0), higher_t)
# With infs already in input, default type returns
arr[0] = np.inf
assert_equal(best_write_scale_ftype(arr, lower_t(0.5), 0), lower_t)
arr[0] = -np.inf
assert_equal(best_write_scale_ftype(arr, lower_t(0.5), 0), lower_t)
def test_can_cast():
tests = ((np.float32, np.float32, True, True, True),
(np.float64, np.float32, True, True, True),
(np.complex128, np.float32, False, False, False),
(np.float32, np.complex128, True, True, True),
(np.float32, np.uint8, False, True, True),
(np.uint32, np.complex128, True, True, True),
(np.int64, np.float32, True, True, True),
(np.complex128, np.int16, False, False, False),
(np.float32, np.int16, False, True, True),
(np.uint8, np.int16, True, True, True),
(np.uint16, np.int16, False, True, True),
(np.int16, np.uint16, False, False, True),
(np.int8, np.uint16, False, False, True),
(np.uint16, np.uint8, False, True, True),
)
for intype, outtype, def_res, scale_res, all_res in tests:
assert_equal(def_res, can_cast(intype, outtype))
assert_equal(scale_res, can_cast(intype, outtype, False, True))
assert_equal(all_res, can_cast(intype, outtype, True, True))
def test_write_zeros():
bio = BytesIO()
write_zeros(bio, 10000)
assert_equal(bio.getvalue(), ZEROB*10000)
bio.seek(0)
bio.truncate(0)
write_zeros(bio, 10000, 256)
assert_equal(bio.getvalue(), ZEROB*10000)
bio.seek(0)
bio.truncate(0)
write_zeros(bio, 200, 256)
assert_equal(bio.getvalue(), ZEROB*200)
def test_BinOpener():
# Test that BinOpener does add '.mgz' as gzipped file type
with InTemporaryDirectory():
with BinOpener('test.gz', 'w') as fobj:
assert_true(hasattr(fobj.fobj, 'compress'))
with BinOpener('test.mgz', 'w') as fobj:
assert_true(hasattr(fobj.fobj, 'compress'))
def test_allopen():
    # This import into volumeutils is for compatibility. The code is in the
    # ``openers`` module.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Test default mode is 'rb'
fobj = allopen(__file__)
assert_equal(fobj.mode, 'rb')
# That we can set it
fobj = allopen(__file__, 'r')
assert_equal(fobj.mode, 'r')
# with keyword arguments
fobj = allopen(__file__, mode='r')
assert_equal(fobj.mode, 'r')
# fileobj returns fileobj
msg = asbytes('tiddle pom')
sobj = BytesIO(msg)
fobj = allopen(sobj)
assert_equal(fobj.read(), msg)
# mode is gently ignored
fobj = allopen(sobj, mode='r')
def test_allopen_compresslevel():
# We can set the default compression level with the module global
# Get some data to compress
with open(__file__, 'rb') as fobj:
my_self = fobj.read()
# Prepare loop
fname = 'test.gz'
sizes = {}
# Stash module global
from .. import volumeutils as vu
original_compress_level = vu.default_compresslevel
assert_equal(original_compress_level, 1)
try:
with InTemporaryDirectory():
for compresslevel in ('default', 1, 9):
if compresslevel != 'default':
vu.default_compresslevel = compresslevel
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with allopen(fname, 'wb') as fobj:
fobj.write(my_self)
with open(fname, 'rb') as fobj:
my_selves_smaller = fobj.read()
sizes[compresslevel] = len(my_selves_smaller)
assert_equal(sizes['default'], sizes[1])
assert_true(sizes[1] > sizes[9])
finally:
vu.default_compresslevel = original_compress_level
def test_shape_zoom_affine():
shape = (3, 5, 7)
zooms = (3, 2, 1)
res = shape_zoom_affine(shape, zooms)
exp = np.array([[-3., 0., 0., 3.],
[ 0., 2., 0., -4.],
[ 0., 0., 1., -3.],
[ 0., 0., 0., 1.]])
assert_array_almost_equal(res, exp)
res = shape_zoom_affine((3, 5), (3, 2))
exp = np.array([[-3., 0., 0., 3.],
[ 0., 2., 0., -4.],
[ 0., 0., 1., -0.],
[ 0., 0., 0., 1.]])
assert_array_almost_equal(res, exp)
"""src/utils/ssd_utils.py
Functions and classes from the https://github.com/pierluigiferrari/ssd_keras
GitHub repo that are used in DLAE. Modifications were made to some of these
functions. All copyrights are reserved by the original author.
"""
from __future__ import division
import numpy as np
import keras
import sklearn.utils
from copy import deepcopy
import warnings
import inspect
import cv2
import keras.backend as K
from keras.engine.topology import InputSpec
from keras.engine.topology import Layer
# Bounding box utils pulled from https://github.com/pierluigiferrari/ssd_keras/blob/master/bounding_box_utils/bounding_box_utils.py
# Modified by <NAME> for DLAE
def convert_coordinates(tensor, start_index, conversion, border_pixels='half'):
'''
Convert coordinates for axis-aligned 2D boxes between two coordinate formats.
Creates a copy of `tensor`, i.e. does not operate in place. Currently there are
three supported coordinate formats that can be converted from and to each other:
1) (xmin, xmax, ymin, ymax) - the 'minmax' format
2) (xmin, ymin, xmax, ymax) - the 'corners' format
3) (cx, cy, w, h) - the 'centroids' format
Arguments:
tensor (array): A Numpy nD array containing the four consecutive coordinates
to be converted somewhere in the last axis.
start_index (int): The index of the first coordinate in the last axis of `tensor`.
conversion (str, optional): The conversion direction. Can be 'minmax2centroids',
'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners',
or 'corners2minmax'.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.
Returns:
A Numpy nD array, a copy of the input tensor with the converted coordinates
in place of the original coordinates and the unaltered elements of the original
tensor elsewhere.
'''
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1
elif border_pixels == 'exclude':
d = -1
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind + 1]) / 2.0 # Set cx
tensor1[..., ind + 1] = (tensor[..., ind + 2] + tensor[..., ind + 3]) / 2.0 # Set cy
tensor1[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind] + d # Set w
tensor1[..., ind + 3] = tensor[..., ind + 3] - tensor[..., ind + 2] + d # Set h
elif conversion == 'centroids2minmax':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind + 2] / 2.0 # Set xmin
tensor1[..., ind + 1] = tensor[..., ind] + tensor[..., ind + 2] / 2.0 # Set xmax
tensor1[..., ind + 2] = tensor[..., ind + 1] - tensor[..., ind + 3] / 2.0 # Set ymin
tensor1[..., ind + 3] = tensor[..., ind + 1] + tensor[..., ind + 3] / 2.0 # Set ymax
elif conversion == 'corners2centroids':
tensor1[..., ind] = (tensor[..., ind] + tensor[..., ind + 2]) / 2.0 # Set cx
tensor1[..., ind + 1] = (tensor[..., ind + 1] + tensor[..., ind + 3]) / 2.0 # Set cy
tensor1[..., ind + 2] = tensor[..., ind + 2] - tensor[..., ind] + d # Set w
tensor1[..., ind + 3] = tensor[..., ind + 3] - tensor[..., ind + 1] + d # Set h
elif conversion == 'centroids2corners':
tensor1[..., ind] = tensor[..., ind] - tensor[..., ind + 2] / 2.0 # Set xmin
tensor1[..., ind + 1] = tensor[..., ind + 1] - tensor[..., ind + 3] / 2.0 # Set ymin
tensor1[..., ind + 2] = tensor[..., ind] + tensor[..., ind + 2] / 2.0 # Set xmax
tensor1[..., ind + 3] = tensor[..., ind + 1] + tensor[..., ind + 3] / 2.0 # Set ymax
elif (conversion == 'minmax2corners') or (conversion == 'corners2minmax'):
tensor1[..., ind + 1] = tensor[..., ind + 2]
tensor1[..., ind + 2] = tensor[..., ind + 1]
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids', 'centroids2minmax', 'corners2centroids', 'centroids2corners', 'minmax2corners', and 'corners2minmax'.")
return tensor1
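# Illustrative usage sketch (not part of the original ssd_keras code): a single box in
# the 'corners' format round-trips through 'centroids' and back with the default
# border_pixels='half' (i.e. d == 0):
#
#   box = np.array([10., 20., 30., 60.])                                    # (xmin, ymin, xmax, ymax)
#   cent = convert_coordinates(box, start_index=0, conversion='corners2centroids')
#   # cent -> [20., 40., 20., 40.]                                          # (cx, cy, w, h)
#   back = convert_coordinates(cent, start_index=0, conversion='centroids2corners')
#   # back -> [10., 20., 30., 60.]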
def convert_coordinates2(tensor, start_index, conversion):
'''
A matrix multiplication implementation of `convert_coordinates()`.
Supports only conversion between the 'centroids' and 'minmax' formats.
This function is marginally slower on average than `convert_coordinates()`,
probably because it involves more (unnecessary) arithmetic operations (unnecessary
because the two matrices are sparse).
For details please refer to the documentation of `convert_coordinates()`.
'''
ind = start_index
tensor1 = np.copy(tensor).astype(np.float)
if conversion == 'minmax2centroids':
M = np.array([[0.5, 0., -1., 0.],
[0.5, 0., 1., 0.],
[0., 0.5, 0., -1.],
[0., 0.5, 0., 1.]])
tensor1[..., ind:ind + 4] = np.dot(tensor1[..., ind:ind + 4], M)
elif conversion == 'centroids2minmax':
M = np.array([[1., 1., 0., 0.],
[0., 0., 1., 1.],
[-0.5, 0.5, 0., 0.],
[0., 0., -0.5, 0.5]]) # The multiplicative inverse of the matrix above
tensor1[..., ind:ind + 4] = np.dot(tensor1[..., ind:ind + 4], M)
else:
raise ValueError("Unexpected conversion value. Supported values are 'minmax2centroids' and 'centroids2minmax'.")
return tensor1
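# Illustrative check (not part of the original ssd_keras code): for border_pixels='half'
# the matrix implementation agrees with `convert_coordinates()` on the minmax<->centroids
# conversions, e.g. for a box (xmin, xmax, ymin, ymax) = (10, 30, 20, 60):
#
#   mm = np.array([10., 30., 20., 60.])
#   convert_coordinates2(mm, start_index=0, conversion='minmax2centroids')
#   # -> [20., 40., 20., 40.], the same (cx, cy, w, h) that convert_coordinates() returns.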
def intersection_area(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
'''
Computes the intersection areas of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the intersection areas for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.
boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.
coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
`(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
`(xmin, ymin, xmax, ymax)`.
mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
`(m,n)` matrix with the intersection areas for all possible combinations of the `m` boxes in `boxes1` with the
`n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
            must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
length `m` where the i-th position contains the intersection area of `boxes1[i]` with `boxes2[i]`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.
Returns:
A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values with
the intersection areas of the boxes in `boxes1` and `boxes2`.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
    if not mode in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [xmin, ymin]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmin, ymin]], axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [xmax, ymax]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmax, ymax]], axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, :, 0] * side_lengths[:, :, 1]
elif mode == 'element-wise':
min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, 0] * side_lengths[:, 1]
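# Illustrative usage sketch (not part of the original ssd_keras code): two partially
# overlapping boxes given in the 'corners' format share a 5x5 pixel intersection with
# border_pixels='half':
#
#   b1 = np.array([[0., 0., 10., 10.]])
#   b2 = np.array([[5., 5., 15., 15.]])
#   intersection_area(b1, b2, coords='corners', mode='outer_product')
#   # -> array([[25.]])   (an (m, n) = (1, 1) matrix of intersection areas)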
def intersection_area_(boxes1, boxes2, coords='corners', mode='outer_product', border_pixels='half'):
'''
The same as 'intersection_area()' but for internal use, i.e. without all the safety checks.
'''
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the intersection areas.
if mode == 'outer_product':
# For all possible box combinations, get the greater xmin and ymin values.
# This is a tensor of shape (m,n,2).
min_xy = np.maximum(np.tile(np.expand_dims(boxes1[:, [xmin, ymin]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmin, ymin]], axis=0), reps=(m, 1, 1)))
# For all possible box combinations, get the smaller xmax and ymax values.
# This is a tensor of shape (m,n,2).
max_xy = np.minimum(np.tile(np.expand_dims(boxes1[:, [xmax, ymax]], axis=1), reps=(1, n, 1)),
np.tile(np.expand_dims(boxes2[:, [xmax, ymax]], axis=0), reps=(m, 1, 1)))
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, :, 0] * side_lengths[:, :, 1]
elif mode == 'element-wise':
min_xy = np.maximum(boxes1[:, [xmin, ymin]], boxes2[:, [xmin, ymin]])
max_xy = np.minimum(boxes1[:, [xmax, ymax]], boxes2[:, [xmax, ymax]])
# Compute the side lengths of the intersection rectangles.
side_lengths = np.maximum(0, max_xy - min_xy + d)
return side_lengths[:, 0] * side_lengths[:, 1]
def iou(boxes1, boxes2, coords='centroids', mode='outer_product', border_pixels='half'):
'''
Computes the intersection-over-union similarity (also known as Jaccard similarity)
of two sets of axis-aligned 2D rectangular boxes.
Let `boxes1` and `boxes2` contain `m` and `n` boxes, respectively.
In 'outer_product' mode, returns an `(m,n)` matrix with the IoUs for all possible
combinations of the boxes in `boxes1` and `boxes2`.
In 'element-wise' mode, `m` and `n` must be broadcast-compatible. Refer to the explanation
of the `mode` argument for details.
Arguments:
boxes1 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(m, 4)` containing the coordinates for `m` boxes.
If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes2`.
boxes2 (array): Either a 1D Numpy array of shape `(4, )` containing the coordinates for one box in the
format specified by `coords` or a 2D Numpy array of shape `(n, 4)` containing the coordinates for `n` boxes.
If `mode` is set to 'element_wise', the shape must be broadcast-compatible with `boxes1`.
coords (str, optional): The coordinate format in the input arrays. Can be either 'centroids' for the format
`(cx, cy, w, h)`, 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format
`(xmin, ymin, xmax, ymax)`.
mode (str, optional): Can be one of 'outer_product' and 'element-wise'. In 'outer_product' mode, returns an
`(m,n)` matrix with the IoU overlaps for all possible combinations of the `m` boxes in `boxes1` with the
`n` boxes in `boxes2`. In 'element-wise' mode, returns a 1D array and the shapes of `boxes1` and `boxes2`
            must be broadcast-compatible. If both `boxes1` and `boxes2` have `m` boxes, then this returns an array of
length `m` where the i-th position contains the IoU overlap of `boxes1[i]` with `boxes2[i]`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
            to the boxes, but not the other.
Returns:
A 1D or 2D Numpy array (refer to the `mode` argument for details) of dtype float containing values in [0,1],
the Jaccard similarity of the boxes in `boxes1` and `boxes2`. 0 means there is no overlap between two given
boxes, 1 means their coordinates are identical.
'''
# Make sure the boxes have the right shapes.
if boxes1.ndim > 2: raise ValueError("boxes1 must have rank either 1 or 2, but has rank {}.".format(boxes1.ndim))
if boxes2.ndim > 2: raise ValueError("boxes2 must have rank either 1 or 2, but has rank {}.".format(boxes2.ndim))
if boxes1.ndim == 1: boxes1 = np.expand_dims(boxes1, axis=0)
if boxes2.ndim == 1: boxes2 = np.expand_dims(boxes2, axis=0)
if not (boxes1.shape[1] == boxes2.shape[1] == 4): raise ValueError("All boxes must consist of 4 coordinates, but the boxes in `boxes1` and `boxes2` have {} and {} coordinates, respectively.".format(boxes1.shape[1], boxes2.shape[1]))
if not mode in {'outer_product', 'element-wise'}: raise ValueError("`mode` must be one of 'outer_product' and 'element-wise', but got '{}'.".format(mode))
# Convert the coordinates if necessary.
if coords == 'centroids':
boxes1 = convert_coordinates(boxes1, start_index=0, conversion='centroids2corners')
boxes2 = convert_coordinates(boxes2, start_index=0, conversion='centroids2corners')
coords = 'corners'
elif not (coords in {'minmax', 'corners'}):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
# Compute the IoU.
    # Compute the intersection areas.
intersection_areas = intersection_area_(boxes1, boxes2, coords=coords, mode=mode)
m = boxes1.shape[0] # The number of boxes in `boxes1`
n = boxes2.shape[0] # The number of boxes in `boxes2`
# Compute the union areas.
# Set the correct coordinate indices for the respective formats.
if coords == 'corners':
xmin = 0
ymin = 1
xmax = 2
ymax = 3
elif coords == 'minmax':
xmin = 0
xmax = 1
ymin = 2
ymax = 3
if border_pixels == 'half':
d = 0
elif border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
if mode == 'outer_product':
boxes1_areas = np.tile(np.expand_dims((boxes1[:, xmax] - boxes1[:, xmin] + d) * (boxes1[:, ymax] - boxes1[:, ymin] + d), axis=1), reps=(1, n))
boxes2_areas = np.tile(np.expand_dims((boxes2[:, xmax] - boxes2[:, xmin] + d) * (boxes2[:, ymax] - boxes2[:, ymin] + d), axis=0), reps=(m, 1))
elif mode == 'element-wise':
boxes1_areas = (boxes1[:, xmax] - boxes1[:, xmin] + d) * (boxes1[:, ymax] - boxes1[:, ymin] + d)
boxes2_areas = (boxes2[:, xmax] - boxes2[:, xmin] + d) * (boxes2[:, ymax] - boxes2[:, ymin] + d)
union_areas = boxes1_areas + boxes2_areas - intersection_areas
return intersection_areas / union_areas
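# Illustrative usage sketch (not part of the original ssd_keras code): for the same two
# boxes as in the intersection_area() example above, the intersection is 25, each box
# area is 100, so the union is 175 and the Jaccard similarity is 25 / 175:
#
#   b1 = np.array([[0., 0., 10., 10.]])
#   b2 = np.array([[5., 5., 15., 15.]])
#   iou(b1, b2, coords='corners', mode='outer_product')
#   # -> array([[0.14285714]])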
# AnchorBoxes pulled from https://github.com/pierluigiferrari/ssd_keras/blob/master/keras_layers/keras_layer_AnchorBoxes.py
# Modified by <NAME> for DLAE
class AnchorBoxes(keras.layers.Layer):
'''
A Keras layer to create an output tensor containing anchor box coordinates
and variances based on the input tensor and the passed arguments.
A set of 2D anchor boxes of different aspect ratios is created for each spatial unit of
the input tensor. The number of anchor boxes created per unit depends on the arguments
    `aspect_ratios` and `two_boxes_for_ar1`; in the default case it is 4. The boxes
are parameterized by the coordinate tuple `(xmin, xmax, ymin, ymax)`.
The logic implemented by this layer is identical to the logic in the module
`ssd_box_encode_decode_utils.py`.
The purpose of having this layer in the network is to make the model self-sufficient
at inference time. Since the model is predicting offsets to the anchor boxes
(rather than predicting absolute box coordinates directly), one needs to know the anchor
box coordinates in order to construct the final prediction boxes from the predicted offsets.
If the model's output tensor did not contain the anchor box coordinates, the necessary
information to convert the predicted offsets back to absolute coordinates would be missing
in the model output. The reason why it is necessary to predict offsets to the anchor boxes
rather than to predict absolute box coordinates directly is explained in `README.md`.
Input shape:
4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.
Output shape:
5D tensor of shape `(batch, height, width, n_boxes, 8)`. The last axis contains
the four anchor box coordinates and the four variance values for each box.
'''
def __init__(self,
img_height,
img_width,
this_scale,
next_scale,
aspect_ratios=[0.5, 1.0, 2.0],
two_boxes_for_ar1=True,
this_steps=None,
this_offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
coords='centroids',
normalize_coords=False,
**kwargs):
'''
All arguments need to be set to the same values as in the box encoding process, otherwise the behavior is undefined.
Some of these arguments are explained in more detail in the documentation of the `SSDBoxEncoder` class.
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
this_scale (float): A float in [0, 1], the scaling factor for the size of the generated anchor boxes
as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
aspect_ratios (list, optional): The list of aspect ratios for which default boxes are to be
generated for this layer.
two_boxes_for_ar1 (bool, optional): Only relevant if `aspect_ratios` contains 1.
If `True`, two default boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
                the geometric mean of said scaling factor and the next bigger scaling factor.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
its respective variance value.
coords (str, optional): The box coordinate format to be used internally in the model (i.e. this is not the input format
of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width, and height),
'corners' for the format `(xmin, ymin, xmax, ymax)`, or 'minmax' for the format `(xmin, xmax, ymin, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model uses relative instead of absolute coordinates,
i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
'''
variances = np.array(variances)
self.img_height = img_height
self.img_width = img_width
self.this_scale = this_scale
self.next_scale = next_scale
self.aspect_ratios = aspect_ratios
self.two_boxes_for_ar1 = two_boxes_for_ar1
self.this_steps = this_steps
self.this_offsets = this_offsets
self.clip_boxes = clip_boxes
self.variances = variances
self.coords = coords
self.normalize_coords = normalize_coords
# Compute the number of boxes per cell
if (1 in aspect_ratios) and two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios) + 1
else:
self.n_boxes = len(aspect_ratios)
super(AnchorBoxes, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [keras.layers.InputSpec(shape=input_shape)]
super(AnchorBoxes, self).build(input_shape)
def call(self, x, mask=None):
'''
Return an anchor box tensor based on the shape of the input tensor.
The logic implemented here is identical to the logic in the module `ssd_box_encode_decode_utils.py`.
Note that this tensor does not participate in any graph computations at runtime. It is being created
as a constant once during graph creation and is just being output along with the rest of the model output
during runtime. Because of this, all logic is implemented as Numpy array operations and it is sufficient
to convert the resulting Numpy array into a Keras tensor at the very end before outputting it.
Arguments:
x (tensor): 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`
or `(batch, height, width, channels)` if `dim_ordering = 'tf'`. The input for this
layer must be the output of the localization predictor layer.
'''
# Compute box width and height for each aspect ratio
# The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
        # Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in self.aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = self.this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean of this scale value and the next.
box_height = box_width = np.sqrt(self.this_scale * self.next_scale) * size
wh_list.append((box_width, box_height))
else:
box_height = self.this_scale * size / np.sqrt(ar)
box_width = self.this_scale * size * np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
# We need the shape of the input tensor
batch_size, feature_map_height, feature_map_width, feature_map_channels = x._keras_shape
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes, i.e. how far apart the anchor box center points will be vertically and horizontally.
if (self.this_steps is None):
step_height = self.img_height / feature_map_height
step_width = self.img_width / feature_map_width
else:
if isinstance(self.this_steps, (list, tuple)) and (len(self.this_steps) == 2):
step_height = self.this_steps[0]
step_width = self.this_steps[1]
elif isinstance(self.this_steps, (int, float)):
step_height = self.this_steps
step_width = self.this_steps
# Compute the offsets, i.e. at what pixel values the first anchor box center point will be from the top and from the left of the image.
if (self.this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(self.this_offsets, (list, tuple)) and (len(self.this_offsets) == 2):
offset_height = self.this_offsets[0]
offset_width = self.this_offsets[1]
elif isinstance(self.this_offsets, (int, float)):
offset_height = self.this_offsets
offset_width = self.this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height, (offset_height + feature_map_height - 1) * step_height, feature_map_height)
cx = np.linspace(offset_width * step_width, (offset_width + feature_map_width - 1) * step_width, feature_map_width)
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile() to do what we want further down
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_height, feature_map_width, self.n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, self.n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, self.n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, xmax, ymin, ymax)`
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:,:,:,[0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:,:,:,[0, 2]] = x_coords
y_coords = boxes_tensor[:,:,:,[1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:,:,:,[1, 3]] = y_coords
# If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
# TODO: Implement box limiting directly for `(cx, cy, w, h)` so that we don't have to unnecessarily convert back and forth.
if self.coords == 'centroids':
# Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids', border_pixels='half')
elif self.coords == 'minmax':
            # Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax)`.
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax', border_pixels='half')
# Create a tensor to contain the variances and append it to `boxes_tensor`. This tensor has the same shape
# as `boxes_tensor` and simply contains the same 4 variance values for every position in the last axis.
variances_tensor = np.zeros_like(boxes_tensor) # Has shape `(feature_map_height, feature_map_width, n_boxes, 4)`
variances_tensor += self.variances # Long live broadcasting
# Now `boxes_tensor` becomes a tensor of shape `(feature_map_height, feature_map_width, n_boxes, 8)`
boxes_tensor = np.concatenate((boxes_tensor, variances_tensor), axis=-1)
        # Now prepend one dimension to `boxes_tensor` to account for the batch size and tile it along the batch dimension.
# The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 8)`
boxes_tensor = np.expand_dims(boxes_tensor, axis=0)
boxes_tensor = keras.backend.tensorflow_backend.tile(keras.backend.tensorflow_backend.constant(boxes_tensor, dtype='float32'), (keras.backend.tensorflow_backend.shape(x)[0], 1, 1, 1, 1))
return boxes_tensor
def compute_output_shape(self, input_shape):
batch_size, feature_map_height, feature_map_width, feature_map_channels = input_shape
return (batch_size, feature_map_height, feature_map_width, self.n_boxes, 8)
def get_config(self):
config = {
'img_height': self.img_height,
'img_width': self.img_width,
'this_scale': self.this_scale,
'next_scale': self.next_scale,
'aspect_ratios': list(self.aspect_ratios),
'two_boxes_for_ar1': self.two_boxes_for_ar1,
'clip_boxes': self.clip_boxes,
'variances': list(self.variances),
'coords': self.coords,
'normalize_coords': self.normalize_coords
}
base_config = super(AnchorBoxes, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
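# Illustrative usage sketch (not part of the original ssd_keras code; `loc_pred` and the
# numeric values are hypothetical): the layer is typically attached to the output of a
# localization predictor so the anchor coordinates are carried along with the predictions.
#
#   anchors = AnchorBoxes(img_height=300, img_width=300,
#                         this_scale=0.2, next_scale=0.37,
#                         aspect_ratios=[0.5, 1.0, 2.0],
#                         two_boxes_for_ar1=True,
#                         variances=[0.1, 0.1, 0.2, 0.2],
#                         coords='centroids')(loc_pred)
#   # `anchors` has shape (batch, fm_height, fm_width, 4, 8): four anchor box
#   # coordinates followed by the four variances for each of the 4 boxes per cell.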
class SSDInputEncoder:
'''
Transforms ground truth labels for object detection in images
(2D bounding box coordinates and class labels) to the format required for
training an SSD model.
In the process of encoding the ground truth labels, a template of anchor boxes
is being built, which are subsequently matched to the ground truth boxes
via an intersection-over-union threshold criterion.
'''
def __init__(self,
img_height,
img_width,
n_classes,
predictor_sizes,
min_scale=0.1,
max_scale=0.9,
scales=None,
aspect_ratios_global=[0.5, 1.0, 2.0],
aspect_ratios_per_layer=None,
two_boxes_for_ar1=True,
steps=None,
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
border_pixels='half',
coords='centroids',
normalize_coords=True,
background_id=0):
'''
Arguments:
img_height (int): The height of the input images.
img_width (int): The width of the input images.
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
predictor_sizes (list): A list of int-tuples of the format `(height, width)`
containing the output heights and widths of the convolutional predictor layers.
min_scale (float, optional): The smallest scaling factor for the size of the anchor boxes as a fraction
of the shorter side of the input images. Note that you should set the scaling factors
such that the resulting anchor box sizes correspond to the sizes of the objects you are trying
to detect. Must be >0.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes as a fraction
of the shorter side of the input images. All scaling factors between the smallest and the
largest will be linearly interpolated. Note that the second to last of the linearly interpolated
scaling factors will actually be the scaling factor for the last predictor layer, while the last
scaling factor is used for the second box for aspect ratio 1 in the last predictor layer
if `two_boxes_for_ar1` is `True`. Note that you should set the scaling factors
such that the resulting anchor box sizes correspond to the sizes of the objects you are trying
to detect. Must be greater than or equal to `min_scale`.
scales (list, optional): A list of floats >0 containing scaling factors per convolutional predictor layer.
This list must be one element longer than the number of predictor layers. The first `k` elements are the
scaling factors for the `k` predictor layers, while the last element is used for the second box
for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional
last scaling factor must be passed either way, even if it is not being used. If a list is passed,
this argument overrides `min_scale` and `max_scale`. All scaling factors must be greater than zero.
Note that you should set the scaling factors such that the resulting anchor box sizes correspond to
the sizes of the objects you are trying to detect.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are to be
generated. This list is valid for all prediction layers. Note that you should set the aspect ratios such
that the resulting anchor box shapes roughly correspond to the shapes of the objects you are trying to detect.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each prediction layer.
If a list is passed, it overrides `aspect_ratios_global`. Note that you should set the aspect ratios such
that the resulting anchor box shapes very roughly correspond to the shapes of the objects you are trying to detect.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratios lists that contain 1. Will be ignored otherwise.
If `True`, two anchor boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
                the geometric mean of said scaling factor and the next bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
either ints/floats or tuples of two ints/floats. These numbers represent for each predictor layer how many
pixels apart the anchor box center points should be vertically and horizontally along the spatial grid over
the image. If the list contains ints/floats, then that value will be used for both spatial dimensions.
If the list contains tuples of two ints/floats, then they represent `(step_height, step_width)`.
If no steps are provided, then they will be computed such that the anchor box center points will form an
equidistant grid within the image dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
either floats or tuples of two floats. These numbers represent for each predictor layer how many
pixels from the top and left boarders of the image the top-most and left-most anchor box center points should be
as a fraction of `steps`. The last bit is important: The offsets are not absolute pixel values, but fractions
of the step size specified in the `steps` argument. If the list contains floats, then that value will
be used for both spatial dimensions. If the list contains tuples of two floats, then they represent
`(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, limits the anchor box coordinates to stay within image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
its respective variance value.
matching_type (str, optional): Can be either 'multi' or 'bipartite'. In 'bipartite' mode, each ground truth box will
be matched only to the one anchor box with the highest IoU overlap. In 'multi' mode, in addition to the aforementioned
bipartite matching, all anchor boxes with an IoU overlap greater than or equal to the `pos_iou_threshold` will be
matched to a given ground truth box.
pos_iou_threshold (float, optional): The intersection-over-union similarity threshold that must be
met in order to match a given ground truth box to a given anchor box.
neg_iou_limit (float, optional): The maximum allowed intersection-over-union similarity of an
anchor box with any ground truth box to be labeled a negative (i.e. background) box. If an
anchor box is neither a positive, nor a negative box, it will be ignored during training.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
                to the boxes, but not the other.
coords (str, optional): The box coordinate format to be used internally by the model (i.e. this is not the input format
of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width,
and height), 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
normalize_coords (bool, optional): If `True`, the encoder uses relative instead of absolute coordinates.
                This means instead of using absolute target coordinates, the encoder will scale all coordinates to be within [0,1].
This way learning becomes independent of the input image size.
background_id (int, optional): Determines which class ID is for the background class.
'''
predictor_sizes = np.array(predictor_sizes)
if predictor_sizes.ndim == 1:
predictor_sizes = np.expand_dims(predictor_sizes, axis=0)
##################################################################################
# Handle exceptions.
##################################################################################
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
        if not (scales is None):
if (len(scales) != predictor_sizes.shape[0] + 1): # Must be two nested `if` statements since `list` and `bool` cannot be combined by `&`
raise ValueError("It must be either scales is None or len(scales) == len(predictor_sizes)+1, but len(scales) == {} and len(predictor_sizes)+1 == {}".format(len(scales), len(predictor_sizes)+1))
scales = np.array(scales)
if np.any(scales <= 0):
raise ValueError("All values in `scales` must be greater than 0, but the passed list of scales is {}".format(scales))
else: # If no list of scales was passed, we need to make sure that `min_scale` and `max_scale` are valid values.
if not 0 < min_scale <= max_scale:
raise ValueError("It must be 0 < min_scale <= max_scale, but it is min_scale = {} and max_scale = {}".format(min_scale, max_scale))
if not (aspect_ratios_per_layer is None):
if (len(aspect_ratios_per_layer) != predictor_sizes.shape[0]): # Must be two nested `if` statements since `list` and `bool` cannot be combined by `&`
raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == len(predictor_sizes), but len(aspect_ratios_per_layer) == {} and len(predictor_sizes) == {}".format(len(aspect_ratios_per_layer), len(predictor_sizes)))
for aspect_ratios in aspect_ratios_per_layer:
if np.any(np.array(aspect_ratios) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
else:
if (aspect_ratios_global is None):
raise ValueError("At least one of `aspect_ratios_global` and `aspect_ratios_per_layer` must not be `None`.")
if np.any(np.array(aspect_ratios_global) <= 0):
raise ValueError("All aspect ratios must be greater than zero.")
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
if not (coords == 'minmax' or coords == 'centroids' or coords == 'corners'):
raise ValueError("Unexpected value for `coords`. Supported values are 'minmax', 'corners' and 'centroids'.")
if (not (steps is None)) and (len(steps) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != predictor_sizes.shape[0]):
raise ValueError("You must provide at least one offset value per predictor layer.")
##################################################################################
# Set or compute members.
##################################################################################
self.img_height = img_height
self.img_width = img_width
self.n_classes = n_classes + 1 # + 1 for the background class
self.predictor_sizes = predictor_sizes
self.min_scale = min_scale
self.max_scale = max_scale
# If `scales` is None, compute the scaling factors by linearly interpolating between
# `min_scale` and `max_scale`. If an explicit list of `scales` is given, however,
# then it takes precedent over `min_scale` and `max_scale`.
if (scales is None):
self.scales = np.linspace(self.min_scale, self.max_scale, len(self.predictor_sizes)+1)
else:
# If a list of scales is given explicitly, we'll use that instead of computing it from `min_scale` and `max_scale`.
self.scales = scales
# If `aspect_ratios_per_layer` is None, then we use the same list of aspect ratios
# `aspect_ratios_global` for all predictor layers. If `aspect_ratios_per_layer` is given,
# however, then it takes precedent over `aspect_ratios_global`.
if (aspect_ratios_per_layer is None):
self.aspect_ratios = [aspect_ratios_global] * predictor_sizes.shape[0]
else:
# If aspect ratios are given per layer, we'll use those.
self.aspect_ratios = aspect_ratios_per_layer
self.two_boxes_for_ar1 = two_boxes_for_ar1
if not (steps is None):
self.steps = steps
else:
self.steps = [None] * predictor_sizes.shape[0]
if not (offsets is None):
self.offsets = offsets
else:
self.offsets = [None] * predictor_sizes.shape[0]
self.clip_boxes = clip_boxes
self.variances = variances
self.matching_type = matching_type
self.pos_iou_threshold = pos_iou_threshold
self.neg_iou_limit = neg_iou_limit
self.border_pixels = border_pixels
self.coords = coords
self.normalize_coords = normalize_coords
self.background_id = background_id
# Compute the number of boxes per spatial location for each predictor layer.
# For example, if a predictor layer has three different aspect ratios, [1.0, 0.5, 2.0], and is
# supposed to predict two boxes of slightly different size for aspect ratio 1.0, then that predictor
# layer predicts a total of four boxes at every spatial location across the feature map.
if not (aspect_ratios_per_layer is None):
self.n_boxes = []
for aspect_ratios in aspect_ratios_per_layer:
if (1 in aspect_ratios) & two_boxes_for_ar1:
self.n_boxes.append(len(aspect_ratios) + 1)
else:
self.n_boxes.append(len(aspect_ratios))
else:
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
self.n_boxes = len(aspect_ratios_global) + 1
else:
self.n_boxes = len(aspect_ratios_global)
##################################################################################
# Compute the anchor boxes for each predictor layer.
##################################################################################
# Compute the anchor boxes for each predictor layer. We only have to do this once
# since the anchor boxes depend only on the model configuration, not on the input data.
# For each predictor layer (i.e. for each scaling factor) the tensors for that layer's
# anchor boxes will have the shape `(feature_map_height, feature_map_width, n_boxes, 4)`.
        self.boxes_list = [] # This will store the anchor boxes for each predictor layer.
# The following lists just store diagnostic information. Sometimes it's handy to have the
# boxes' center points, heights, widths, etc. in a list.
self.wh_list_diag = [] # Box widths and heights for each predictor layer
self.steps_diag = [] # Horizontal and vertical distances between any two boxes for each predictor layer
self.offsets_diag = [] # Offsets for each predictor layer
self.centers_diag = [] # Anchor box center points as `(cy, cx)` for each predictor layer
# Iterate over all predictor layers and compute the anchor boxes for each one.
for i in range(len(self.predictor_sizes)):
boxes, center, wh, step, offset = self.generate_anchor_boxes_for_layer(feature_map_size=self.predictor_sizes[i],
aspect_ratios=self.aspect_ratios[i],
this_scale=self.scales[i],
next_scale=self.scales[i+1],
this_steps=self.steps[i],
this_offsets=self.offsets[i],
diagnostics=True)
self.boxes_list.append(boxes)
self.wh_list_diag.append(wh)
self.steps_diag.append(step)
self.offsets_diag.append(offset)
self.centers_diag.append(center)
def __call__(self, ground_truth_labels, diagnostics=False):
'''
Converts ground truth bounding box data into a suitable format to train an SSD model.
Arguments:
ground_truth_labels (list): A python list of length `batch_size` that contains one 2D Numpy array
for each batch image. Each such array has `k` rows for the `k` ground truth bounding boxes belonging
to the respective image, and the data for each ground truth bounding box has the format
`(class_id, xmin, ymin, xmax, ymax)` (i.e. the 'corners' coordinate format), and `class_id` must be
an integer greater than 0 for all boxes as class ID 0 is reserved for the background class.
diagnostics (bool, optional): If `True`, not only the encoded ground truth tensor will be returned,
but also a copy of it with anchor box coordinates in place of the ground truth coordinates.
This can be very useful if you want to visualize which anchor boxes got matched to which ground truth
boxes.
Returns:
`y_encoded`, a 3D numpy array of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)` that serves as the
ground truth label tensor for training, where `#boxes` is the total number of boxes predicted by the
            model per image, and the classes are one-hot-encoded. The four elements after the class vectors in
the last axis are the box coordinates, the next four elements after that are just dummy elements, and
the last four elements are the variances.
'''
# Mapping to define which indices represent which coordinates in the ground truth.
class_id = 0
xmin = 1
ymin = 2
xmax = 3
ymax = 4
batch_size = len(ground_truth_labels)
##################################################################################
# Generate the template for y_encoded.
##################################################################################
y_encoded = self.generate_encoding_template(batch_size=batch_size, diagnostics=False)
##################################################################################
# Match ground truth boxes to anchor boxes.
##################################################################################
# Match the ground truth boxes to the anchor boxes. Every anchor box that does not have
# a ground truth match and for which the maximal IoU overlap with any ground truth box is less
# than or equal to `neg_iou_limit` will be a negative (background) box.
y_encoded[:, :, self.background_id] = 1 # All boxes are background boxes by default.
n_boxes = y_encoded.shape[1] # The total number of boxes that the model predicts per batch item
class_vectors = np.eye(self.n_classes) # An identity matrix that we'll use as one-hot class vectors
for i in range(batch_size): # For each batch item...
if ground_truth_labels[i].size == 0: continue # If there is no ground truth for this batch item, there is nothing to match.
labels = ground_truth_labels[i].astype(np.float) # The labels for this batch item
# Check for degenerate ground truth bounding boxes before attempting any computations.
if np.any(labels[:,[xmax]] - labels[:,[xmin]] <= 0) or np.any(labels[:,[ymax]] - labels[:,[ymin]] <= 0):
raise DegenerateBoxError("SSDInputEncoder detected degenerate ground truth bounding boxes for batch item {} with bounding boxes {}, ".format(i, labels) +
"i.e. bounding boxes where xmax <= xmin and/or ymax <= ymin. Degenerate ground truth " +
"bounding boxes will lead to NaN errors during the training.")
# Maybe normalize the box coordinates.
if self.normalize_coords:
labels[:,[ymin,ymax]] /= self.img_height # Normalize ymin and ymax relative to the image height
labels[:,[xmin,xmax]] /= self.img_width # Normalize xmin and xmax relative to the image width
# Maybe convert the box coordinate format.
if self.coords == 'centroids':
labels = convert_coordinates(labels, start_index=xmin, conversion='corners2centroids', border_pixels=self.border_pixels)
elif self.coords == 'minmax':
labels = convert_coordinates(labels, start_index=xmin, conversion='corners2minmax')
classes_one_hot = class_vectors[labels[:, class_id].astype(np.int)] # The one-hot class IDs for the ground truth boxes of this batch item
labels_one_hot = np.concatenate([classes_one_hot, labels[:, [xmin,ymin,xmax,ymax]]], axis=-1) # The one-hot version of the labels for this batch item
# Compute the IoU similarities between all anchor boxes and all ground truth boxes for this batch item.
# This is a matrix of shape `(num_ground_truth_boxes, num_anchor_boxes)`.
similarities = iou(labels[:,[xmin,ymin,xmax,ymax]], y_encoded[i,:,-12:-8], coords=self.coords, mode='outer_product', border_pixels=self.border_pixels)
# First: Do bipartite matching, i.e. match each ground truth box to the one anchor box with the highest IoU.
# This ensures that each ground truth box will have at least one good match.
# For each ground truth box, get the anchor box to match with it.
bipartite_matches = match_bipartite_greedy(weight_matrix=similarities)
# Write the ground truth data to the matched anchor boxes.
y_encoded[i, bipartite_matches, :-8] = labels_one_hot
# Set the columns of the matched anchor boxes to zero to indicate that they were matched.
similarities[:, bipartite_matches] = 0
# Second: Maybe do 'multi' matching, where each remaining anchor box will be matched to its most similar
# ground truth box with an IoU of at least `pos_iou_threshold`, or not matched if there is no
# such ground truth box.
if self.matching_type == 'multi':
# Get all matches that satisfy the IoU threshold.
matches = match_multi(weight_matrix=similarities, threshold=self.pos_iou_threshold)
# Write the ground truth data to the matched anchor boxes.
y_encoded[i, matches[1], :-8] = labels_one_hot[matches[0]]
# Set the columns of the matched anchor boxes to zero to indicate that they were matched.
similarities[:, matches[1]] = 0
# Third: Now after the matching is done, all negative (background) anchor boxes that have
            # an IoU of `neg_iou_limit` or more with any ground truth box will be set to neutral,
# i.e. they will no longer be background boxes. These anchors are "too close" to a
# ground truth box to be valid background boxes.
max_background_similarities = np.amax(similarities, axis=0)
neutral_boxes = np.nonzero(max_background_similarities >= self.neg_iou_limit)[0]
y_encoded[i, neutral_boxes, self.background_id] = 0
##################################################################################
# Convert box coordinates to anchor box offsets.
##################################################################################
if self.coords == 'centroids':
y_encoded[:,:,[-12,-11]] -= y_encoded[:,:,[-8,-7]] # cx(gt) - cx(anchor), cy(gt) - cy(anchor)
y_encoded[:,:,[-12,-11]] /= y_encoded[:,:,[-6,-5]] * y_encoded[:,:,[-4,-3]] # (cx(gt) - cx(anchor)) / w(anchor) / cx_variance, (cy(gt) - cy(anchor)) / h(anchor) / cy_variance
y_encoded[:,:,[-10,-9]] /= y_encoded[:,:,[-6,-5]] # w(gt) / w(anchor), h(gt) / h(anchor)
y_encoded[:,:,[-10,-9]] = np.log(y_encoded[:,:,[-10,-9]]) / y_encoded[:,:,[-2,-1]] # ln(w(gt) / w(anchor)) / w_variance, ln(h(gt) / h(anchor)) / h_variance (ln == natural logarithm)
elif self.coords == 'corners':
y_encoded[:,:,-12:-8] -= y_encoded[:,:,-8:-4] # (gt - anchor) for all four coordinates
y_encoded[:,:,[-12,-10]] /= np.expand_dims(y_encoded[:,:,-6] - y_encoded[:,:,-8], axis=-1) # (xmin(gt) - xmin(anchor)) / w(anchor), (xmax(gt) - xmax(anchor)) / w(anchor)
y_encoded[:,:,[-11,-9]] /= np.expand_dims(y_encoded[:,:,-5] - y_encoded[:,:,-7], axis=-1) # (ymin(gt) - ymin(anchor)) / h(anchor), (ymax(gt) - ymax(anchor)) / h(anchor)
y_encoded[:,:,-12:-8] /= y_encoded[:,:,-4:] # (gt - anchor) / size(anchor) / variance for all four coordinates, where 'size' refers to w and h respectively
elif self.coords == 'minmax':
y_encoded[:,:,-12:-8] -= y_encoded[:,:,-8:-4] # (gt - anchor) for all four coordinates
y_encoded[:,:,[-12,-11]] /= np.expand_dims(y_encoded[:,:,-7] - y_encoded[:,:,-8], axis=-1) # (xmin(gt) - xmin(anchor)) / w(anchor), (xmax(gt) - xmax(anchor)) / w(anchor)
y_encoded[:,:,[-10,-9]] /= np.expand_dims(y_encoded[:,:,-5] - y_encoded[:,:,-6], axis=-1) # (ymin(gt) - ymin(anchor)) / h(anchor), (ymax(gt) - ymax(anchor)) / h(anchor)
y_encoded[:,:,-12:-8] /= y_encoded[:,:,-4:] # (gt - anchor) / size(anchor) / variance for all four coordinates, where 'size' refers to w and h respectively
if diagnostics:
# Here we'll save the matched anchor boxes (i.e. anchor boxes that were matched to a ground truth box, but keeping the anchor box coordinates).
y_matched_anchors = np.copy(y_encoded)
y_matched_anchors[:,:,-12:-8] = 0 # Keeping the anchor box coordinates means setting the offsets to zero.
return y_encoded, y_matched_anchors
else:
return y_encoded
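# --- Worked example (illustrative, not from the original code): for the
# 'centroids' case above, a ground truth box (cx, cy, w, h) = (60, 40, 30, 20)
# matched to an anchor (50, 50, 40, 40) with variances (0.1, 0.1, 0.2, 0.2)
# encodes to:
#   cx offset: (60 - 50) / 40 / 0.1 = 2.5
#   cy offset: (40 - 50) / 40 / 0.1 = -2.5
#   w offset:  ln(30 / 40) / 0.2 ≈ -1.44
#   h offset:  ln(20 / 40) / 0.2 ≈ -3.47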
def generate_anchor_boxes_for_layer(self,
feature_map_size,
aspect_ratios,
this_scale,
next_scale,
this_steps=None,
this_offsets=None,
diagnostics=False):
'''
Computes an array of the spatial positions and sizes of the anchor boxes for one predictor layer
of size `feature_map_size == [feature_map_height, feature_map_width]`.
Arguments:
feature_map_size (tuple): A list or tuple `[feature_map_height, feature_map_width]` with the spatial
dimensions of the feature map for which to generate the anchor boxes.
aspect_ratios (list): A list of floats, the aspect ratios for which anchor boxes are to be generated.
All list elements must be unique.
this_scale (float): A float in [0, 1], the scaling factor for the size of the generated anchor boxes
as a fraction of the shorter side of the input image.
next_scale (float): A float in [0, 1], the next larger scaling factor. Only relevant if
`self.two_boxes_for_ar1 == True`.
diagnostics (bool, optional): If true, the following additional outputs will be returned:
1) A list of the center point `x` and `y` coordinates for each spatial location.
2) A list containing `(width, height)` for each box aspect ratio.
3) A tuple containing `(step_height, step_width)`
4) A tuple containing `(offset_height, offset_width)`
This information can be useful to understand in just a few numbers what the generated grid of
anchor boxes actually looks like, i.e. how large the different boxes are and how dense
their spatial distribution is, in order to determine whether the box grid covers the input images
appropriately and whether the box sizes are appropriate to fit the sizes of the objects
to be detected.
Returns:
A 4D Numpy tensor of shape `(feature_map_height, feature_map_width, n_boxes_per_cell, 4)` where the
last dimension contains `(xmin, xmax, ymin, ymax)` for each anchor box in each cell of the feature map.
'''
# Compute box width and height for each aspect ratio.
# The shorter side of the image will be used to compute `w` and `h` using `scale` and `aspect_ratios`.
size = min(self.img_height, self.img_width)
# Compute the box widths and heights for all aspect ratios
wh_list = []
for ar in aspect_ratios:
if (ar == 1):
# Compute the regular anchor box for aspect ratio 1.
box_height = box_width = this_scale * size
wh_list.append((box_width, box_height))
if self.two_boxes_for_ar1:
# Compute one slightly larger version using the geometric mean of this scale value and the next.
box_height = box_width = np.sqrt(this_scale * next_scale) * size
wh_list.append((box_width, box_height))
else:
box_width = this_scale * size * np.sqrt(ar)
box_height = this_scale * size / np.sqrt(ar)
wh_list.append((box_width, box_height))
wh_list = np.array(wh_list)
n_boxes = len(wh_list)
# Compute the grid of box center points. They are identical for all aspect ratios.
# Compute the step sizes, i.e. how far apart the anchor box center points will be vertically and horizontally.
if (this_steps is None):
step_height = self.img_height / feature_map_size[0]
step_width = self.img_width / feature_map_size[1]
else:
if isinstance(this_steps, (list, tuple)) and (len(this_steps) == 2):
step_height = this_steps[0]
step_width = this_steps[1]
elif isinstance(this_steps, (int, float)):
step_height = this_steps
step_width = this_steps
# Compute the offsets, i.e. at what pixel values the first anchor box center point will be from the top and from the left of the image.
if (this_offsets is None):
offset_height = 0.5
offset_width = 0.5
else:
if isinstance(this_offsets, (list, tuple)) and (len(this_offsets) == 2):
offset_height = this_offsets[0]
offset_width = this_offsets[1]
elif isinstance(this_offsets, (int, float)):
offset_height = this_offsets
offset_width = this_offsets
# Now that we have the offsets and step sizes, compute the grid of anchor box center points.
cy = np.linspace(offset_height * step_height, (offset_height + feature_map_size[0] - 1) * step_height, feature_map_size[0])
cx = np.linspace(offset_width * step_width, (offset_width + feature_map_size[1] - 1) * step_width, feature_map_size[1])
cx_grid, cy_grid = np.meshgrid(cx, cy)
cx_grid = np.expand_dims(cx_grid, -1) # This is necessary for np.tile() to do what we want further down
cy_grid = np.expand_dims(cy_grid, -1) # This is necessary for np.tile() to do what we want further down
# Create a 4D tensor template of shape `(feature_map_height, feature_map_width, n_boxes, 4)`
# where the last dimension will contain `(cx, cy, w, h)`
boxes_tensor = np.zeros((feature_map_size[0], feature_map_size[1], n_boxes, 4))
boxes_tensor[:, :, :, 0] = np.tile(cx_grid, (1, 1, n_boxes)) # Set cx
boxes_tensor[:, :, :, 1] = np.tile(cy_grid, (1, 1, n_boxes)) # Set cy
boxes_tensor[:, :, :, 2] = wh_list[:, 0] # Set w
boxes_tensor[:, :, :, 3] = wh_list[:, 1] # Set h
# Convert `(cx, cy, w, h)` to `(xmin, ymin, xmax, ymax)`
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='centroids2corners')
# If `clip_boxes` is enabled, clip the coordinates to lie within the image boundaries
if self.clip_boxes:
x_coords = boxes_tensor[:,:,:,[0, 2]]
x_coords[x_coords >= self.img_width] = self.img_width - 1
x_coords[x_coords < 0] = 0
boxes_tensor[:,:,:,[0, 2]] = x_coords
y_coords = boxes_tensor[:,:,:,[1, 3]]
y_coords[y_coords >= self.img_height] = self.img_height - 1
y_coords[y_coords < 0] = 0
boxes_tensor[:,:,:,[1, 3]] = y_coords
# If `normalize_coords` is enabled, normalize the coordinates to be within [0,1]
if self.normalize_coords:
boxes_tensor[:, :, :, [0, 2]] /= self.img_width
boxes_tensor[:, :, :, [1, 3]] /= self.img_height
# TODO: Implement box limiting directly for `(cx, cy, w, h)` so that we don't have to unnecessarily convert back and forth.
if self.coords == 'centroids':
# Convert `(xmin, ymin, xmax, ymax)` back to `(cx, cy, w, h)`.
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2centroids', border_pixels='half')
elif self.coords == 'minmax':
# Convert `(xmin, ymin, xmax, ymax)` to `(xmin, xmax, ymin, ymax)`.
boxes_tensor = convert_coordinates(boxes_tensor, start_index=0, conversion='corners2minmax', border_pixels='half')
if diagnostics:
return boxes_tensor, (cy, cx), wh_list, (step_height, step_width), (offset_height, offset_width)
else:
return boxes_tensor
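# --- Worked example (illustrative, not from the original code): for a
# hypothetical 300x300 input image and a 3x3 feature map with the default 0.5
# offsets and no explicit `this_steps`, the step size computed above is
# 300 / 3 = 100 pixels in each direction, so the anchor box centers lie at
# cx = cy = [50, 150, 250].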
def generate_encoding_template(self, batch_size, diagnostics=False):
'''
Produces an encoding template for the ground truth label tensor for a given batch.
Note that all tensor creation, reshaping and concatenation operations performed in this function
and the sub-functions it calls are identical to those performed inside the SSD model. This, of course,
must be the case in order to preserve the spatial meaning of each box prediction, but it's useful to make
yourself aware of this fact and why it is necessary.
In other words, the boxes in `y_encoded` must have a specific order in order to correspond to the right spatial
positions and scales of the boxes predicted by the model. The sequence of operations here ensures that `y_encoded`
has this specific form.
Arguments:
batch_size (int): The batch size.
diagnostics (bool, optional): See the documentation for `generate_anchor_boxes()`. The diagnostic output
here is similar, just for all predictor conv layers.
Returns:
A Numpy array of shape `(batch_size, #boxes, #classes + 12)`, the template into which to encode
the ground truth labels for training. The last axis has length `#classes + 12` because the model
output contains not only the 4 predicted box coordinate offsets, but also the 4 coordinates for
the anchor boxes and the 4 variance values.
'''
# Tile the anchor boxes for each predictor layer across all batch items.
boxes_batch = []
for boxes in self.boxes_list:
# Prepend one dimension to `self.boxes_list` to account for the batch size and tile it along.
# The result will be a 5D tensor of shape `(batch_size, feature_map_height, feature_map_width, n_boxes, 4)`
boxes = np.expand_dims(boxes, axis=0)
boxes = np.tile(boxes, (batch_size, 1, 1, 1, 1))
# Now reshape the 5D tensor above into a 3D tensor of shape
# `(batch, feature_map_height * feature_map_width * n_boxes, 4)`. The resulting
# order of the tensor content will be identical to the order obtained from the reshaping operation
# in our Keras model (we're using the Tensorflow backend, and tf.reshape() and np.reshape()
# use the same default index order, which is C-like index ordering)
boxes = np.reshape(boxes, (batch_size, -1, 4))
boxes_batch.append(boxes)
# Concatenate the anchor tensors from the individual layers to one.
boxes_tensor = np.concatenate(boxes_batch, axis=1)
# 3: Create a template tensor to hold the one-hot class encodings of shape `(batch, #boxes, #classes)`
# It will contain all zeros for now, the classes will be set in the matching process that follows
classes_tensor = np.zeros((batch_size, boxes_tensor.shape[1], self.n_classes))
# 4: Create a tensor to contain the variances. This tensor has the same shape as `boxes_tensor` and simply
# contains the same 4 variance values for every position in the last axis.
variances_tensor = np.zeros_like(boxes_tensor)
variances_tensor += self.variances # Long live broadcasting
# 5: Concatenate the classes, boxes and variances tensors to get our final template for y_encoded. We also need
# another tensor of the shape of `boxes_tensor` as a space filler so that `y_encoding_template` has the same
# shape as the SSD model output tensor. The content of this tensor is irrelevant, we'll just use
# `boxes_tensor` a second time.
y_encoding_template = np.concatenate((classes_tensor, boxes_tensor, boxes_tensor, variances_tensor), axis=2)
if diagnostics:
return y_encoding_template, self.centers_diag, self.wh_list_diag, self.steps_diag, self.offsets_diag
else:
return y_encoding_template
def match_bipartite_greedy(weight_matrix):
'''
Returns a bipartite matching according to the given weight matrix.
The algorithm works as follows:
Let the first axis of `weight_matrix` represent ground truth boxes
and the second axis anchor boxes.
The ground truth box that has the greatest similarity with any
anchor box will be matched first, then out of the remaining ground
truth boxes, the ground truth box that has the greatest similarity
with any of the remaining anchor boxes will be matched second, and
so on. That is, the ground truth boxes will be matched in descending
order by maximum similarity with any of the respectively remaining
anchor boxes.
The runtime complexity is O(m^2 * n), where `m` is the number of
ground truth boxes and `n` is the number of anchor boxes.
Arguments:
weight_matrix (array): A 2D Numpy array that represents the weight matrix
for the matching process. If `(m,n)` is the shape of the weight matrix,
it must be `m <= n`. The weights can be integers or floating point
numbers. The matching process will maximize, i.e. larger weights are
preferred over smaller weights.
Returns:
A 1D Numpy array of length `weight_matrix.shape[0]` that represents
the matched index along the second axis of `weight_matrix` for each index
along the first axis.
'''
weight_matrix = np.copy(weight_matrix) # We'll modify this array.
num_ground_truth_boxes = weight_matrix.shape[0]
all_gt_indices = list(range(num_ground_truth_boxes)) # Only relevant for fancy-indexing below.
# This 1D array will contain for each ground truth box the index of
# the matched anchor box.
matches = np.zeros(num_ground_truth_boxes, dtype=int)
# In each iteration of the loop below, exactly one ground truth box
# will be matched to one anchor box.
for _ in range(num_ground_truth_boxes):
# Find the maximal anchor-ground truth pair in two steps: First, reduce
# over the anchor boxes and then reduce over the ground truth boxes.
anchor_indices = np.argmax(weight_matrix, axis=1) # Reduce along the anchor box axis.
overlaps = weight_matrix[all_gt_indices, anchor_indices]
ground_truth_index = np.argmax(overlaps) # Reduce along the ground truth box axis.
anchor_index = anchor_indices[ground_truth_index]
matches[ground_truth_index] = anchor_index # Set the match.
# Set the row of the matched ground truth box and the column of the matched
# anchor box to all zeros. This ensures that those boxes will not be matched again,
# because they will never be the best matches for any other boxes.
weight_matrix[ground_truth_index] = 0
weight_matrix[:,anchor_index] = 0
return matches
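# --- Illustrative sketch (hypothetical helper, not part of the original module):
# `match_bipartite_greedy` on a tiny made-up IoU matrix with 2 ground truth
# boxes and 4 anchor boxes.
def _demo_bipartite_matching():
    import numpy as np
    similarities = np.array([[0.1, 0.8, 0.3, 0.0],
                             [0.2, 0.7, 0.9, 0.1]])
    # Ground truth box 1 is matched first to anchor 2 (IoU 0.9), then ground
    # truth box 0 is matched to anchor 1 (IoU 0.8), so the result is [1, 2].
    return match_bipartite_greedy(weight_matrix=similarities)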
def match_multi(weight_matrix, threshold):
'''
Matches all elements along the second axis of `weight_matrix` to their best
matches along the first axis subject to the constraint that the weight of a match
must be greater than or equal to `threshold` in order to produce a match.
If the weight matrix contains elements that should be ignored, the row or column
representing the respective element should be set to a value below `threshold`.
Arguments:
weight_matrix (array): A 2D Numpy array that represents the weight matrix
for the matching process. If `(m,n)` is the shape of the weight matrix,
it must be `m <= n`. The weights can be integers or floating point
numbers. The matching process will maximize, i.e. larger weights are
preferred over smaller weights.
threshold (float): A float that represents the threshold (i.e. lower bound)
that must be met by a pair of elements to produce a match.
Returns:
Two 1D Numpy arrays of equal length that represent the matched indices. The first
array contains the indices along the first axis of `weight_matrix`, the second array
contains the indices along the second axis.
'''
num_anchor_boxes = weight_matrix.shape[1]
all_anchor_indices = list(range(num_anchor_boxes)) # Only relevant for fancy-indexing below.
# Find the best ground truth match for every anchor box.
ground_truth_indices = np.argmax(weight_matrix, axis=0) # Array of shape (weight_matrix.shape[1],)
overlaps = weight_matrix[ground_truth_indices, all_anchor_indices] # Array of shape (weight_matrix.shape[1],)
# Filter out the matches with a weight below the threshold.
anchor_indices_thresh_met = np.nonzero(overlaps >= threshold)[0]
gt_indices_thresh_met = ground_truth_indices[anchor_indices_thresh_met]
return gt_indices_thresh_met, anchor_indices_thresh_met
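# --- Illustrative sketch (hypothetical helper, not part of the original module):
# `match_multi` with a 0.5 threshold on the same made-up similarity matrix as above.
def _demo_multi_matching():
    import numpy as np
    similarities = np.array([[0.1, 0.8, 0.3, 0.0],
                             [0.2, 0.7, 0.9, 0.1]])
    # Anchors 1 and 2 meet the threshold; anchor 1 matches ground truth box 0
    # and anchor 2 matches ground truth box 1, giving (array([0, 1]), array([1, 2])).
    return match_multi(weight_matrix=similarities, threshold=0.5)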
class BoxFilter:
'''
Returns all bounding boxes that are valid with respect to the defined criteria.
'''
def __init__(self,
check_overlap=True,
check_min_area=True,
check_degenerate=True,
overlap_criterion='center_point',
overlap_bounds=(0.3, 1.0),
min_area=16,
labels_format={'class_id': 0, 'xmin': 1, 'ymin': 2, 'xmax': 3, 'ymax': 4},
border_pixels='half'):
'''
Arguments:
check_overlap (bool, optional): Whether or not to enforce the overlap requirements defined by
`overlap_criterion` and `overlap_bounds`. Sometimes you might want to use the box filter only
to enforce a certain minimum area for all boxes (see next argument), in such cases you can
turn the overlap requirements off.
check_min_area (bool, optional): Whether or not to enforce the minimum area requirement defined
by `min_area`. If `True`, any boxes that have an area (in pixels) that is smaller than `min_area`
will be removed from the labels of an image. Bounding boxes below a certain area aren't useful
training examples. An object that takes up only, say, 5 pixels in an image is probably not
recognizable anymore, neither for a human, nor for an object detection model. It makes sense
to remove such boxes.
check_degenerate (bool, optional): Whether or not to check for and remove degenerate bounding boxes.
Degenerate bounding boxes are boxes that have `xmax <= xmin` and/or `ymax <= ymin`. In particular,
boxes with a width and/or height of zero are degenerate. It is obviously important to filter out
such boxes, so you should only set this option to `False` if you are certain that degenerate
boxes are not possible in your data and processing chain.
overlap_criterion (str, optional): Can be either of 'center_point', 'iou', or 'area'. Determines
which boxes are considered valid with respect to a given image. If set to 'center_point',
a given bounding box is considered valid if its center point lies within the image.
If set to 'area', a given bounding box is considered valid if the quotient of its intersection
area with the image and its own area is within the given `overlap_bounds`. If set to 'iou', a given
bounding box is considered valid if its IoU with the image is within the given `overlap_bounds`.
overlap_bounds (list or BoundGenerator, optional): Only relevant if `overlap_criterion` is 'area' or 'iou'.
Determines the lower and upper bounds for `overlap_criterion`. Can be either a 2-tuple of scalars
representing a lower bound and an upper bound, or a `BoundGenerator` object, which provides
the possibility to generate bounds randomly.
min_area (int, optional): Only relevant if `check_min_area` is `True`. Defines the minimum area in
pixels that a bounding box must have in order to be valid. Boxes with an area smaller than this
will be removed.
labels_format (dict, optional): A dictionary that defines which index in the last axis of the labels
of an image contains which bounding box coordinate. The dictionary maps at least the keywords
'xmin', 'ymin', 'xmax', and 'ymax' to their respective indices within last axis of the labels array.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
'''
if not isinstance(overlap_bounds, (list, tuple, BoundGenerator)):
raise ValueError("`overlap_bounds` must be either a 2-tuple of scalars or a `BoundGenerator` object.")
if isinstance(overlap_bounds, (list, tuple)) and (overlap_bounds[0] > overlap_bounds[1]):
raise ValueError("The lower bound must not be greater than the upper bound.")
if not (overlap_criterion in {'iou', 'area', 'center_point'}):
raise ValueError("`overlap_criterion` must be one of 'iou', 'area', or 'center_point'.")
self.overlap_criterion = overlap_criterion
self.overlap_bounds = overlap_bounds
self.min_area = min_area
self.check_overlap = check_overlap
self.check_min_area = check_min_area
self.check_degenerate = check_degenerate
self.labels_format = labels_format
self.border_pixels = border_pixels
def __call__(self,
labels,
image_height=None,
image_width=None):
'''
Arguments:
labels (array): The labels to be filtered. This is an array with shape `(m,n)`, where
`m` is the number of bounding boxes and `n` is the number of elements that defines
each bounding box (box coordinates, class ID, etc.). The box coordinates are expected
to be in the image's coordinate system.
image_height (int): Only relevant if `check_overlap == True`. The height of the image
(in pixels) to compare the box coordinates to.
image_width (int): Only relevant if `check_overlap == True`. The width of the image (in pixels) to compare
the box coordinates to.
Returns:
An array containing the labels of all boxes that are valid.
'''
labels = np.copy(labels)
xmin = self.labels_format['xmin']
ymin = self.labels_format['ymin']
xmax = self.labels_format['xmax']
ymax = self.labels_format['ymax']
# Record the boxes that pass all checks here.
requirements_met = np.ones(shape=labels.shape[0], dtype=bool)
if self.check_degenerate:
non_degenerate = (labels[:,xmax] > labels[:,xmin]) * (labels[:,ymax] > labels[:,ymin])
requirements_met *= non_degenerate
if self.check_min_area:
min_area_met = (labels[:,xmax] - labels[:,xmin]) * (labels[:,ymax] - labels[:,ymin]) >= self.min_area
requirements_met *= min_area_met
if self.check_overlap:
# Get the lower and upper bounds.
if isinstance(self.overlap_bounds, BoundGenerator):
lower, upper = self.overlap_bounds()
else:
lower, upper = self.overlap_bounds
# Compute which boxes are valid.
if self.overlap_criterion == 'iou':
# Compute the patch coordinates.
image_coords = np.array([0, 0, image_width, image_height])
# Compute the IoU between the patch and all of the ground truth boxes.
image_boxes_iou = iou(image_coords, labels[:, [xmin, ymin, xmax, ymax]], coords='corners', mode='element-wise', border_pixels=self.border_pixels)
requirements_met *= (image_boxes_iou > lower) * (image_boxes_iou <= upper)
elif self.overlap_criterion == 'area':
if self.border_pixels == 'half':
d = 0
elif self.border_pixels == 'include':
d = 1 # If border pixels are supposed to belong to the bounding boxes, we have to add one pixel to any difference `xmax - xmin` or `ymax - ymin`.
elif self.border_pixels == 'exclude':
d = -1 # If border pixels are not supposed to belong to the bounding boxes, we have to subtract one pixel from any difference `xmax - xmin` or `ymax - ymin`.
# Compute the areas of the boxes.
box_areas = (labels[:,xmax] - labels[:,xmin] + d) * (labels[:,ymax] - labels[:,ymin] + d)
# Compute the intersection area between the patch and all of the ground truth boxes.
clipped_boxes = np.copy(labels)
clipped_boxes[:,[ymin,ymax]] = np.clip(labels[:,[ymin,ymax]], a_min=0, a_max=image_height-1)
clipped_boxes[:,[xmin,xmax]] = np.clip(labels[:,[xmin,xmax]], a_min=0, a_max=image_width-1)
intersection_areas = (clipped_boxes[:,xmax] - clipped_boxes[:,xmin] + d) * (clipped_boxes[:,ymax] - clipped_boxes[:,ymin] + d) # The same border-pixel correction `d` as above applies to the intersection areas.
# Check which boxes meet the overlap requirements.
if lower == 0.0:
mask_lower = intersection_areas > lower * box_areas # If `self.lower == 0`, we want to make sure that boxes with area 0 don't count, hence the ">" sign instead of the ">=" sign.
else:
mask_lower = intersection_areas >= lower * box_areas # Especially for the case `self.lower == 1` we want the ">=" sign, otherwise no boxes would count at all.
mask_upper = intersection_areas <= upper * box_areas
requirements_met *= mask_lower * mask_upper
elif self.overlap_criterion == 'center_point':
# Compute the center points of the boxes.
cy = (labels[:,ymin] + labels[:,ymax]) / 2
cx = (labels[:,xmin] + labels[:,xmax]) / 2
# Check which of the boxes have center points within the cropped patch and remove those that don't.
requirements_met *= (cy >= 0.0) * (cy <= image_height-1) * (cx >= 0.0) * (cx <= image_width-1)
return labels[requirements_met]
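# --- Illustrative sketch (hypothetical helper, not part of the original module):
# filtering a made-up label array with the default 'center_point' criterion
# against a 100x100 image.
def _demo_box_filter():
    import numpy as np
    box_filter = BoxFilter()
    labels = np.array([[1, 10, 10, 40, 40],    # kept: valid box
                       [2, 90, 90, 150, 150],  # removed: center point (120, 120) lies outside the image
                       [3, 5, 5, 6, 6]])       # removed: area of 1 px is below the default `min_area` of 16
    return box_filter(labels, image_height=100, image_width=100)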
class BoundGenerator:
'''
Generates pairs of floating point values that represent lower and upper bounds
from a given sample space.
'''
def __init__(self,
sample_space=((0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
(None, None)),
weights=None):
'''
Arguments:
sample_space (list or tuple): A list, tuple, or array-like object of shape
`(n, 2)` that contains `n` samples to choose from, where each sample
is a 2-tuple of scalars and/or `None` values.
weights (list or tuple, optional): A list or tuple representing the distribution
over the sample space. If `None`, a uniform distribution will be assumed.
'''
if (not (weights is None)) and len(weights) != len(sample_space):
raise ValueError("`weights` must either be `None` for uniform distribution or have the same length as `sample_space`.")
self.sample_space = []
for bound_pair in sample_space:
if len(bound_pair) != 2:
raise ValueError("All elements of the sample space must be 2-tuples.")
bound_pair = list(bound_pair)
if bound_pair[0] is None: bound_pair[0] = 0.0
if bound_pair[1] is None: bound_pair[1] = 1.0
if bound_pair[0] > bound_pair[1]:
raise ValueError("For all sample space elements, the lower bound cannot be greater than the upper bound.")
self.sample_space.append(bound_pair)
self.sample_space_size = len(self.sample_space)
if weights is None:
self.weights = [1.0/self.sample_space_size] * self.sample_space_size
else:
self.weights = weights
def __call__(self):
'''
Returns:
An item of the sample space, i.e. a 2-tuple of scalars.
'''
i = np.random.choice(self.sample_space_size, p=self.weights)
return self.sample_space[i]
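# --- Illustrative sketch (hypothetical helper, not part of the original module):
# drawing one lower/upper bound pair from the default sample space of `BoundGenerator`.
def _demo_bound_generator():
    bound_generator = BoundGenerator()
    lower, upper = bound_generator()  # e.g. (0.3, 1.0), drawn uniformly from the six default pairs
    return lower, upper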
def _greedy_nms(predictions, iou_threshold=0.45, coords='corners', border_pixels='half'):
'''
The same greedy non-maximum suppression algorithm as above, but slightly modified for use as an internal
function for per-class NMS in `decode_detections()`.
'''
boxes_left = np.copy(predictions)
maxima = [] # This is where we store the boxes that make it through the non-maximum suppression
while boxes_left.shape[0] > 0: # While there are still boxes left to compare...
maximum_index = np.argmax(boxes_left[:,0]) # ...get the index of the next box with the highest confidence...
maximum_box = np.copy(boxes_left[maximum_index]) # ...copy that box and...
maxima.append(maximum_box) # ...append it to `maxima` because we'll definitely keep it
boxes_left = np.delete(boxes_left, maximum_index, axis=0) # Now remove the maximum box from `boxes_left`
if boxes_left.shape[0] == 0: break # If there are no boxes left after this step, break. Otherwise...
similarities = iou(boxes_left[:,1:], maximum_box[1:], coords=coords, mode='element-wise', border_pixels=border_pixels) # ...compare (IoU) the other left over boxes to the maximum box...
boxes_left = boxes_left[similarities <= iou_threshold] # ...so that we can remove the ones that overlap too much with the maximum box
return np.array(maxima)
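# --- Illustrative sketch (hypothetical helper, not part of the original module):
# `_greedy_nms` on three made-up single-class predictions in the format
# [confidence, xmin, ymin, xmax, ymax]. Like `_greedy_nms` itself, this relies
# on the module's `iou` helper.
def _demo_greedy_nms():
    import numpy as np
    predictions = np.array([[0.9, 10, 10, 50, 50],
                            [0.8, 12, 12, 52, 52],    # overlaps heavily with the first box
                            [0.7, 100, 100, 140, 140]])
    # The second box is suppressed (its IoU with the first box exceeds 0.45),
    # so only the first and third boxes survive.
    return _greedy_nms(predictions, iou_threshold=0.45, coords='corners')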
def decode_detections(y_pred,
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
input_coords='centroids',
normalize_coords=True,
img_height=None,
img_width=None,
border_pixels='half'):
'''
Convert model prediction output back to a format that contains only the positive box predictions
(i.e. the same format that `SSDInputEncoder` takes as input).
After the decoding, two stages of prediction filtering are performed for each class individually:
First confidence thresholding, then greedy non-maximum suppression. The filtering results for all
classes are concatenated and the `top_k` overall highest confidence results constitute the final
predictions for a given batch item. This procedure follows the original Caffe implementation.
For a slightly different and more efficient alternative to decode raw model output that performs
non-maximum suppression globally instead of per class, see `decode_detections_fast()` below.
Arguments:
y_pred (array): The prediction output of the SSD model, expected to be a Numpy array
of shape `(batch_size, #boxes, #classes + 4 + 4 + 4)`, where `#boxes` is the total number of
boxes predicted by the model per image and the last axis contains
`[one-hot vector for the classes, 4 predicted coordinate offsets, 4 anchor box coordinates, 4 variances]`.
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
positive class in order to be considered for the non-maximum suppression stage for the respective class.
A lower value will result in a larger part of the selection process being done by the non-maximum suppression
stage, while a larger value will result in a larger part of the selection process happening in the confidence
thresholding stage.
iou_threshold (float, optional): A float in [0,1]. All boxes with a Jaccard similarity of greater than `iou_threshold`
with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
to the box score.
top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
non-maximum suppression stage.
input_coords (str, optional): The box coordinate format that the model outputs. Can be either 'centroids'
for the format `(cx, cy, w, h)` (box center coordinates, width, and height), 'minmax' for the format
`(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model outputs relative coordinates (i.e. coordinates in [0,1])
and you wish to transform these relative coordinates back to absolute coordinates. If the model outputs
relative coordinates, but you do not want to convert them back to absolute coordinates, set this to `False`.
Do not set this to `True` if the model already outputs absolute coordinates, as that would result in incorrect
coordinates. Requires `img_height` and `img_width` if set to `True`.
img_height (int, optional): The height of the input images. Only needed if `normalize_coords` is `True`.
img_width (int, optional): The width of the input images. Only needed if `normalize_coords` is `True`.
border_pixels (str, optional): How to treat the border pixels of the bounding boxes.
Can be 'include', 'exclude', or 'half'. If 'include', the border pixels belong
to the boxes. If 'exclude', the border pixels do not belong to the boxes.
If 'half', then one of each of the two horizontal and vertical borders belong
to the boxes, but not the other.
Returns:
A python list of length `batch_size` where each list element represents the predicted boxes
for one image and contains a Numpy array of shape `(boxes, 6)` where each row is a box prediction for
a non-background class for the respective image in the format `[class_id, confidence, xmin, ymin, xmax, ymax]`.
'''
if normalize_coords and ((img_height is None) or (img_width is None)):
raise ValueError("If relative box coordinates are supposed to be converted to absolute coordinates, the decoder needs the image size in order to decode the predictions, but `img_height == {}` and `img_width == {}`".format(img_height, img_width))
# 1: Convert the box coordinates from the predicted anchor box offsets to predicted absolute coordinates
y_pred_decoded_raw = np.copy(y_pred[:,:,:-8]) # Slice out the classes and the four offsets, throw away the anchor coordinates and variances, resulting in a tensor of shape `[batch, n_boxes, n_classes + 4 coordinates]`
if input_coords == 'centroids':
y_pred_decoded_raw[:,:,[-2,-1]] = np.exp(y_pred_decoded_raw[:,:,[-2,-1]] * y_pred[:,:,[-2,-1]]) # exp(ln(w(pred)/w(anchor)) / w_variance * w_variance) == w(pred) / w(anchor), exp(ln(h(pred)/h(anchor)) / h_variance * h_variance) == h(pred) / h(anchor)
y_pred_decoded_raw[:,:,[-2,-1]] *= y_pred[:,:,[-6,-5]] # (w(pred) / w(anchor)) * w(anchor) == w(pred), (h(pred) / h(anchor)) * h(anchor) == h(pred)
y_pred_decoded_raw[:,:,[-4,-3]] *= y_pred[:,:,[-4,-3]] * y_pred[:,:,[-6,-5]] # (delta_cx(pred) / w(anchor) / cx_variance) * cx_variance * w(anchor) == delta_cx(pred), (delta_cy(pred) / h(anchor) / cy_variance) * cy_variance * h(anchor) == delta_cy(pred)
y_pred_decoded_raw[:,:,[-4,-3]] += y_pred[:,:,[-8,-7]] # delta_cx(pred) + cx(anchor) == cx(pred), delta_cy(pred) + cy(anchor) == cy(pred)
y_pred_decoded_raw = convert_coordinates(y_pred_decoded_raw, start_index=-4, conversion='centroids2corners')
elif input_coords == 'minmax':
y_pred_decoded_raw[:,:,-4:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively
y_pred_decoded_raw[:,:,[-4,-3]] *= np.expand_dims(y_pred[:,:,-7] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)
y_pred_decoded_raw[:,:,[-2,-1]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-6], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)
y_pred_decoded_raw[:,:,-4:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates
y_pred_decoded_raw = convert_coordinates(y_pred_decoded_raw, start_index=-4, conversion='minmax2corners')
elif input_coords == 'corners':
y_pred_decoded_raw[:,:,-4:] *= y_pred[:,:,-4:] # delta(pred) / size(anchor) / variance * variance == delta(pred) / size(anchor) for all four coordinates, where 'size' refers to w or h, respectively
y_pred_decoded_raw[:,:,[-4,-2]] *= np.expand_dims(y_pred[:,:,-6] - y_pred[:,:,-8], axis=-1) # delta_xmin(pred) / w(anchor) * w(anchor) == delta_xmin(pred), delta_xmax(pred) / w(anchor) * w(anchor) == delta_xmax(pred)
y_pred_decoded_raw[:,:,[-3,-1]] *= np.expand_dims(y_pred[:,:,-5] - y_pred[:,:,-7], axis=-1) # delta_ymin(pred) / h(anchor) * h(anchor) == delta_ymin(pred), delta_ymax(pred) / h(anchor) * h(anchor) == delta_ymax(pred)
y_pred_decoded_raw[:,:,-4:] += y_pred[:,:,-8:-4] # delta(pred) + anchor == pred for all four coordinates
else:
raise ValueError("Unexpected value for `input_coords`. Supported input coordinate formats are 'minmax', 'corners' and 'centroids'.")
# 2: If the model predicts normalized box coordinates and they are supposed to be converted back to absolute coordinates, do that
if normalize_coords:
y_pred_decoded_raw[:,:,[-4,-2]] *= img_width # Convert xmin, xmax back to absolute coordinates
y_pred_decoded_raw[:,:,[-3,-1]] *= img_height # Convert ymin, ymax back to absolute coordinates
# 3: Apply confidence thresholding and non-maximum suppression per class
n_classes = y_pred_decoded_raw.shape[-1] - 4 # The number of classes is the length of the last axis minus the four box coordinates
y_pred_decoded = [] # Store the final predictions in this list
for batch_item in y_pred_decoded_raw: # `batch_item` has shape `[n_boxes, n_classes + 4 coords]`
pred = [] # Store the final predictions for this batch item here
for class_id in range(1, n_classes): # For each class except the background class (which has class ID 0)...
single_class = batch_item[:,[class_id, -4, -3, -2, -1]] # ...keep only the confidences for that class, making this an array of shape `[n_boxes, 5]` and...
threshold_met = single_class[single_class[:,0] > confidence_thresh] # ...keep only those boxes with a confidence above the set threshold.
if threshold_met.shape[0] > 0: # If any boxes made the threshold...
maxima = _greedy_nms(threshold_met, iou_threshold=iou_threshold, coords='corners', border_pixels=border_pixels) # ...perform NMS on them.
maxima_output = np.zeros((maxima.shape[0], maxima.shape[1] + 1)) # Expand the last dimension by one element to have room for the class ID. This is now an array of shape `[n_boxes, 6]`
maxima_output[:,0] = class_id # Write the class ID to the first column...
maxima_output[:,1:] = maxima # ...and write the maxima to the other columns...
pred.append(maxima_output) # ...and append the maxima for this class to the list of maxima for this batch item.
# Once we're through with all classes, keep only the `top_k` maxima with the highest scores
if pred: # If there are any predictions left after confidence-thresholding...
pred = np.concatenate(pred, axis=0)
if top_k != 'all' and pred.shape[0] > top_k: # If we have more than `top_k` results left at this point, otherwise there is nothing to filter,...
top_k_indices = np.argpartition(pred[:,1], kth=pred.shape[0]-top_k, axis=0) # API: numpy.argpartition
"""
Copyright (C) 2018 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import sys
import numpy as np
from sklearn.svm import SVC
from bisect import bisect_left
from sklearn.decomposition import PCA
class PropensityBatch(object):
def propensity_list_is_initialised(self):
return self.treatment_lists is not None
def make_propensity_lists(self, train_ids, benchmark):
from perfect_match.models.benchmarks.tcga_benchmark import TCGABenchmark
input_data, ids, pair_data = benchmark.get_data_access().get_rows(train_ids)
assignments = map(benchmark.get_assignment, ids, input_data)
treatment_data, batch_y = zip(*assignments)
treatment_data = np.array(treatment_data) # API: numpy.array
import random
import h5py
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from scipy.ndimage import gaussian_filter
from scipy.ndimage.filters import convolve
from io import BytesIO
import copy
class TrainDataset(Dataset):
def __init__(self, file_path, patch_size, scale, aug=False, colorization=False, completion=False):
super(TrainDataset, self).__init__()
self.files = ParseFile(file_path)
self.patch_size = patch_size
self.scale = scale
self.aug = aug
self.colorization = colorization
self.completion = completion
@staticmethod
def random_crop(lr, hr, size, scale):
lr_left = random.randint(0, lr.shape[1] - size)
lr_right = lr_left + size
lr_top = random.randint(0, lr.shape[0] - size)
lr_bottom = lr_top + size
hr_left = lr_left * scale
hr_right = lr_right * scale
hr_top = lr_top * scale
hr_bottom = lr_bottom * scale
lr = lr[lr_top:lr_bottom, lr_left:lr_right]
hr = hr[hr_top:hr_bottom, hr_left:hr_right]
return lr, hr
@staticmethod
def random_horizontal_flip(lr, hr):
if random.random() < 0.5:
lr = lr[:, ::-1, :].copy()
hr = hr[:, ::-1, :].copy()
return lr, hr
@staticmethod
def random_vertical_flip(lr, hr):
if random.random() < 0.5:
lr = lr[::-1, :, :].copy()
hr = hr[::-1, :, :].copy()
return lr, hr
# im is a numpy float/double array
@staticmethod
def add_gaussian_noise(im, std):
noise = np.random.normal(0,std,im.shape)
im = im + noise
return im
# im is read from PIL.Image.open
@staticmethod
def jpeg(im, jpeg_quality):
buffer = BytesIO()
im.save(buffer, 'jpeg', quality = jpeg_quality)
im = Image.open(buffer)
return im
@staticmethod
def random_rotate_90(lr, hr):
if random.random() < 0.5:
lr = np.rot90(lr, axes=(1, 0)).copy()
hr = np.rot90(hr, axes=(1, 0)).copy()
return lr, hr
def __getitem__(self, idx):
img = Image.open(self.files[idx])
img2 = img.copy()
hr = np.array(img).astype('float')
if self.aug and np.random.uniform(0,1) > 0.7071:
img2 = self.jpeg(img2, int(np.random.choice(np.arange(25, 75))))
#print('augment jpeg')
hr2 = np.array(img2).astype('float')
hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
lr = 0
for i in range(self.scale):
for j in range(self.scale):
lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
lr, hr = self.random_horizontal_flip(lr, hr)
lr, hr = self.random_vertical_flip(lr, hr)
lr, hr = self.random_rotate_90(lr, hr)
if self.aug and np.random.uniform(0,1) > 0.7071:
lr = self.add_gaussian_noise(lr, np.random.uniform(0,10))
#print('augment noising')
lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
if self.completion and np.random.uniform(0,1) > 0.7071:
dims = lr.shape
mask = np.random.uniform(0,1,(dims[1],dims[2]))
mask = mask < np.random.uniform(0.05,0.15)
lr[0,mask] = 0
lr[1,mask] = 0
lr[2,mask] = 0
if self.colorization and np.random.uniform(0,1) > 0.7071:
dims = lr.shape
mask = np.random.uniform(0,1,(dims[1],dims[2]))
mask = mask < np.random.uniform(0.05,0.15)
tmp = lr.mean(axis=0)
for i_dim in range(dims[0]):
lr[i_dim,mask] = tmp[mask]
return lr, hr
def __len__(self):
return len(self.files)
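# --- Illustrative sketch (hypothetical usage, not part of the original module):
# plugging `TrainDataset` into a PyTorch DataLoader. The file list
# 'train_files.txt' is a placeholder that `ParseFile` would need to read, and the
# images are assumed to be RGB and larger than patch_size * scale.
def _demo_train_loader():
    from torch.utils.data import DataLoader
    dataset = TrainDataset('train_files.txt', patch_size=48, scale=2, aug=True)
    loader = DataLoader(dataset, batch_size=16, shuffle=True, num_workers=2)
    lr_batch, hr_batch = next(iter(loader))  # lr: (16, 3, 48, 48), hr: (16, 3, 96, 96)
    return lr_batch.shape, hr_batch.shape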
class TrainDataset256(Dataset):
def __init__(self, file_path, patch_size, scale, aug=False, colorization=False, completion=False):
super(TrainDataset256, self).__init__()
self.files = ParseFile(file_path)
self.patch_size = patch_size
self.scale = scale
self.aug = aug
self.colorization = colorization
self.completion = completion
@staticmethod
def random_crop(lr, hr, size, scale):
lr_left = random.randint(0, lr.shape[1] - size)
lr_right = lr_left + size
lr_top = random.randint(0, lr.shape[0] - size)
lr_bottom = lr_top + size
hr_left = lr_left * scale
hr_right = lr_right * scale
hr_top = lr_top * scale
hr_bottom = lr_bottom * scale
lr = lr[lr_top:lr_bottom, lr_left:lr_right]
hr = hr[hr_top:hr_bottom, hr_left:hr_right]
return lr, hr
@staticmethod
def random_horizontal_flip(lr, hr):
if random.random() < 0.5:
lr = lr[:, ::-1, :].copy()
hr = hr[:, ::-1, :].copy()
return lr, hr
@staticmethod
def random_vertical_flip(lr, hr):
if random.random() < 0.5:
lr = lr[::-1, :, :].copy()
hr = hr[::-1, :, :].copy()
return lr, hr
# im is a numpy float/double array
@staticmethod
def add_gaussian_noise(im, std):
noise = np.random.normal(0,std,im.shape)
im = im + noise
return im
# im is read from PIL.Image.open
@staticmethod
def jpeg(im, jpeg_quality):
buffer = BytesIO()
im.save(buffer, 'jpeg', quality = jpeg_quality)
im = Image.open(buffer)
return im
@staticmethod
def random_rotate_90(lr, hr):
if random.random() < 0.5:
lr = np.rot90(lr, axes=(1, 0)).copy()
hr = np.rot90(hr, axes=(1, 0)).copy()
return lr, hr
def __getitem__(self, idx):
img = Image.open(self.files[idx])
img = img.resize((256 , 256), resample=Image.BICUBIC)
img2 = img.copy()
hr = np.array(img).astype('float')
if self.aug and np.random.uniform(0,1) > 0.7071:
img2 = self.jpeg(img2, int(np.random.choice(np.arange(25, 75))))
#print('augment jpeg')
hr2 = np.array(img2).astype('float')
hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225)
hr2[:,:,2] = convolve(hr2[:,:,2] , np.ones((15,15)).astype('float')/225)
lr = 0
for i in range(self.scale):
for j in range(self.scale):
lr = lr + hr[i::self.scale, j::self.scale] / (self.scale * self.scale)
lr, hr = self.random_crop(lr, hr, self.patch_size, self.scale)
lr, hr = self.random_horizontal_flip(lr, hr)
lr, hr = self.random_vertical_flip(lr, hr)
lr, hr = self.random_rotate_90(lr, hr)
if self.aug and np.random.uniform(0,1) > 0.7071:
lr = self.add_gaussian_noise(lr, np.random.uniform(0,10))
#print('augment noising')
lr = lr.astype(np.float32).transpose([2, 0, 1]) / 255.0
hr = hr.astype(np.float32).transpose([2, 0, 1]) / 255.0
if self.completion and np.random.uniform(0,1) > 0.7071:
dims = lr.shape
mask = np.random.uniform(0,1,(dims[1],dims[2]))
mask = mask < np.random.uniform(0.05,0.15)
lr[0,mask] = 0
lr[1,mask] = 0
lr[2,mask] = 0
if self.colorization and np.random.uniform(0,1) > 0.7071:
dims = lr.shape
mask = np.random.uniform(0,1,(dims[1],dims[2]))
mask = mask < np.random.uniform(0.05,0.15)
tmp = lr.mean(axis=0)
for i_dim in range(dims[0]):
lr[i_dim,mask] = tmp[mask]
return lr, hr
def __len__(self):
return len(self.files)
class EvalDataset(Dataset):
def __init__(self, file_path, scale):
super(EvalDataset, self).__init__()
self.files = ParseFile(file_path)
self.scale = scale
def __getitem__(self, idx):
hr = np.array(Image.open(self.files[idx])).astype('float')
hr2 = hr.copy()
hr2[:,:,0] = convolve(hr2[:,:,0] , np.ones((15,15)).astype('float')/225)
hr2[:,:,1] = convolve(hr2[:,:,1] , np.ones((15,15)).astype('float')/225) # API: numpy.ones
import numpy as np
from numpy.random import choice
import random
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from numba import jit,njit
##standing wave solution of Fisher wave
## given an array with counts and an array with probabilities, return an index from the first array
# faster than np.random.choice for smallish arrays
@jit
def choice(options,probs):
x = np.random.rand()
cum = 0
for i,p in enumerate(probs):
cum += p
##sum of probability must be 1
if x < cum:
break
return options[i]
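## --- Illustrative sketch (hypothetical helper, not part of the original script):
## the jit-compiled `choice` above draws an option index according to the given
## probabilities, e.g. mostly returning 2 here.
def _demo_choice():
    options = np.arange(3)
    probs = np.array([0.1, 0.2, 0.7])
    return choice(options, probs)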
##generate a standing wave solution of fisher equations - i.e. an 'established' wave front
@jit
def standing_wave(y0,x,D,rw):
w = y0[0] ##initial value for wave profile at x =0, i.e. w(x=0)
z = y0[1] ##initial value for rate of change of profile w.r.t. position x , at x=0 i.e. dw/dx(x=0)
dwdx = z
dzdx =(-2*((rw*D)**.5)*dwdx -w*rw*(1-w))/D ## fisher equation in comoving frame
return [dwdx,dzdx]
### given a deme return that deme and a random neighbor
@jit
def rand_neighbors(n_demes):
ind_1 = np.random.randint(n_demes)
if ind_1 == 0:
ind_2 =1
else:
if ind_1 == n_demes:
ind_2 = n_demes-1
else:
if np.random.randn()>.5 and ind_1 !=0:
ind_2 = ind_1-1
else:
ind_2 = ind_1+1
return np.array([ind_1,ind_2])
##convert array of cell counts for each fitness
## to an array for each cell with its identity (fitness)
@jit
def counts_to_cells(counts,n_allele):
cells = np.repeat(np.arange(n_allele+1),counts)
return cells
##convert array of each cell with its identity (fitness)
## to an array of cell counts for each fitness
@jit
def cells_to_counts(cells,n_allele):
counts = np.bincount(cells, minlength=n_allele+1)
return counts
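## --- Illustrative sketch (hypothetical helper, not part of the original script):
## counts_to_cells and cells_to_counts are inverses of each other, e.g. with n_allele = 2:
def _demo_counts_roundtrip():
    counts = np.array([3, 2, 1])        # 3 empty slots, 2 wild type cells, 1 mutant
    cells = counts_to_cells(counts, 2)  # [0 0 0 1 1 2]
    return cells_to_counts(cells, 2)    # back to [3 2 1]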
## initialize array of number of empty spaces and number of wild type cells for each deme
##using standing wave solution.
@jit
def standing_wave_solve(K):
return odeint(standing_wave,[1,-(2*2**.5)/K],np.arange(70),args=(2*2**.5,1))[:,0]
@jit
def initialize(K,n_allele,mu):
## generate standing wave
stand = standing_wave_solve(K)
## cut off non-integer cell density based on the carrying capacity K
w_0 = (K*stand).astype(int)
w_0 = w_0[w_0>1]
## subtract wild type cells from carrying capacity to get 'empty' particle count
L = np.vstack(((K-w_0),w_0)).T
L = np.append(L,np.zeros((len(w_0),n_allele-1)),axis=1)
##array structure of an empty deme to be added on as needed
L_empty= np.append([K],np.zeros(n_allele,dtype=int))
## add on some number of empty demes
for i in range(140):
L= np.append(L,[L_empty],axis=0)
return L.astype(int), L_empty
## take two neighboring demes, pick a particle from each and exchange them
@jit
def migration(cell_counts,n_allele,K):
empty_cnt_1,empty_cnt_2 = np.zeros(n_allele+1),np.zeros(n_allele+1)
## pick a cell from each deme
chosen_1 = choice(np.arange(n_allele+1), cell_counts[0]/np.sum(cell_counts[0]))
chosen_2 = choice(np.arange(n_allele+1), cell_counts[1]/np.sum(cell_counts[1]))
## format chosen cell in terms of array as [empty cell count, ..., chosen cell count]
empty_cnt_1[chosen_1] =1
empty_cnt_2[chosen_2] =1
## add chosen cell from second deme to first deme, and chosen cell from first deme to second deme
cell_counts[0]=cell_counts[0]- empty_cnt_1 + empty_cnt_2
cell_counts[1]= cell_counts[1]+ empty_cnt_1 - empty_cnt_2
return cell_counts
#from one chosen deme pick two cells and exchange the first with the second one with some probability
@jit
def duplication(cell_counts,K,P,n_allele):
#pick two cells randomly from chosen deme. yes, i know i use list.append but np append was slower
## when i timed it
picks = []
for i in range(2):
picks.append(choice(np.arange(n_allele+1),
cell_counts/np.sum(cell_counts)))
## format chosen cells in terms of cell counts i.e. [empty cell count,...,chosen cell count]
empty_cnt_1,empty_cnt_2 = np.zeros(n_allele+1),np.zeros(n_allele+1)
empty_cnt_1[picks[0]] =1
empty_cnt_2[picks[1]] =1
if P[tuple(picks)]> np.random.random():
cell_counts = cell_counts + empty_cnt_1 - empty_cnt_2
return cell_counts
## from randomly chosen deme pick random cell and give it a mutation (change its genotype) with
## some probability
@jit
def mutation(cell_counts,mu,K,n_allele):
pick=choice(np.arange(n_allele+1),
cell_counts/np.sum(cell_counts))
## format chosen cells in terms of cell counts i.e. [empty cell count,...,chosen cell count]
empty_cnt_1,empty_cnt_2= np.zeros(n_allele+1),np.zeros(n_allele+1)
empty_cnt_1[pick] =1
if mu>np.random.random():
## only particles that are not empty spaces and are not the 'peak' in the landscape structure can mutate
if pick != n_allele and pick !=0:
## mutate cell and format in terms of cell counts i.e. [empty cell count,...,chosen cell count]
empty_cnt_2[pick+1] =1
##remove original cell and add mutated cell to cell counts
cell_counts = cell_counts - empty_cnt_1 + empty_cnt_2
return cell_counts
## shift simulation box
@jit
def recenter(L,L_empty, K):
shift = 0
##track how many demes are to be omitted
while L[0,0]<int(.02*K):
L=L[1:,:]
shift+=1
#for each dropped deme, add one
for i in range(shift):
L=np.append(L,[L_empty],axis=0)
return L
## one update step includes one migration, one duplication and one mutation step
@jit
def update(L, ## population
L_empty, ## empty deme structure
P, ## probability matrix for mutation
K, # population size
n_allele, ## length of landscape
mu): ##mutation rate
#L_tip = np.where(L[:,0]!=K)[0][-1]
n_demes = np.where(L[:,0]!=K)[0][-1] +2
#migration
neighb = rand_neighbors(n_demes)
L[neighb]= migration(L[neighb],n_allele,K)
#duplication
dup_deme = np.random.randint(n_demes)
L[dup_deme] = duplication(L[dup_deme],K,P,n_allele)
##mutation
mut_deme = np.random.randint(n_demes)
L[mut_deme] = mutation(L[mut_deme],mu,K,n_allele)
return L
## run simulation for chosen number of generation
@jit
def run_spatial(n_gen, ## number of generations
K, ## population size
landscape ## fitness landscape (growth rates of each genotype, should be <<1)
,mu): ## mutation rate
n_allele = len(landscape)
func_args = [K,n_allele,mu]
##initialize probability matrix
P = np.ones((n_allele+1,n_allele+1))
P[0,1:] = 1 - landscape
muts =0
L , L_empty = initialize(K,n_allele,mu)
L_history=[L]
#begin evolution
for t in range(n_gen):
for dt in range(K):
## perform one simulation step
L= update(L,L_empty,P,K,n_allele,mu)
##recenter simulation box
L= recenter(L,L_empty,K)
##save
L_history.append(L)
return L_history
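## --- Illustrative sketch (hypothetical usage, not part of the original script):
## a minimal call to `run_spatial` with a made-up three-allele landscape. The
## growth rates, population size and mutation rate are arbitrary small values
## chosen only to keep the run short.
def _demo_run_spatial():
    landscape = np.array([0.01, 0.02, 0.03])  # growth rates << 1
    history = run_spatial(5, 50, landscape, 0.001)
    return len(history)  # the initial population plus one snapshot per generation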
## run simulation until a fixation event occurs (fixation to some threshold 'thresh')
@jit
def fix_time_spatial(K,landscape,mu,thresh):
n_allele = len(landscape)
func_args = [K,n_allele,mu]
##initialize probability matrix
P = np.ones((n_allele+1,n_allele+1))
P[0,1:] = 1 - landscape
##initialize population
L,L_empty = initialize(*func_args)
init_pop = np.sum(L[:,1])
L_history=[L]
#mutant has not yet fixed
fixed=False
muts_arise = False ## no beneficial mutant has arisen yet
t = 0
while not fixed:
# perform one simulation step
L = update(L,L_empty,P,*func_args)
## move simulation box
L = recenter(L,L_empty,K)
##check if a beneficial mutant has arisen, if it previously didn't exist
if (np.sum(L[:,-1]) !=0) ==True and muts_arise ==False:
## record time
arise_time = t
muts_arise = (np.sum(L[:,-1]) !=0)
## check if fixed
fixed = np.sum(L[:,1:n_allele])<(thresh*init_pop)
t+=1
try:
return L,t, arise_time
except:
return L,t
#Run the automaton
#Implements cell division. The division rates are based on the experimental data
@jit
def run_mixed(n_gen,fit_land, # number of generations; fitness landscape (growth rates - should be <<1)
mut_rate=0.1, # probability of mutation per generation
max_cells=10**5, # Max number of cells
init_counts=None,
thresh = .02
):
# Obtain transition matrix for mutations
# Number of different alleles
n_allele = len(fit_land)
# Keeps track of cell counts at each generation
P = np.ones((n_allele+1,n_allele+1))
P[0,1:] = 1 - fit_land
if init_counts is None:
counts = np.zeros(n_allele+1)
counts[1] = 10
else:
counts = init_counts
counts= counts.astype(int)
fixed = False
t = 0
count_history = []
for gen in range(n_gen):
n_cells = np.sum( counts )
# Scale division rates based on carrying capacity
cell_types = np.repeat(np.arange(n_allele+1),counts)
cell_types = duplication(cell_types,max_cells,P)
cell_types = mutation(cell_types,mut_rate,max_cells,n_allele)
counts = np.bincount(cell_types, minlength=n_allele+1)
count_history.append(counts)
return count_history
#Run the automaton
#Implements cell division. The division rates are based on the experimental data
@jit
def fix_time_mixed(fit_land, # Fitness landscape
mut_rate=0.1, # probability of mutation per generation
max_cells=10**5, # Max number of cells
init_counts=None,
thresh = .9
):
# Obtain transition matrix for mutations
# Number of different alleles
n_allele = len(fit_land)
# Keeps track of cell counts at each generation
P = np.ones((n_allele+1,n_allele+1))
P[0,1:] = 1 - fit_land
if init_counts is None:
counts = np.zeros(n_allele+1)
counts[1] = 10
else:
if sum(init_counts) !=max_cells:
raise ValueError('sum of initial counts must be equal to carrying capacity')
counts = init_counts
counts= counts.astype(int)
fixed = False
t = 0
muts= 0
muts_arise =False
while not fixed:
n_cells = np.sum( counts )
# Scale division rates based on carrying capacity
cell_types = np.repeat(np.arange(n_allele+1),counts)
cell_types = duplication(cell_types,max_cells,P)
cell_types = mutation(cell_types,mut_rate,max_cells,n_allele)
counts = np.bincount(cell_types, minlength=n_allele+1) # API: numpy.bincount
# MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import unittest
import keras.backend as k
import numpy as np
from art.attacks.evasion.elastic_net import ElasticNet
from art.estimators.classification.classifier import ClassGradientsMixin
from art.estimators.classification.keras import KerasClassifier
from art.utils import random_targets, to_categorical
from tests.attacks.utils import backend_test_classifier_type_check_fail
from tests.utils import (
TestBase,
get_image_classifier_kr,
get_image_classifier_pt,
get_image_classifier_tf,
get_tabular_classifier_kr,
get_tabular_classifier_pt,
get_tabular_classifier_tf,
master_seed,
)
logger = logging.getLogger(__name__)
class TestElasticNet(TestBase):
"""
A unittest class for testing the ElasticNet attack.
"""
@classmethod
def setUpClass(cls):
master_seed(seed=1234)
super().setUpClass()
cls.n_train = 500
cls.n_test = 10
cls.x_train_mnist = cls.x_train_mnist[0 : cls.n_train]
cls.y_train_mnist = cls.y_train_mnist[0 : cls.n_train]
cls.x_test_mnist = cls.x_test_mnist[0 : cls.n_test]
cls.y_test_mnist = cls.y_test_mnist[0 : cls.n_test]
def setUp(self):
master_seed(seed=1234)
super().setUp()
def test_tensorflow_failure_attack(self):
"""
Test the corner case when attack fails.
:return:
"""
# Build TensorFlowClassifier
tfc, sess = get_image_classifier_tf()
# Failure attack
ead = ElasticNet(
classifier=tfc, targeted=True, max_iter=0, binary_search_steps=0, learning_rate=0, initial_const=1
)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = ead.generate(self.x_test_mnist, **params)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
np.testing.assert_almost_equal(self.x_test_mnist, x_test_adv, 3)
# Clean-up session
if sess is not None:
sess.close()
def test_tensorflow_mnist(self):
"""
First test with the TensorFlowClassifier.
:return:
"""
x_test_original = self.x_test_mnist.copy()
# Build TensorFlowClassifier
tfc, sess = get_image_classifier_tf(from_logits=True)
# First attack
ead = ElasticNet(classifier=tfc, targeted=True, max_iter=2)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = ead.generate(self.x_test_mnist, **params)
expected_x_test_adv = np.asarray(
[
0.45704955,
0.43627003,
0.57238287,
1.0,
0.11541145,
0.12619308,
0.48318917,
0.3457903,
0.17863746,
0.09060935,
0.0,
0.00963121,
0.0,
0.04749763,
0.4058206,
0.17860745,
0.0,
0.9153206,
0.84564775,
0.20603634,
0.10586322,
0.00947509,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
np.testing.assert_array_almost_equal(x_test_adv[0, 14, :, 0], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", target)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate on MNIST: %.2f%%", (100 * sum(target == y_pred_adv) / len(target)))
self.assertTrue((target == y_pred_adv).any())
# Second attack
ead = ElasticNet(classifier=tfc, targeted=False, max_iter=2)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = ead.generate(self.x_test_mnist, **params)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", target)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate on MNIST: %.2f%%", (100 * sum(target != y_pred_adv) / float(len(target))))
np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))
# Third attack
ead = ElasticNet(classifier=tfc, targeted=False, max_iter=2)
params = {}
x_test_adv = ead.generate(self.x_test_mnist, **params)
expected_x_test_adv = np.asarray(
[
0.22866514,
0.21826893,
0.22902338,
0.06268515,
0.0,
0.0,
0.04822975,
0.0,
0.0,
0.0,
0.05555382,
0.0,
0.0,
0.0,
0.38986346,
0.10653087,
0.32385707,
0.98043066,
0.75790393,
0.16486718,
0.16069527,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
np.testing.assert_array_almost_equal(x_test_adv[0, 14, :, 0], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
y_pred = np.argmax(tfc.predict(self.x_test_mnist), axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", y_pred)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate: %.2f%%", (100 * sum(y_pred != y_pred_adv) / float(len(y_pred))))
np.testing.assert_array_equal(y_pred_adv, np.asarray([0, 4, 7, 9, 0, 7, 7, 3, 0, 7]))
# First attack without batching
ead_wob = ElasticNet(classifier=tfc, targeted=True, max_iter=2, batch_size=1)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = ead_wob.generate(self.x_test_mnist, **params)
expected_x_test_adv = np.asarray(
[
0.3287169,
0.31374657,
0.42853343,
0.8994576,
0.19850709,
0.11997936,
0.5622535,
0.43854535,
0.19387433,
0.12516324,
0.0,
0.10933565,
0.02162433,
0.07120894,
0.95224255,
0.3072921,
0.48966524,
1.0,
0.3814998,
0.15782641,
0.52283823,
0.12852049,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
np.testing.assert_array_almost_equal(x_test_adv[0, 14, :, 0], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", target)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate: %.2f%%", (100 * sum(target == y_pred_adv) / float(len(target))))
self.assertTrue((target == y_pred_adv).any())
# Second attack without batching
ead_wob = ElasticNet(classifier=tfc, targeted=False, max_iter=2, batch_size=1)
params = {"y": random_targets(self.y_test_mnist, tfc.nb_classes)}
x_test_adv = ead_wob.generate(self.x_test_mnist, **params)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(tfc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", target)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate: %.2f%%", (100 * sum(target != y_pred_adv) / float(len(target))))
np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))
# Check that x_test has not been modified by attack and classifier
self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_mnist))), 0.0, delta=0.00001)
# Close session
if sess is not None:
sess.close()
def test_keras_mnist(self):
"""
Second test with the KerasClassifier.
:return:
"""
x_test_original = self.x_test_mnist.copy()
# Build KerasClassifier
krc = get_image_classifier_kr()
# First attack
ead = ElasticNet(classifier=krc, targeted=True, max_iter=2)
y_target = to_categorical(np.asarray([6, 6, 7, 4, 9, 7, 9, 0, 1, 0]), nb_classes=10)
x_test_adv = ead.generate(self.x_test_mnist, y=y_target)
expected_x_test_adv = np.asarray(
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.00183569,
0.0,
0.0,
0.49765405,
1.0,
0.6467149,
0.0033755,
0.0052456,
0.0,
0.01104407,
0.00495547,
0.02747423,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
np.testing.assert_array_almost_equal(x_test_adv[2, 14, :, 0], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(y_target, axis=1)
y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", target)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate: %.2f%%", (100 * sum(target == y_pred_adv) / float(len(target))))
self.assertTrue((target == y_pred_adv).any())
# Second attack
ead = ElasticNet(classifier=krc, targeted=False, max_iter=2)
y_target = to_categorical(np.asarray([9, 5, 6, 7, 1, 6, 1, 5, 8, 5]), nb_classes=10)
x_test_adv = ead.generate(self.x_test_mnist, y=y_target)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
y_pred_adv = np.argmax(krc.predict(x_test_adv), axis=1)
logger.debug("EAD target: %s", y_target)
logger.debug("EAD actual: %s", y_pred_adv)
logger.info("EAD success rate: %.2f", (100 * sum(target != y_pred_adv) / float(len(target))))
self.assertTrue((target != y_pred_adv).any())
np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))
# Check that x_test has not been modified by attack and classifier
self.assertAlmostEqual(float(np.max(np.abs(x_test_original - self.x_test_mnist))), 0.0, delta=0.00001)
k.clear_session()
def test_pytorch_mnist(self):
"""
Third test with the PyTorchClassifier.
:return:
"""
x_test = np.reshape(self.x_test_mnist, (self.x_test_mnist.shape[0], 1, 28, 28)).astype(np.float32)
x_test_original = x_test.copy()
# Build PyTorchClassifier
ptc = get_image_classifier_pt(from_logits=False)
# First attack
ead = ElasticNet(classifier=ptc, targeted=True, max_iter=2)
params = {"y": random_targets(self.y_test_mnist, ptc.nb_classes)}
x_test_adv = ead.generate(x_test, **params)
expected_x_test_adv = np.asarray(
[
0.01678124,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.00665895,
0.0,
0.11374763,
0.36250514,
0.5472948,
0.9308808,
1.0,
0.99920374,
0.86274165,
0.6346757,
0.5597227,
0.24191494,
0.25882354,
0.0091916,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
)
np.testing.assert_array_almost_equal(x_test_adv[2, 0, :, 14], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)
self.assertTrue((target == y_pred_adv).any())
# Second attack
ead = ElasticNet(classifier=ptc, targeted=False, max_iter=2)
params = {"y": random_targets(self.y_test_mnist, ptc.nb_classes)}
x_test_adv = ead.generate(x_test, **params)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
target = np.argmax(params["y"], axis=1)
y_pred_adv = np.argmax(ptc.predict(x_test_adv), axis=1)
self.assertTrue((target != y_pred_adv).any())
np.testing.assert_array_equal(y_pred_adv, np.asarray([7, 1, 1, 4, 4, 1, 4, 4, 4, 4]))
# Check that x_test has not been modified by attack and classifier
self.assertAlmostEqual(float(np.max(np.abs(x_test_original - x_test))), 0.0, delta=0.00001)
def test_classifier_type_check_fail(self):
backend_test_classifier_type_check_fail(ElasticNet, [ClassGradientsMixin])
def test_keras_iris_clipped(self):
classifier = get_tabular_classifier_kr()
attack = ElasticNet(classifier, targeted=False, max_iter=10)
x_test_adv = attack.generate(self.x_test_iris)
expected_x_test_adv = np.asarray([0.85931635, 0.44633555, 0.65658355, 0.23840423])
np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
np.testing.assert_array_equal(
predictions_adv,
np.asarray(
[
1,
1,
1,
2,
1,
1,
1,
2,
1,
2,
1,
1,
1,
2,
1,
1,
2,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
1,
2,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
2,
]
),
)
accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("EAD success rate on Iris: %.2f%%", (accuracy * 100))
def test_keras_iris_unbounded(self):
classifier = get_tabular_classifier_kr()
# Recreate a classifier without clip values
classifier = KerasClassifier(model=classifier._model, use_logits=False, channels_first=True)
attack = ElasticNet(classifier, targeted=False, max_iter=10)
x_test_adv = attack.generate(self.x_test_iris)
expected_x_test_adv = np.asarray([0.85931635, 0.44633555, 0.65658355, 0.23840423])
np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
np.testing.assert_array_equal(
predictions_adv,
np.asarray(
[
1,
1,
1,
2,
1,
1,
1,
2,
1,
2,
1,
1,
1,
2,
1,
1,
2,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
1,
2,
1,
0,
1,
1,
1,
2,
0,
2,
2,
1,
1,
2,
]
),
)
accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("EAD success rate on Iris: %.2f%%", (accuracy * 100))
def test_tensorflow_iris(self):
classifier, _ = get_tabular_classifier_tf()
# Test untargeted attack
attack = ElasticNet(classifier, targeted=False, max_iter=10)
x_test_adv = attack.generate(self.x_test_iris)
expected_x_test_adv = np.asarray([0.8479195, 0.42525578, 0.70166135, 0.28664514])
np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
np.testing.assert_array_equal(
predictions_adv,
np.asarray(
[
1,
2,
2,
2,
1,
1,
1,
2,
1,
2,
1,
1,
1,
2,
2,
2,
2,
2,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
1,
2,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
2,
]
),
)
accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("EAD success rate on Iris: %.2f%%", (accuracy * 100))
# Test targeted attack
targets = random_targets(self.y_test_iris, nb_classes=3)
attack = ElasticNet(classifier, targeted=True, max_iter=10)
x_test_adv = attack.generate(self.x_test_iris, **{"y": targets})
expected_x_test_adv = np.asarray([0.8859426, 0.51877, 0.5014498, 0.05447771])
np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
np.testing.assert_array_equal(
predictions_adv,
np.asarray(
[
0,
0,
0,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0,
2,
0,
0,
2,
2,
0,
2,
2,
2,
2,
2,
2,
0,
0,
0,
2,
0,
2,
2,
2,
2,
2,
0,
0,
0,
2,
2,
2,
2,
2,
0,
2,
]
),
)
accuracy = np.sum(predictions_adv == np.argmax(targets, axis=1)) / self.y_test_iris.shape[0]
logger.info("Targeted EAD success rate on Iris: %.2f%%", (accuracy * 100))
def test_pytorch_iris(self):
classifier = get_tabular_classifier_pt()
attack = ElasticNet(classifier, targeted=False, max_iter=10)
x_test_adv = attack.generate(self.x_test_iris.astype(np.float32))
expected_x_test_adv = np.asarray([0.8479194, 0.42525578, 0.70166135, 0.28664517])
np.testing.assert_array_almost_equal(x_test_adv[0, :], expected_x_test_adv, decimal=6)
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
np.testing.assert_array_equal(
predictions_adv,
np.asarray(
[
1,
2,
2,
2,
1,
1,
1,
2,
1,
2,
1,
1,
1,
2,
2,
2,
2,
2,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
1,
2,
1,
0,
2,
2,
1,
2,
0,
2,
2,
1,
1,
2,
]
),
)
accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("EAD success rate on Iris: %.2f%%", (accuracy * 100))
def test_scikitlearn(self):
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from art.estimators.classification.scikitlearn import SklearnClassifier
scikitlearn_test_cases = [
LogisticRegression(solver="lbfgs", multi_class="auto"),
SVC(gamma="auto"),
LinearSVC(),
]
x_test_original = self.x_test_iris.copy()
for model in scikitlearn_test_cases:
classifier = SklearnClassifier(model=model, clip_values=(0, 1))
classifier.fit(x=self.x_test_iris, y=self.y_test_iris)
# Test untargeted attack
attack = ElasticNet(classifier, targeted=False, max_iter=2)
x_test_adv = attack.generate(self.x_test_iris)
self.assertFalse((self.x_test_iris == x_test_adv).all())
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
self.assertFalse((np.argmax(self.y_test_iris, axis=1) == predictions_adv).all())
accuracy = 1.0 - np.sum(predictions_adv == np.argmax(self.y_test_iris, axis=1)) / self.y_test_iris.shape[0]
logger.info("EAD success rate of " + classifier.__class__.__name__ + " on Iris: %.2f%%", (accuracy * 100))
# Test targeted attack
targets = random_targets(self.y_test_iris, nb_classes=3)
attack = ElasticNet(classifier, targeted=True, max_iter=2)
x_test_adv = attack.generate(self.x_test_iris, **{"y": targets})
self.assertFalse((self.x_test_iris == x_test_adv).all())
self.assertLessEqual(np.amax(x_test_adv), 1.0)
self.assertGreaterEqual(np.amin(x_test_adv), 0.0)
predictions_adv = np.argmax(classifier.predict(x_test_adv), axis=1)
self.assertTrue((np.argmax(targets, axis=1) == predictions_adv).any())
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import colors
import copy
from tqdm import tqdm
class IsingSquare:
# initialise a spin lattice and populate with random spins
def __init__(self, order, interactionVal=1, magMoment=1):
if order < 3:
raise ValueError('Order number needs to be greater than 2.')
self.temp = 0.0
self.beta = 0.0
self.boltzmann = 1.38064852 * (10 ** -23)
self.order = order
self.J = float(interactionVal)
self.h = float(magMoment)
self.magList = []
self.specHeatList = []
self.energyList = []
self.suscepList = []
self.spins = []
self.resetSpins()
# reset the spin lattice to a random configuration
def resetSpins(self):
vals = np.array([-1, 1])
self.spins = np.random.choice(vals, size=(self.order, self.order))
# returns an array of an atom's 4 nearest neighbours
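# periodic boundaries: index -1 wraps via Python's negative indexing, and % self.order wraps the far edge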
def neighbours(self, row, col):
return np.asarray([self.spins[row][col - 1], #left
self.spins[row][(col + 1) % self.order], #right
self.spins[row - 1][col], #up
self.spins[(row + 1) % self.order][col]]) #down
# calculates the energy of a single atom, using the Hamiltonian
def singleEnergy(self, row, col):
neighbours = self.neighbours(row, col)
selfSpin = self.spins[row][col]
return -self.J * selfSpin * np.sum(np.sum(neighbours)) - self.h * selfSpin
# calculates the magnitude of the entire energy of the lattice
def totalEnergy(self):
energy = 0.0
for i in np.arange(self.order):
for j in np.arange(self.order):
energy += self.singleEnergy(i, j)
# note: summing per-atom energies counts each interaction pair twice
# the commented-out denominator below would normalise by the maximum possible energy
return energy# / (self.order * self.order * (-4 * self.J - self.h) )
# calculates the magnitude of the residual magnetic spin of the lattice
# normalise by dividing by order of lattice squared
def totalMag(self):
return np.sum(np.sum(self.spins)) / (self.order ** 2)
def specHeat(self, energy, energySquared, temp):
return (energySquared - energy ** 2) * (1 / (self.order * self.order * 2 * temp * temp))
def suscep(self, mag, magSquared, temp):
return (magSquared - mag ** 2) / temp
# attempts to flip a random spin using the metropolis algorithm and the Boltzmann distribution
def tryFlip(self, row, col):
# energy change = -2 * E_initial
# so accept change if E_initial >= 0
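# (both the exchange and field terms of the Hamiltonian are linear in the spin, so flipping s -> -s
# negates the local energy, i.e. delta E = -2 * E_initial)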
energy = self.singleEnergy(row, col)
if energy >= 0 or np.random.random() <= math.exp(self.beta * 2 * energy):
self.spins[row][col] *= -1
# closes plot window
def close_event(self):
plt.close() # timer calls this function after 3 seconds and closes the window
# plots a meshgrid of the initial and final spin lattices
def plotStartEndSpins(self, spinsList, iters=1000000):
cmap = colors.ListedColormap(['red', 'yellow'])
bounds = [-1, 0, 1]
norm = colors.BoundaryNorm(bounds, cmap.N)
plt.subplots(nrows=1, ncols=2)
plt.tight_layout()
plt.subplot(1,2,1)
plt.imshow(spinsList[0], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Initial Configuration')
plt.subplot(1, 2, 2)
plt.imshow(spinsList[1], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Final Configuration')
title = "Temperature (J/K_B) = {0}, J = {1}, h = {2}, Iterations = {3}".format(self.temp, self.J, self.h, iters) + "\n" + "Order: {0} x {1}".format(self.order, self.order)
plt.suptitle(title)
# timer = fig.canvas.new_timer(
# interval=graphInterval) # creating a timer object and setting an interval of 3000 milliseconds
# timer.add_callback(self.close_event)
# timer.start()
plt.show()
# simulates the lattice at a constant temperature temp, for iters iterations, plots the resulting lattices, and returns the spin configurations
def basicIter(self, iters=1000000, temp=1, plot=False):
self.resetSpins()
spinsList = [copy.deepcopy(self.spins)]
self.temp = temp
self.beta = 1.0 / self.temp
for i in np.arange(iters + 1):
row, col = np.random.randint(self.order), np.random.randint(self.order)
self.tryFlip(row, col)
spinsList.append(self.spins)
if plot:
self.plotStartEndSpins(spinsList, iters)
else:
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
return spinsList
# simulates the lattice over a temperature range tempRange, with itersPerTemp iterations per temperature
# plotProperties: plot the residual spin, total energy, susceptibility and specific heat
def tempRangeIter(self, tempRange=np.arange(start=0.8, stop=3.2, step=0.05), itersPerTemp=100000, plotProperties=False):
self.resetSpins()
# store the averages here
energyList = []
magList = []
specHeatList = []
suscepList = []
for temp in tqdm(tempRange):
self.beta = 1.0 / temp
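# temperatures are expressed in reduced units of J/k_B, so beta is simply 1/T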
#print("Calculating temp:", temp)
# allow to reach equilibrium
for i in np.arange(itersPerTemp + 1):
row, col = np.random.randint(0, self.order), np.random.randint(0, self.order)
self.tryFlip(row, col)
# do a further 500,000 iterations to compute the averages, storing the properties every 5,000 iterations
#store the values used to calculate averages here
magListEquilib = []
energyListEquilib = []
for i in np.arange(500000):
if i % 5000 == 0:
energy = self.totalEnergy()
mag = self.totalMag()
energyListEquilib.append(energy)
magListEquilib.append(mag)
row, col = np.random.randint(0, self.order), np.random.randint(0, self.order)
self.tryFlip(row, col)
energyAvg = np.average(energyListEquilib)
energySquaredAvg = np.average(np.square(energyListEquilib))
magAvg = np.average(magListEquilib)
magSquaredAvg = np.average(np.square(magListEquilib))
energyList.append(energyAvg)
magList.append(magAvg)
specHeatList.append(self.specHeat(energyAvg, energySquaredAvg, temp))
suscepList.append(self.suscep(magAvg, magSquaredAvg, temp))
# reset the spins for the next temperature
self.resetSpins()
if plotProperties:
plt.tight_layout()
plt.subplot(2, 2, 1)
plt.plot(tempRange, energyList)
plt.title("Total Energy")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.subplot(2, 2, 2)
plt.plot(tempRange, magList)
plt.title("Residual Spin")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.subplot(2, 2, 3)
plt.plot(tempRange, specHeatList)
plt.title("Specific Heat Capacity")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.subplot(2, 2, 4)
plt.plot(tempRange, suscepList)
plt.title("Susceptibility")
plt.axvline(x=2.269185, c='r', linestyle='--')
plt.tick_params(axis="x", direction="in")
plt.tick_params(axis="y", direction="in")
plt.xlim(tempRange[0], tempRange[len(tempRange) - 1])
plt.legend()
plt.show()
return {"tempRange": tempRange,
"energyList": energyList,
"magList": magList,
"specHeatList": specHeatList,
"suscepList": suscepList}
class IsingTriangle:
# initialise a spin lattice and populate with random spins
def __init__(self, order, interactionVal=1, magMoment=1):
if order < 4:
raise ValueError('Order number needs to be greater than 3.')
self.temp = 0.0
self.beta = 0.0
self.boltzmann = 1.38064852 * (10 ** -23)
self.order = order
self.J = float(interactionVal)
self.h = float(magMoment)
self.magList = []
self.specHeatList = []
self.energyList = []
self.suscepList = []
self.spins = []
self.resetSpins()
# reset the spin lattice to a random configuration
def resetSpins(self):
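# the triangular lattice is stored as a ragged list of rows, with row i holding i + 1 spins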
self.spins = []
vals = np.array([1, -1])
for i in np.arange(self.order):
self.spins.append(list(np.random.choice(vals, size=i + 1)))
# returns an array of an atom's 6 nearest neighbours
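# boundary sites are wrapped around the lattice (the apex spin is reused where needed), so every site
# always returns six neighbour entries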
def neighbours(self, row, col):
# centre atoms
if 1 < row < self.order - 1 and 0 < col < row:
return np.asarray([self.spins[row - 1][col - 1],
self.spins[row - 1][col],
self.spins[row][col - 1],
self.spins[row][col + 1],
self.spins[row + 1][col],
self.spins[row + 1][col + 1]])
# left side central
elif 0 < row < self.order - 1 and col == 0:
return np.asarray([self.spins[row - 1][0],
self.spins[row][1],
self.spins[row + 1][0],
self.spins[row + 1][1],
self.spins[row][row],
self.spins[row - 1][row - 1]])
# right side central
elif 0 < row < self.order - 1 and col == row:
return np.asarray([self.spins[row - 1][row - 1],
self.spins[row - 1][0],
self.spins[row][row - 1],
self.spins[row][0],
self.spins[row + 1][row],
self.spins[row + 1][row + 1]])
# bottom side central
elif row == self.order - 1 and 0 < col < row:
return np.asarray([self.spins[row - 1][col - 1],
self.spins[row - 1][col],
self.spins[row][col - 1],
self.spins[row][col + 1],
self.spins[0][0],
self.spins[0][0]])
# very top
elif row == 0:
return np.asarray([self.spins[1][0],
self.spins[1][1],
self.spins[self.order - 1][0],
self.spins[self.order - 1][self.order - 1],
self.spins[self.order - 1][1],
self.spins[self.order - 1][self.order - 2]])
# bottom left
elif row == self.order - 1 and col == 0:
return np.asarray([self.spins[row - 1][0],
self.spins[row - 1][row - 1],
self.spins[row][1],
self.spins[row][row],
self.spins[0][0],
self.spins[0][0]])
# bottom right
elif row == self.order - 1 and (col == row):
return np.asarray([self.spins[row - 1][0],
self.spins[row - 1][row - 1],
self.spins[row][0],
self.spins[row][row - 1],
self.spins[0][0],
self.spins[0][0]])
# calculates the energy of a single atom, using the Hamiltonian
def singleEnergy(self, row, col):
neighbours = self.neighbours(row, col)
selfSpin = self.spins[row][col]
return self.J * selfSpin * np.sum(np.sum(neighbours)) - self.h * selfSpin
# calculates the magnitude of the entire energy of the lattice
def totalEnergy(self):
energy = 0.0
for i in np.arange(self.order):
for j in np.arange(len(self.spins[i])):
energy += self.singleEnergy(i, j)
# note: each interaction pair is counted twice in this sum
# divide by maximum possible energy to normalise
return -math.fabs(energy / ((-6 * self.J - self.h) * ((self.order ** 2 + self.order) / 2)))
# calculates the magnitude of the residual magnetic spin of the lattice
# normalise by dividing by order of lattice squared
def totalMag(self):
return math.fabs((np.sum(np.sum(self.spins)) * 2) / (self.order ** 2 + self.order))
def specHeat(self, energy, energySquared, temp):
return (energySquared - energy ** 2) * (1 / (self.order * self.order * 2 * temp * temp))
def suscep(self, mag, magSquared, temp):
return self.J * (magSquared - mag ** 2) * (1 / (self.order * self.order * 2 * temp))
# attempts to flip a random spin using the metropolis algorithm and the Boltzmann distribution
def tryFlip(self, row, col):
# energy change = -2 * E_initial
# so accept change if E_initial <= 0
energy = self.singleEnergy(row, col)
if energy <= 0 or np.random.random() <= math.exp(-self.beta * 2 * energy):
self.spins[row][col] *= -1
# closes plot window
def close_event(self):
plt.close() # timer calls this function after 3 seconds and closes the window
# plots a meshgrid of the initial and final spin lattices
def plotStartEndSpins(self, spinsList, iters=1000000):
for i in np.arange(self.order):
for j in np.arange(self.order - i - 1):
spinsList[0][i].append(8)
spinsList[1][i].append(8)
cmap = colors.ListedColormap(['red', 'yellow', 'white'])
bounds = [-1, 0, 2, 10]
norm = colors.BoundaryNorm(bounds, cmap.N)
plt.subplots(nrows=1, ncols=2)
plt.tight_layout()
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
plt.subplot(1,2,1)
plt.imshow(spinsList[0], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Initial Configuration')
plt.subplot(1, 2, 2)
plt.imshow(spinsList[1], cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.title('Final Configuration')
title = "Temperature (J/K_B) = {0}, J = {1}, h = {2}, Iterations = {3}".format(self.temp, self.J, self.h, iters) + "\n" + "Order: {0}".format(self.order,)
plt.suptitle(title)
# timer = fig.canvas.new_timer(
# interval=graphInterval) # creating a timer object and setting an interval of 3000 milliseconds
# timer.add_callback(self.close_event)
# timer.start()
plt.show()
# simulates the lattice at a constant temperature temp, for iters iterations, and returns the spin configurations
def basicIter(self, iters=1000000, temp=1, plot=False):
self.resetSpins()
spinsList = [copy.deepcopy(self.spins)]
self.temp = temp
self.beta = 1.0 / self.temp
for i in np.arange(iters + 1):
row = np.random.randint(self.order)
col = np.random.randint(row + 1)
self.tryFlip(row, col)
spinsList.append(self.spins)
print(spinsList[0])
print(spinsList[1])
if plot:
self.plotStartEndSpins(spinsList, iters)
else:
for i in np.arange(len(spinsList[0])):
spinsList[0][i] = np.asarray(spinsList[0][i])
for i in np.arange(len(spinsList[1])):
spinsList[1][i] = np.asarray(spinsList[1][i])
spinsList = np.array(spinsList)
return spinsList
# simulates the lattice over a temperature range tempRange, with itersPerTemp iterations per temperature
# plotProperties: plot the residual spin, total energy, susceptibility and specific heat
def tempRangeIter(self, tempRange=np.arange(start=1, stop=5, step=0.2), itersPerTemp=100000, plotProperties=False):
self.resetSpins()
# store the averages here
energyList = []
magList = []
specHeatList = []
suscepList = []
for temp in tqdm(tempRange):
self.beta = 1.0 / temp
#print("Calculating temp:", temp)
# allow to reach equilibrium
for i in np.arange(itersPerTemp + 1):
row = np.random.randint(self.order)
col = np.random.randint(row + 1)
self.tryFlip(row, col)
#do a further ten thousand iterations to get average, and every two hundred iterations, store the properties
if plotProperties:
#store the values used to calculate averages here
magListEquilib = []
energyListEquilib = []
for i in np.arange(10000):
if i % 200 == 0:
energy = self.totalEnergy()
mag = self.totalMag()
energyListEquilib.append(energy)
magListEquilib.append(mag)
row = np.random.randint(self.order)
col = np.random.randint(row + 1)
self.tryFlip(row, col)
# -*- coding: utf-8 -*-
"""
Description
-----------
This module is written to execute GCMC moves with water molecules in OpenMM, via a series of
Sampler objects.
<NAME>
<NAME>
"""
import numpy as np
import mdtraj
import os
import logging
import parmed
import math
from copy import deepcopy
from simtk import unit
from simtk import openmm
from openmmtools.integrators import NonequilibriumLangevinIntegrator
from grand.utils import random_rotation_matrix
from grand.utils import PDBRestartReporter
from grand.potential import get_lambda_values
class BaseGrandCanonicalMonteCarloSampler(object):
"""
Base class for carrying out GCMC moves in OpenMM.
All other Sampler objects are derived from this
"""
def __init__(self, system, topology, temperature, ghostFile="gcmc-ghost-wats.txt", log='gcmc.log',
dcd=None, rst=None, overwrite=False):
"""
Initialise the object to be used for sampling water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
log : str
Log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Overwrite any data already present
"""
# Create logging object
if os.path.isfile(log):
if overwrite:
os.remove(log)
else:
raise Exception("File {} already exists, not overwriting...".format(log))
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(log)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s: %(message)s'))
self.logger.addHandler(file_handler)
# Set important variables here
self.system = system
self.topology = topology
self.positions = None # Store no positions upon initialisation
self.context = None
self.kT = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA * temperature
self.simulation_box = np.zeros(3) * unit.nanometer # Set to zero for now
self.logger.info("kT = {}".format(self.kT.in_units_of(unit.kilocalorie_per_mole)))
# Find NonbondedForce - needs to be updated to switch waters on/off
for f in range(system.getNumForces()):
force = system.getForce(f)
if force.__class__.__name__ == "NonbondedForce":
self.nonbonded_force = force
# Flag an error if not simulating at constant volume
elif "Barostat" in force.__class__.__name__:
self.raiseError("GCMC must be used at constant volume - {} cannot be used!".format(force.__class__.__name__))
# Set GCMC-specific variables
self.N = 0 # Initialise N as zero
self.Ns = [] # Store all observed values of N
self.n_moves = 0
self.n_accepted = 0
self.acceptance_probabilities = [] # Store acceptance probabilities
# Get parameters for the water model
self.water_params = self.getWaterParameters("HOH")
# Get water residue IDs & assign statuses to each
self.water_resids = self.getWaterResids("HOH") # All waters
# Assign each water a status: 0: ghost water, 1: GCMC water, 2: Water not under GCMC tracking (out of sphere)
self.water_status = np.ones(len(self.water_resids)) # Initially assign all to 1
# Need to create a customised force to handle softcore steric interactions of water molecules
# This should prevent any 0/0 energy evaluations
self.custom_nb_force = None
self.customiseForces()
# Need to open the file to store ghost water IDs
self.ghost_file = ghostFile
# Check whether to overwrite if the file already exists
if os.path.isfile(self.ghost_file) and not overwrite:
self.raiseError("File {} already exists, not overwriting...".format(self.ghost_file))
else:
with open(self.ghost_file, 'w') as f:
pass
# Store reporters for DCD and restart output
if dcd is not None:
# Check whether to overwrite
if os.path.isfile(dcd):
if overwrite:
# Need to remove before overwriting, so there isn't any mix up
os.remove(dcd)
self.dcd = mdtraj.reporters.DCDReporter(dcd, 0)
else:
self.raiseError("File {} already exists, not overwriting...".format(dcd))
else:
self.dcd = mdtraj.reporters.DCDReporter(dcd, 0)
else:
self.dcd = None
if rst is not None:
# Check whether to overwrite
if os.path.isfile(rst) and not overwrite:
self.raiseError("File {} already exists, not overwriting...".format(rst))
else:
# Check whether to use PDB or RST7 for the restart file
rst_ext = os.path.splitext(rst)[1]
if rst_ext == '.rst7':
self.restart = parmed.openmm.reporters.RestartReporter(rst, 0)
elif rst_ext == '.pdb':
self.restart = PDBRestartReporter(rst, self.topology)
else:
self.raiseError("File extension {} not recognised for restart file".format(rst))
else:
self.restart = None
self.logger.info("BaseGrandCanonicalMonteCarloSampler object initialised")
def customiseForces(self):
"""
Create a CustomNonbondedForce to handle water-water interactions and modify the original NonbondedForce
to ignore water interactions
"""
# Need to make sure that the electrostatics are handled using PME (for now)
if self.nonbonded_force.getNonbondedMethod() != openmm.NonbondedForce.PME:
self.raiseError("Currently only supporting PME for long range electrostatics")
# Define the energy expression for the softcore sterics
energy_expression = ("U;"
"U = (lambda^soft_a) * 4 * epsilon * x * (x-1.0);" # Softcore energy
"x = (sigma/reff)^6;" # Define x as sigma/r(effective)
# Calculate effective distance
"reff = sigma*((soft_alpha*(1.0-lambda)^soft_b + (r/sigma)^soft_c))^(1/soft_c);"
# Define combining rules
"sigma = 0.5*(sigma1+sigma2); epsilon = sqrt(epsilon1*epsilon2); lambda = lambda1*lambda2")
# Create a customised sterics force
custom_sterics = openmm.CustomNonbondedForce(energy_expression)
# Add necessary particle parameters
custom_sterics.addPerParticleParameter("sigma")
custom_sterics.addPerParticleParameter("epsilon")
custom_sterics.addPerParticleParameter("lambda")
# Assume that the system is periodic (for now)
custom_sterics.setNonbondedMethod(openmm.CustomNonbondedForce.CutoffPeriodic)
# Transfer properties from the original force
custom_sterics.setUseSwitchingFunction(self.nonbonded_force.getUseSwitchingFunction())
custom_sterics.setCutoffDistance(self.nonbonded_force.getCutoffDistance())
custom_sterics.setSwitchingDistance(self.nonbonded_force.getSwitchingDistance())
self.nonbonded_force.setUseDispersionCorrection(False)
custom_sterics.setUseLongRangeCorrection(self.nonbonded_force.getUseDispersionCorrection())
# Set softcore parameters
custom_sterics.addGlobalParameter('soft_alpha', 0.5)
custom_sterics.addGlobalParameter('soft_a', 1)
custom_sterics.addGlobalParameter('soft_b', 1)
custom_sterics.addGlobalParameter('soft_c', 6)
# Get a list of all water and non-water atom IDs
water_atom_ids = []
for resid, residue in enumerate(self.topology.residues()):
if resid in self.water_resids:
for atom in residue.atoms():
water_atom_ids.append(atom.index)
# Copy all steric interactions into the custom force, and remove them from the original force
for atom_idx in range(self.nonbonded_force.getNumParticles()):
# Get atom parameters
[charge, sigma, epsilon] = self.nonbonded_force.getParticleParameters(atom_idx)
# Make sure that sigma is not equal to zero
if np.isclose(sigma._value, 0.0):
sigma = 1.0 * unit.angstrom
# Add particle to the custom force (with lambda=1 for now)
custom_sterics.addParticle([sigma, epsilon, 1.0])
# Disable steric interactions in the original force by setting epsilon=0 (keep the charges for PME purposes)
self.nonbonded_force.setParticleParameters(atom_idx, charge, sigma, abs(0))
# Copy over all exceptions into the new force as exclusions
# Exceptions between non-water atoms will be excluded here, and handled by the NonbondedForce
# If exceptions (other than ignored interactions) are found involving water atoms, we have a problem
for exception_idx in range(self.nonbonded_force.getNumExceptions()):
[i, j, chargeprod, sigma, epsilon] = self.nonbonded_force.getExceptionParameters(exception_idx)
# If epsilon is greater than zero, this is a non-zero exception, which must be checked
if epsilon > 0.0 * unit.kilojoule_per_mole:
if i in water_atom_ids or j in water_atom_ids:
self.raiseError("Non-zero exception interaction found involving water atoms ({} & {}). grand is"
" not currently able to support this".format(i, j))
# Add this to the list of exclusions
custom_sterics.addExclusion(i, j)
# Add the custom force to the system
self.system.addForce(custom_sterics)
self.custom_nb_force = custom_sterics
return None
def reset(self):
"""
Reset counted values (such as number of total or accepted moves) to zero
"""
self.logger.info('Resetting any tracked variables...')
self.n_accepted = 0
self.n_moves = 0
self.Ns = []
self.acceptance_probabilities = []
return None
def getWaterParameters(self, water_resname="HOH"):
"""
Get the non-bonded parameters for each of the atoms in the water model used
Parameters
----------
water_resname : str
Name of the water residues
Returns
-------
wat_params : list
List of dictionaries containing the charge, sigma and epsilon for each water atom
"""
wat_params = [] # Store parameters in a list
for residue in self.topology.residues():
if residue.name == water_resname:
for atom in residue.atoms():
# Store the parameters of each atom
atom_params = self.nonbonded_force.getParticleParameters(atom.index)
wat_params.append({'charge' : atom_params[0],
'sigma' : atom_params[1],
'epsilon' : atom_params[2]})
break # Don't need to continue past the first instance
return wat_params
def getWaterResids(self, water_resname="HOH"):
"""
Get the residue IDs of all water molecules in the system
Parameters
----------
water_resname : str
Name of the water residues
Returns
-------
resid_list : list
List of residue ID numbers
"""
resid_list = []
for resid, residue in enumerate(self.topology.residues()):
if residue.name == water_resname:
resid_list.append(resid)
return resid_list
def setWaterStatus(self, resid, new_value):
"""
Set the status of a particular water to a particular value
Parameters
----------
resid : int
Residue to update the status for
new_value : int
New value of the water status. 0: ghost, 1: GCMC water, 2: Non-tracked water
"""
# Index of this residue
wat_idx = np.where(np.array(self.water_resids) == resid)[0]
self.water_status[wat_idx] = new_value
return None
def getWaterStatusResids(self, value):
"""
Get a list of resids which have a particular status value
Parameters
----------
value : int
Value of the water status. 0: ghost, 1: GCMC water, 2: Non-tracked water
Returns
-------
resids : list
List of residues which match that status
"""
resids = [self.water_resids[idx] for idx in np.where(self.water_status == value)[0]]
return resids
def getWaterStatusValue(self, resid):
"""
Get the status value of a particular resid
Parameters
----------
resid : int
Residue to query the status of
Returns
-------
value : int
Value of the water status. 0: ghost, 1: GCMC water, 2: Non-tracked water
"""
wat_idx = np.where(np.array(self.water_resids) == resid)[0]
value = self.water_status[wat_idx]
return value
def deleteGhostWaters(self, ghostResids=None, ghostFile=None):
"""
Switch off nonbonded interactions involving the ghost molecules initially added
This function should be executed before beginning the simulation, to prevent any
explosions.
Parameters
----------
ghostResids : list
List of residue IDs corresponding to the ghost waters added
ghostFile : str
File containing residue IDs of ghost waters. Will switch off those on the
last line. This will be useful in restarting simulations
"""
# Get a list of all ghost residue IDs supplied from list and file
ghost_resids = []
# Read in list
if ghostResids is not None:
for resid in ghostResids:
ghost_resids.append(resid)
# Read residues from file if needed
if ghostFile is not None:
with open(ghostFile, 'r') as f:
lines = f.readlines()
for resid in lines[-1].split(","):
ghost_resids.append(int(resid))
# Switch off the interactions involving ghost waters
for resid, residue in enumerate(self.topology.residues()):
if resid in ghost_resids:
# Switch off nonbonded interactions involving this water
atom_ids = []
for i, atom in enumerate(residue.atoms()):
atom_ids.append(atom.index)
self.adjustSpecificWater(atom_ids, 0.0)
# Mark that this water has been switched off
self.setWaterStatus(resid, 0)
# Calculate N
self.N = len(self.getWaterStatusResids(1))
return None
def adjustSpecificWater(self, atoms, new_lambda):
"""
Adjust the coupling of a specific water molecule, by adjusting the lambda value
Parameters
----------
atoms : list
List of the atom indices of the water to be adjusted
new_lambda : float
Value to set lambda to for this particle
"""
# Get lambda values
lambda_vdw, lambda_ele = get_lambda_values(new_lambda)
# Loop over parameters
for i, atom_idx in enumerate(atoms):
# Obtain original parameters
atom_params = self.water_params[i]
# Update charge in NonbondedForce
self.nonbonded_force.setParticleParameters(atom_idx,
charge=(lambda_ele * atom_params["charge"]),
sigma=atom_params["sigma"],
epsilon=abs(0.0))
# Update lambda in CustomNonbondedForce
self.custom_nb_force.setParticleParameters(atom_idx,
[atom_params["sigma"], atom_params["epsilon"], lambda_vdw])
# Update context with new parameters
self.nonbonded_force.updateParametersInContext(self.context)
self.custom_nb_force.updateParametersInContext(self.context)
return None
def report(self, simulation):
"""
Function to report any useful data
Parameters
----------
simulation : simtk.openmm.app.Simulation
Simulation object being used
"""
# Get state
state = simulation.context.getState(getPositions=True, getVelocities=True)
# Calculate rounded acceptance rate and mean N
if self.n_moves > 0:
acc_rate = np.round(self.n_accepted * 100.0 / self.n_moves, 4)
else:
acc_rate = np.nan
mean_N = np.round(np.mean(self.Ns), 4)
# Print out a line describing the acceptance rate and sampling of N
msg = "{} move(s) completed ({} accepted ({:.4f} %)). Current N = {}. Average N = {:.3f}".format(self.n_moves,
self.n_accepted,
acc_rate,
self.N,
mean_N)
print(msg)
self.logger.info(msg)
# Write to the file describing which waters are ghosts through the trajectory
self.writeGhostWaterResids()
# Append to the DCD and update the restart file
if self.dcd is not None:
self.dcd.report(simulation, state)
if self.restart is not None:
self.restart.report(simulation, state)
return None
def raiseError(self, error_msg):
"""
Make it nice and easy to report an error in a consistent way - also easier to manage error handling in future
Parameters
----------
error_msg : str
Message describing the error
"""
# Write to the log file
self.logger.error(error_msg)
# Raise an Exception
raise Exception(error_msg)
def writeGhostWaterResids(self):
"""
Write out a comma-separated list of the residue IDs of waters which are
non-interacting, so that they can be removed from visualisations. It is important
to execute this function when writing to trajectory files, so that each line
in the ghost water file corresponds to a frame in the trajectory
"""
with open(self.ghost_file, 'a') as f:
ghost_resids = self.getWaterStatusResids(0)
if len(ghost_resids) > 0:
f.write("{}".format(ghost_resids[0]))
if len(ghost_resids) > 1:
for resid in ghost_resids[1:]:
f.write(",{}".format(resid))
f.write("\n")
return None
def move(self, context, n=1):
"""
Returns an error if someone attempts to execute a move with the parent object
Parameters are designed to match the signature of the inheriting classes
Parameters
----------
context : simtk.openmm.Context
Current context of the simulation
n : int
Number of moves to execute
"""
error_msg = ("GrandCanonicalMonteCarloSampler is not designed to sample!")
self.logger.error(error_msg)
raise NotImplementedError(error_msg)
########################################################################################################################
########################################################################################################################
########################################################################################################################
class GCMCSphereSampler(BaseGrandCanonicalMonteCarloSampler):
"""
Base class for carrying out GCMC moves in OpenMM, using a GCMC sphere to sample the system
"""
def __init__(self, system, topology, temperature, adams=None,
excessChemicalPotential=-6.09*unit.kilocalories_per_mole,
standardVolume=30.345*unit.angstroms**3, adamsShift=0.0,
ghostFile="gcmc-ghost-wats.txt", referenceAtoms=None, sphereRadius=None, sphereCentre=None,
log='gcmc.log', dcd=None, rst=None, overwrite=False):
"""
Initialise the object to be used for sampling water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
adams : float
Adams B value for the simulation (dimensionless). Default is None,
if None, the B value is calculated from the box volume and chemical
potential
excessChemicalPotential : simtk.unit.Quantity
Excess chemical potential of the system that the simulation should be in equilibrium with, default is
-6.09 kcal/mol. This should be the hydration free energy of water, and may need to be changed for specific
simulation parameters.
standardVolume : simtk.unit.Quantity
Standard volume of water - corresponds to the volume per water molecule in bulk. The default value is 30.345 A^3
adamsShift : float
Shift the B value from Bequil, if B isn't explicitly set. Default is 0.0
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
referenceAtoms : list
List containing dictionaries describing the atoms to use as the centre of the GCMC region
Must contain 'name' and 'resname' as keys, and optionally 'resid' (recommended) and 'chain'
e.g. [{'name': 'C1', 'resname': 'LIG', 'resid': '123'}]
sphereRadius : simtk.unit.Quantity
Radius of the spherical GCMC region
sphereCentre : simtk.unit.Quantity
Coordinates around which the GCMC sphere is based
log : str
Log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Overwrite any data already present
"""
# Initialise base
BaseGrandCanonicalMonteCarloSampler.__init__(self, system, topology, temperature, ghostFile=ghostFile,
log=log, dcd=dcd, rst=rst,
overwrite=overwrite)
# Initialise variables specific to the GCMC sphere
self.sphere_radius = sphereRadius
self.sphere_centre = None
volume = (4 * np.pi * sphereRadius ** 3) / 3
if referenceAtoms is not None:
# Define sphere based on reference atoms
self.ref_atoms = self.getReferenceAtomIndices(referenceAtoms)
self.logger.info("GCMC sphere is based on reference atom IDs: {}".format(self.ref_atoms))
elif sphereCentre is not None:
# Define sphere based on coordinates
assert len(sphereCentre) == 3, "Sphere coordinates must be 3D"
self.sphere_centre = sphereCentre
self.ref_atoms = None
self.logger.info("GCMC sphere is fixed in space and centred on {}".format(self.sphere_centre))
else:
self.raiseError("A set of atoms or coordinates must be used to define the centre of the sphere!")
self.logger.info("GCMC sphere radius is {}".format(self.sphere_radius))
# Set or calculate the Adams value for the simulation
if adams is not None:
self.B = adams
else:
# Calculate Bequil from the chemical potential and volume
self.B = excessChemicalPotential / self.kT + math.log(volume / standardVolume)
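# i.e. B_equil = mu_ex / kT + ln(V_sphere / V_standard), the Adams value at which insertions and
# deletions balance equilibrium with bulk water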
# Shift B from Bequil if necessary
self.B += adamsShift
self.logger.info("Simulating at an Adams (B) value of {}".format(self.B))
self.logger.info("GCMCSphereSampler object initialised")
def getReferenceAtomIndices(self, ref_atoms):
"""
Get the index of the atom used to define the centre of the GCMC box
Parameters
----------
ref_atoms : list
List of dictionaries containing the atom name, residue name and (optionally) residue ID and chain,
as marked by keys 'name', 'resname', 'resid' and 'chain'
Returns
-------
atom_indices : list
Indices of the atoms chosen
"""
atom_indices = []
# Make sure the reference atoms are supplied as a list of dictionaries
if not all(type(x) == dict for x in ref_atoms):
self.raiseError("Reference atoms must be a list of dictionaries! {}".format(ref_atoms))
# Find atom index for each of the atoms used
for atom_dict in ref_atoms:
found = False # Checks if the atom has been found
# Read in atom data
name = atom_dict['name']
resname = atom_dict['resname']
# Residue ID and chain may not be present
try:
resid = atom_dict['resid']
except:
resid = None
try:
chain = atom_dict['chain']
except:
chain = None
# Loop over all atoms to find one which matches these criteria
for c, chain_obj in enumerate(self.topology.chains()):
# Check chain, if specified
if chain is not None:
if c != chain:
continue
for residue in chain_obj.residues():
# Check residue name
if residue.name != resname:
continue
# Check residue ID, if specified
if resid is not None:
if residue.id != resid:
continue
# Loop over all atoms in this residue to find the one with the right name
for atom in residue.atoms():
if atom.name == name:
atom_indices.append(atom.index)
found = True
if not found:
self.raiseError("Atom {} of residue {}{} not found!".format(atom_dict['name'],
atom_dict['resname'].capitalize(),
atom_dict['resid']))
if len(atom_indices) == 0:
self.raiseError("No GCMC reference atoms found")
return atom_indices
def getSphereCentre(self):
"""
Update the coordinates of the sphere centre
Need to make sure it isn't affected by the reference atoms being split across PBCs
"""
if self.ref_atoms is None:
self.raiseError("No reference atoms defined, cannot get sphere coordinates...")
# Calculate the mean coordinate
self.sphere_centre = np.zeros(3) * unit.nanometers
for i, atom in enumerate(self.ref_atoms):
# Need to add on a correction in case the atoms get separated
correction = np.zeros(3) * unit.nanometers
if i != 0:
# Vector from the first reference atom
vec = self.positions[self.ref_atoms[0]] - self.positions[atom]
# Correct for PBCs
for j in range(3):
if vec[j] > 0.5 * self.simulation_box[j]:
correction[j] = self.simulation_box[j]
elif vec[j] < -0.5 * self.simulation_box[j]:
correction[j] = -self.simulation_box[j]
# Add vector and correction onto the running sum
self.sphere_centre += self.positions[atom] + correction
# Calculate the average coordinate
self.sphere_centre /= len(self.ref_atoms)
return None
def initialise(self, context, ghostResids=[]):
"""
Prepare the GCMC sphere for simulation by loading the coordinates from a
Context object.
Parameters
----------
context : simtk.openmm.Context
Current context of the simulation
ghostResids : list
List of residue IDs corresponding to the ghost waters added
"""
# Load context into sampler
self.context = context
# Load in positions and box vectors from context
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
box_vectors = state.getPeriodicBoxVectors(asNumpy=True)
# Check the symmetry of the box - currently only tolerate cuboidal boxes
# All off-diagonal box vector components must be zero
for i in range(3):
for j in range(3):
if i == j:
continue
if not np.isclose(box_vectors[i, j]._value, 0.0):
self.raiseError("grand only accepts cuboidal simulation cells at this time.")
self.simulation_box = np.array([box_vectors[0, 0]._value,
box_vectors[1, 1]._value,
box_vectors[2, 2]._value]) * unit.nanometer
# Check size of the GCMC sphere, relative to the box
for i in range(3):
if self.sphere_radius > 0.5 * self.simulation_box[i]:
self.raiseError("GCMC sphere radius cannot be larger than half a box length.")
# Calculate the centre of the GCMC sphere, if using reference atoms
if self.ref_atoms is not None:
self.getSphereCentre()
# Loop over waters and check which are in/out of the GCMC sphere at the beginning - may be able to replace this with updateGCMCSphere?
for resid, residue in enumerate(self.topology.residues()):
if resid not in self.water_resids:
continue
for atom in residue.atoms():
ox_index = atom.index
break
vector = self.positions[ox_index] - self.sphere_centre
# Correct PBCs of this vector - need to make this part cleaner
for i in range(3):
if vector[i] >= 0.5 * self.simulation_box[i]:
vector[i] -= self.simulation_box[i]
elif vector[i] <= -0.5 * self.simulation_box[i]:
vector[i] += self.simulation_box[i]
# Set the status of this water as appropriate
if np.linalg.norm(vector) * unit.nanometer <= self.sphere_radius:
self.setWaterStatus(resid, 1)
else:
self.setWaterStatus(resid, 2)
# Delete ghost waters
if len(ghostResids) > 0:
self.deleteGhostWaters(ghostResids)
return None
def deleteWatersInGCMCSphere(self):
"""
Function to delete all of the waters currently present in the GCMC region
This may be useful if the plan is to generate a water distribution for this
region from scratch. If so, it would be recommended to interleave the GCMC
sampling with coordinate propagation, as this will converge faster.
"""
# Read in positions of the context and update GCMC box
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
# Loop over all residues to find those of interest
for resid, residue in enumerate(self.topology.residues()):
# Make sure this is a water
if resid not in self.water_resids:
continue
# Make sure this is a GCMC water
if self.getWaterStatusValue(resid) != 1:
continue
# Get atom IDs
atom_ids = []
for atom in residue.atoms():
atom_ids.append(atom.index)
# Switch off interactions involving the atoms of this residue
self.adjustSpecificWater(atom_ids, 0.0)
# Update relevant parameters
self.setWaterStatus(resid, 0)
self.N -= 1
return None
def updateGCMCSphere(self, state):
"""
Update the relevant GCMC-sphere related parameters. This also involves monitoring
which water molecules are in/out of the region
Parameters
----------
state : simtk.openmm.State
Current State
"""
# Make sure the positions are definitely updated
self.positions = deepcopy(state.getPositions(asNumpy=True))
# Get the sphere centre, if using reference atoms, otherwise this will be fine
if self.ref_atoms is not None:
self.getSphereCentre()
box_vectors = state.getPeriodicBoxVectors(asNumpy=True)
self.simulation_box = np.array([box_vectors[0, 0]._value,
box_vectors[1, 1]._value,
box_vectors[2, 2]._value]) * unit.nanometer
# Check which waters are in the GCMC region
for resid, residue in enumerate(self.topology.residues()):
# Make sure this is a water
if resid not in self.water_resids:
continue
# Get oxygen atom ID
for atom in residue.atoms():
ox_index = atom.index
break
# Ghost waters keep their ghost status here; they are skipped rather than re-classified by the sphere check
if self.getWaterStatusValue(resid) == 0:
continue
# Check if the water is within the sphere
vector = self.positions[ox_index] - self.sphere_centre
# Correct PBCs of this vector - need to make this part cleaner
for i in range(3):
if vector[i] >= 0.5 * self.simulation_box[i]:
vector[i] -= self.simulation_box[i]
elif vector[i] <= -0.5 * self.simulation_box[i]:
vector[i] += self.simulation_box[i]
# Set the status of this water as appropriate
if np.linalg.norm(vector) * unit.nanometer <= self.sphere_radius:
self.setWaterStatus(resid, 1)
else:
self.setWaterStatus(resid, 2)
# Update lists
self.N = len(self.getWaterStatusResids(1))
return None
def insertRandomWater(self):
"""
Translate a random ghost to a random point in the GCMC sphere to allow subsequent insertion
Returns
-------
new_positions : simtk.unit.Quantity
Positions following the 'insertion' of the ghost water
insert_water : int
Residue ID of the water to insert
atom_indices : list
List of the atom IDs for this molecule
"""
# Select a ghost water to insert
ghost_wats = self.getWaterStatusResids(0)
# Check that there are any ghosts present
if len(ghost_wats) == 0:
self.raiseError("No ghost water molecules left, so insertion moves cannot occur - add more ghost waters")
insert_water = np.random.choice(ghost_wats)
atom_indices = []
for resid, residue in enumerate(self.topology.residues()):
if resid == insert_water:
for atom in residue.atoms():
atom_indices.append(atom.index)
# Select a point to insert the water (based on O position)
rand_nums = np.random.randn(3)
insert_point = self.sphere_centre + (
self.sphere_radius * np.power(np.random.rand(), 1.0 / 3) * rand_nums) / np.linalg.norm(rand_nums)
# Generate a random rotation matrix
R = random_rotation_matrix()
new_positions = deepcopy(self.positions)
for i, index in enumerate(atom_indices):
# Translate coordinates to an origin defined by the oxygen atom, and normalise
atom_position = self.positions[index] - self.positions[atom_indices[0]]
# Rotate about the oxygen position
if i != 0:
vec_length = np.linalg.norm(atom_position)
atom_position = atom_position / vec_length
# Rotate coordinates & restore length
atom_position = vec_length * np.dot(R, atom_position) * unit.nanometer
# Translate to new position
new_positions[index] = atom_position + insert_point
return new_positions, insert_water, atom_indices
def deleteRandomWater(self):
"""
Choose a random water to be deleted
Returns
-------
delete_water : int
Resid of the water to delete
atom_indices : list
List of the atom IDs for this molecule
"""
# Cannot carry out deletion if there are no GCMC waters on
gcmc_wats = self.getWaterStatusResids(1)
if len(gcmc_wats) == 0:
return None, None
# Select a water residue to delete
delete_water = np.random.choice(gcmc_wats)
# Get atom indices
atom_indices = []
for resid, residue in enumerate(self.topology.residues()):
if resid == delete_water:
for atom in residue.atoms():
atom_indices.append(atom.index)
return delete_water, atom_indices
def report(self, simulation):
"""
Function to report any useful data
Parameters
----------
simulation : simtk.openmm.app.Simulation
Simulation object being used
"""
# Get state
state = simulation.context.getState(getPositions=True, getVelocities=True)
# Update GCMC sphere
self.updateGCMCSphere(state)
# Calculate rounded acceptance rate and mean N
if self.n_moves > 0:
acc_rate = np.round(self.n_accepted * 100.0 / self.n_moves, 4)
else:
acc_rate = np.nan
mean_N = np.round(np.mean(self.Ns), 4)
# Print out a line describing the acceptance rate and sampling of N
msg = "{} move(s) completed ({} accepted ({:.4f} %)). Current N = {}. Average N = {:.3f}".format(self.n_moves,
self.n_accepted,
acc_rate,
self.N,
mean_N)
print(msg)
self.logger.info(msg)
# Write to the file describing which waters are ghosts through the trajectory
self.writeGhostWaterResids()
# Append to the DCD and update the restart file
if self.dcd is not None:
self.dcd.report(simulation, state)
if self.restart is not None:
self.restart.report(simulation, state)
return None
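# The two geometric building blocks used repeatedly above - the minimum-image
# correction of the oxygen-to-centre vector and the sampling of a uniform point
# inside the GCMC sphere via the cube-root radius trick - can be checked in
# isolation. The sketch below is illustrative only (plain floats instead of
# simtk Quantities) and is not part of the original module.
def _sketch_sphere_geometry(box=np.array([3.0, 3.0, 3.0]), radius=0.5, n_samples=1000):
    """Minimal check of the geometry used by updateGCMCSphere/insertRandomWater."""
    centre = 0.5 * box
    # Minimum-image correction, mirroring the PBC handling of the
    # oxygen-to-centre vector above
    vector = np.array([2.9, 0.1, 1.5]) - centre
    for i in range(3):
        if vector[i] >= 0.5 * box[i]:
            vector[i] -= box[i]
        elif vector[i] <= -0.5 * box[i]:
            vector[i] += box[i]
    # Uniform sampling inside a sphere: direction from a normalised Gaussian,
    # radius scaled by the cube root of a uniform variate so that volume is
    # sampled evenly (the same trick used in insertRandomWater)
    rand_nums = np.random.randn(n_samples, 3)
    directions = rand_nums / np.linalg.norm(rand_nums, axis=1, keepdims=True)
    radii = radius * np.power(np.random.rand(n_samples, 1), 1.0 / 3)
    points = centre + radii * directions
    assert np.all(np.linalg.norm(points - centre, axis=1) <= radius)
    return vector, points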
########################################################################################################################
class StandardGCMCSphereSampler(GCMCSphereSampler):
"""
Class to carry out instantaneous GCMC moves in OpenMM
"""
def __init__(self, system, topology, temperature, adams=None, excessChemicalPotential=-6.09*unit.kilocalories_per_mole,
standardVolume=30.345*unit.angstroms**3, adamsShift=0.0, ghostFile="gcmc-ghost-wats.txt",
referenceAtoms=None, sphereRadius=None, sphereCentre=None, log='gcmc.log', dcd=None, rst=None,
overwrite=False):
"""
Initialise the object to be used for sampling instantaneous water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
adams : float
Adams B value for the simulation (dimensionless). Default is None,
if None, the B value is calculated from the box volume and chemical
potential
excessChemicalPotential : simtk.unit.Quantity
Excess chemical potential of the system that the simulation should be in equilibrium with, default is
-6.09 kcal/mol. This should be the hydration free energy of water, and may need to be changed for specific
simulation parameters.
standardVolume : simtk.unit.Quantity
Standard volume of water - corresponds to the volume per water molecule in bulk. The default value is 30.345 A^3
adamsShift : float
Shift the B value from Bequil, if B isn't explicitly set. Default is 0.0
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
referenceAtoms : list
List containing dictionaries describing the atoms to use as the centre of the GCMC region
Must contain 'name' and 'resname' as keys, and optionally 'resid' (recommended) and 'chain'
e.g. [{'name': 'C1', 'resname': 'LIG', 'resid': '123'}]
sphereRadius : simtk.unit.Quantity
Radius of the spherical GCMC region
sphereCentre : simtk.unit.Quantity
Coordinates around which the GCMC sphere is based
log : str
Name of the log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Indicates whether to overwrite already existing data
"""
# Initialise base class - don't need any more initialisation for the instantaneous sampler
GCMCSphereSampler.__init__(self, system, topology, temperature, adams=adams,
excessChemicalPotential=excessChemicalPotential, standardVolume=standardVolume,
adamsShift=adamsShift, ghostFile=ghostFile, referenceAtoms=referenceAtoms,
sphereRadius=sphereRadius, sphereCentre=sphereCentre, log=log, dcd=dcd, rst=rst,
overwrite=overwrite)
self.energy = None # Need to save energy
self.logger.info("StandardGCMCSphereSampler object initialised")
def move(self, context, n=1):
"""
Execute a number of GCMC moves on the current system
Parameters
----------
context : simtk.openmm.Context
Current context of the simulation
n : int
Number of moves to execute
"""
# Read in positions
self.context = context
state = self.context.getState(getPositions=True, enforcePeriodicBox=True, getEnergy=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
self.energy = state.getPotentialEnergy()
# Update GCMC region based on current state
self.updateGCMCSphere(state)
# Check change in N
if len(self.Ns) > 0:
dN = self.N - self.Ns[-1]
if abs(dN) > 0:
self.logger.info('Change in N of {:+} between GCMC batches'.format(dN))
# Execute moves
for i in range(n):
# Insert or delete a water, based on random choice
if np.random.randint(2) == 1:
# Attempt to insert a water
self.insertionMove()
else:
# Attempt to delete a water
self.deletionMove()
self.n_moves += 1
self.Ns.append(self.N)
return None
def insertionMove(self):
"""
Carry out a random water insertion move on the current system
"""
# Choose a random site in the sphere to insert a water
new_positions, resid, atom_indices = self.insertRandomWater()
# Recouple this water
self.adjustSpecificWater(atom_indices, 1.0)
self.context.setPositions(new_positions)
# Calculate new system energy and acceptance probability
final_energy = self.context.getState(getEnergy=True).getPotentialEnergy()
acc_prob = math.exp(self.B) * math.exp(-(final_energy - self.energy) / self.kT) / (self.N + 1)
self.acceptance_probabilities.append(acc_prob)
if acc_prob < np.random.rand() or np.isnan(acc_prob):
# Need to revert the changes made if the move is to be rejected
# Switch off nonbonded interactions involving this water
self.adjustSpecificWater(atom_indices, 0.0)
self.context.setPositions(self.positions)
else:
# Update some variables if move is accepted
self.positions = deepcopy(new_positions)
self.setWaterStatus(resid, 1)
self.N += 1
self.n_accepted += 1
# Update energy
self.energy = final_energy
return None
def deletionMove(self):
"""
Carry out a random water deletion move on the current system
"""
# Choose a random water in the sphere to be deleted
resid, atom_indices = self.deleteRandomWater()
# Deletion may not be possible
if resid is None:
return None
# Switch water off
self.adjustSpecificWater(atom_indices, 0.0)
# Calculate energy of new state and acceptance probability
final_energy = self.context.getState(getEnergy=True).getPotentialEnergy()
acc_prob = self.N * math.exp(-self.B) * math.exp(-(final_energy - self.energy) / self.kT)
self.acceptance_probabilities.append(acc_prob)
if acc_prob < np.random.rand() or np.isnan(acc_prob):
# Switch the water back on if the move is rejected
self.adjustSpecificWater(atom_indices, 1.0)
else:
# Update some variables if move is accepted
self.setWaterStatus(resid, 0)
self.N -= 1
self.n_accepted += 1
# Update energy
self.energy = final_energy
return None
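# For reference, the instantaneous acceptance ratios used in insertionMove and
# deletionMove above can be written as small pure functions. This is only a
# sketch with plain floats (the dimensionless quantity dU/kT is passed in
# directly); the sampler itself works with simtk Quantities. Not part of the
# original module.
def _sketch_gcmc_acceptance(B, N, dU_over_kT):
    """Acceptance ratios matching the expressions used above, for a move whose
    (dimensionless) energy change is dU_over_kT.

    Insertion:  exp(B) * exp(-dU/kT) / (N + 1)
    Deletion:   N * exp(-B) * exp(-dU/kT)
    """
    import math
    acc_insert = math.exp(B) * math.exp(-dU_over_kT) / (N + 1)
    acc_delete = N * math.exp(-B) * math.exp(-dU_over_kT)
    # Either move would be accepted when its ratio is not smaller than a
    # uniform random number, exactly as in insertionMove/deletionMove
    return acc_insert, acc_delete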
########################################################################################################################
class NonequilibriumGCMCSphereSampler(GCMCSphereSampler):
"""
Class to carry out GCMC moves in OpenMM, using nonequilibrium candidate Monte Carlo (NCMC)
to boost acceptance rates
"""
def __init__(self, system, topology, temperature, integrator, adams=None,
excessChemicalPotential=-6.09*unit.kilocalories_per_mole, standardVolume=30.345*unit.angstroms**3,
adamsShift=0.0, nPertSteps=1, nPropStepsPerPert=1, timeStep=2 * unit.femtoseconds, lambdas=None,
ghostFile="gcmc-ghost-wats.txt", referenceAtoms=None, sphereRadius=None, sphereCentre=None,
log='gcmc.log', dcd=None, rst=None, overwrite=False):
"""
Initialise the object to be used for sampling NCMC-enhanced water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
integrator : simtk.openmm.CustomIntegrator
Integrator to use to propagate the dynamics of the system. Currently want to make sure that this
is the customised Langevin integrator found in openmmtools which uses BAOAB (VRORV) splitting.
adams : float
Adams B value for the simulation (dimensionless). Default is None,
if None, the B value is calculated from the box volume and chemical
potential
excessChemicalPotential : simtk.unit.Quantity
Excess chemical potential of the system that the simulation should be in equilibrium with, default is
-6.09 kcal/mol. This should be the hydration free energy of water, and may need to be changed for specific
simulation parameters.
standardVolume : simtk.unit.Quantity
Standard volume of water - corresponds to the volume per water molecule in bulk. The default value is 30.345 A^3
adamsShift : float
Shift the B value from Bequil, if B isn't explicitly set. Default is 0.0
nPertSteps : int
            Number of perturbation steps over which to shift lambda between 0 and 1 (or vice versa).
        nPropStepsPerPert : int
            Number of propagation steps to carry out for each perturbation step.
timeStep : simtk.unit.Quantity
Time step to use for non-equilibrium integration during the propagation steps
lambdas : list
Series of lambda values corresponding to the pathway over which the molecules are perturbed
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
referenceAtoms : list
List containing dictionaries describing the atoms to use as the centre of the GCMC region
Must contain 'name' and 'resname' as keys, and optionally 'resid' (recommended) and 'chain'
e.g. [{'name': 'C1', 'resname': 'LIG', 'resid': '123'}]
sphereRadius : simtk.unit.Quantity
Radius of the spherical GCMC region
sphereCentre : simtk.unit.Quantity
Coordinates around which the GCMC sphere is based
log : str
Name of the log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Indicates whether to overwrite already existing data
"""
# Initialise base class
GCMCSphereSampler.__init__(self, system, topology, temperature, adams=adams,
excessChemicalPotential=excessChemicalPotential, standardVolume=standardVolume,
adamsShift=adamsShift, ghostFile=ghostFile, referenceAtoms=referenceAtoms,
sphereRadius=sphereRadius, sphereCentre=sphereCentre, log=log, dcd=dcd, rst=rst,
overwrite=overwrite)
self.velocities = None # Need to store velocities for this type of sampling
# Load in extra NCMC variables
if lambdas is not None:
# Read in set of lambda values, if specified
assert np.isclose(lambdas[0], 0.0) and np.isclose(lambdas[-1], 1.0), "Lambda series must start at 0 and end at 1"
self.lambdas = lambdas
self.n_pert_steps = len(self.lambdas) - 1
else:
# Otherwise, assume they are evenly distributed
            self.n_pert_steps = nPertSteps
            self.lambdas = np.linspace(0.0, 1.0, self.n_pert_steps + 1)
self.n_prop_steps_per_pert = nPropStepsPerPert
self.time_step = timeStep.in_units_of(unit.picosecond)
self.protocol_time = (self.n_pert_steps + 1) * self.n_prop_steps_per_pert * self.time_step
self.logger.info("Each NCMC move will be executed over a total of {}".format(self.protocol_time))
self.insert_works = [] # Store work values of moves
self.delete_works = []
self.n_explosions = 0
self.n_left_sphere = 0 # Number of moves rejected because the water left the sphere
# Define a compound integrator
self.compound_integrator = openmm.CompoundIntegrator()
# Add the MD integrator
self.compound_integrator.addIntegrator(integrator)
# Create and add the nonequilibrium integrator
self.ncmc_integrator = NonequilibriumLangevinIntegrator(temperature=temperature,
collision_rate=1.0/unit.picosecond,
timestep=self.time_step, splitting="V R O R V")
self.compound_integrator.addIntegrator(self.ncmc_integrator)
# Set the compound integrator to the MD integrator
self.compound_integrator.setCurrentIntegrator(0)
self.logger.info("NonequilibriumGCMCSphereSampler object initialised")
def move(self, context, n=1):
"""
Carry out a nonequilibrium GCMC move
Parameters
----------
context : simtk.openmm.Context
Current context of the simulation
n : int
Number of moves to execute
"""
# Read in positions
self.context = context
state = self.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
self.velocities = deepcopy(state.getVelocities(asNumpy=True))
# Update GCMC region based on current state
self.updateGCMCSphere(state)
# Set to NCMC integrator
self.compound_integrator.setCurrentIntegrator(1)
# Execute moves
for i in range(n):
# Insert or delete a water, based on random choice
if np.random.randint(2) == 1:
# Attempt to insert a water
self.insertionMove()
else:
# Attempt to delete a water
self.deletionMove()
self.n_moves += 1
self.Ns.append(self.N)
# Set to MD integrator
self.compound_integrator.setCurrentIntegrator(0)
return None
def insertionMove(self):
"""
Carry out a nonequilibrium insertion move for a random water molecule
"""
# Store initial positions
old_positions = deepcopy(self.positions)
# Choose a random site in the sphere to insert a water
new_positions, resid, atom_indices = self.insertRandomWater()
# Need to update the context positions
self.context.setPositions(new_positions)
# Start running perturbation and propagation kernels
protocol_work = 0.0 * unit.kilocalories_per_mole
explosion = False
self.ncmc_integrator.step(self.n_prop_steps_per_pert)
for i in range(self.n_pert_steps):
state = self.context.getState(getEnergy=True)
energy_initial = state.getPotentialEnergy()
# Adjust interactions of this water
self.adjustSpecificWater(atom_indices, self.lambdas[i+1])
state = self.context.getState(getEnergy=True)
energy_final = state.getPotentialEnergy()
protocol_work += energy_final - energy_initial
# Propagate the system
try:
self.ncmc_integrator.step(self.n_prop_steps_per_pert)
            except Exception:
print("Caught explosion!")
explosion = True
self.n_explosions += 1
break
# Store the protocol work
self.insert_works.append(protocol_work)
# Update variables and GCMC sphere
self.setWaterStatus(resid, 1)
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.positions = state.getPositions(asNumpy=True)
self.updateGCMCSphere(state)
# Check which waters are in the sphere
wats_in_sphere = self.getWaterStatusResids(1)
# Calculate acceptance probability
if resid not in wats_in_sphere:
# If the inserted water leaves the sphere, the move cannot be reversed and therefore cannot be accepted
acc_prob = -1
self.n_left_sphere += 1
self.logger.info("Move rejected due to water leaving the GCMC sphere")
elif explosion:
acc_prob = -1
self.logger.info("Move rejected due to an instability during integration")
else:
# Calculate acceptance probability based on protocol work
acc_prob = math.exp(self.B) * math.exp(-protocol_work/self.kT) / self.N # Here N is the new value
self.acceptance_probabilities.append(acc_prob)
# Update or reset the system, depending on whether the move is accepted or rejected
if acc_prob < np.random.rand() or np.isnan(acc_prob):
# Need to revert the changes made if the move is to be rejected
self.adjustSpecificWater(atom_indices, 0.0)
self.context.setPositions(old_positions)
self.context.setVelocities(-self.velocities) # Reverse velocities on rejection
self.positions = deepcopy(old_positions)
self.velocities = -self.velocities
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.setWaterStatus(resid, 0)
self.updateGCMCSphere(state)
else:
# Update some variables if move is accepted
self.N = len(wats_in_sphere)
self.n_accepted += 1
state = self.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
self.velocities = deepcopy(state.getVelocities(asNumpy=True))
self.updateGCMCSphere(state)
return None
def deletionMove(self):
"""
Carry out a nonequilibrium deletion move for a random water molecule
"""
# Store initial positions
old_positions = deepcopy(self.positions)
# Choose a random water in the sphere to be deleted
resid, atom_indices = self.deleteRandomWater()
# Deletion may not be possible
if resid is None:
return None
# Start running perturbation and propagation kernels
protocol_work = 0.0 * unit.kilocalories_per_mole
explosion = False
self.ncmc_integrator.step(self.n_prop_steps_per_pert)
for i in range(self.n_pert_steps):
state = self.context.getState(getEnergy=True)
energy_initial = state.getPotentialEnergy()
# Adjust interactions of this water
self.adjustSpecificWater(atom_indices, self.lambdas[-(2+i)])
state = self.context.getState(getEnergy=True)
energy_final = state.getPotentialEnergy()
protocol_work += energy_final - energy_initial
# Propagate the system
try:
self.ncmc_integrator.step(self.n_prop_steps_per_pert)
            except Exception:
print("Caught explosion!")
explosion = True
self.n_explosions += 1
break
# Get the protocol work
self.delete_works.append(protocol_work)
# Update variables and GCMC sphere
# Leaving the water as 'on' here to check that the deleted water doesn't leave
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.positions = state.getPositions(asNumpy=True)
old_N = self.N
self.updateGCMCSphere(state)
# Check which waters are in the sphere
wats_in_sphere = self.getWaterStatusResids(1)
# Calculate acceptance probability
if resid not in wats_in_sphere:
# If the deleted water leaves the sphere, the move cannot be reversed and therefore cannot be accepted
acc_prob = 0
self.n_left_sphere += 1
self.logger.info("Move rejected due to water leaving the GCMC sphere")
elif explosion:
acc_prob = 0
self.logger.info("Move rejected due to an instability during integration")
else:
# Calculate acceptance probability based on protocol work
acc_prob = old_N * math.exp(-self.B) * math.exp(-protocol_work/self.kT) # N is the old value
self.acceptance_probabilities.append(acc_prob)
# Update or reset the system, depending on whether the move is accepted or rejected
if acc_prob < np.random.rand() or np.isnan(acc_prob):
# Need to revert the changes made if the move is to be rejected
self.adjustSpecificWater(atom_indices, 1.0)
self.context.setPositions(old_positions)
self.context.setVelocities(-self.velocities) # Reverse velocities on rejection
self.positions = deepcopy(old_positions)
self.velocities = -self.velocities
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.updateGCMCSphere(state)
else:
# Update some variables if move is accepted
self.setWaterStatus(resid, 0)
self.N = len(wats_in_sphere) - 1 # Accounting for the deleted water
self.n_accepted += 1
state = self.context.getState(getPositions=True, enforcePeriodicBox=True, getVelocities=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
self.velocities = deepcopy(state.getVelocities(asNumpy=True))
self.updateGCMCSphere(state)
return None
def reset(self):
"""
Reset counted values (such as number of total or accepted moves) to zero
"""
self.logger.info('Resetting any tracked variables...')
self.n_accepted = 0
self.n_moves = 0
self.Ns = []
self.acceptance_probabilities = []
# NCMC-specific variables
self.insert_works = []
self.delete_works = []
self.n_explosions = 0
self.n_left_sphere = 0
return None
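# A quick check of how the lambda schedule and protocol length set up in
# __init__ above fit together: n_pert_steps perturbations need n_pert_steps + 1
# lambda values, and with one propagation block before the loop plus one per
# perturbation the switching time is
# (n_pert_steps + 1) * n_prop_steps_per_pert * timestep. Plain-float sketch
# with illustrative default values, not part of the original module.
def _sketch_ncmc_protocol(n_pert_steps=99, n_prop_steps_per_pert=50, timestep_fs=2.0):
    """Reproduce the lambda schedule and total NCMC switching time."""
    lambdas = np.linspace(0.0, 1.0, n_pert_steps + 1)
    protocol_time_ps = (n_pert_steps + 1) * n_prop_steps_per_pert * timestep_fs * 1e-3
    # e.g. 99 perturbations with 50 x 2 fs of propagation each -> 10 ps per move
    return lambdas, protocol_time_ps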
########################################################################################################################
########################################################################################################################
########################################################################################################################
class GCMCSystemSampler(BaseGrandCanonicalMonteCarloSampler):
"""
Base class for carrying out GCMC moves in OpenMM, sampling the whole system with GCMC
"""
def __init__(self, system, topology, temperature, adams=None,
excessChemicalPotential=-6.09*unit.kilocalories_per_mole,
standardVolume=30.345*unit.angstroms**3, adamsShift=0.0, boxVectors=None,
ghostFile="gcmc-ghost-wats.txt", log='gcmc.log', dcd=None, rst=None, overwrite=False):
"""
Initialise the object to be used for sampling water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
adams : float
Adams B value for the simulation (dimensionless). Default is None,
if None, the B value is calculated from the box volume and chemical
potential
excessChemicalPotential : simtk.unit.Quantity
Excess chemical potential of the system that the simulation should be in equilibrium with, default is
-6.09 kcal/mol. This should be the hydration free energy of water, and may need to be changed for specific
simulation parameters.
standardVolume : simtk.unit.Quantity
Standard volume of water - corresponds to the volume per water molecule in bulk. The default value is 30.345 A^3
adamsShift : float
Shift the B value from Bequil, if B isn't explicitly set. Default is 0.0
boxVectors : simtk.unit.Quantity
Box vectors for the simulation cell
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
log : str
Log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Overwrite any data already present
"""
# Initialise base
BaseGrandCanonicalMonteCarloSampler.__init__(self, system, topology, temperature, ghostFile=ghostFile, log=log,
dcd=dcd, rst=rst, overwrite=overwrite)
# Read in simulation box lengths
self.simulation_box = np.array([boxVectors[0, 0]._value,
boxVectors[1, 1]._value,
boxVectors[2, 2]._value]) * unit.nanometer
volume = self.simulation_box[0] * self.simulation_box[1] * self.simulation_box[2]
# Set or calculate the Adams value for the simulation
if adams is not None:
self.B = adams
else:
# Calculate Bequil from the chemical potential and volume
self.B = excessChemicalPotential / self.kT + math.log(volume / standardVolume)
# Shift B from Bequil if necessary
self.B += adamsShift
self.logger.info("Simulating at an Adams (B) value of {}".format(self.B))
self.logger.info("GCMCSystemSampler object initialised")
def initialise(self, context, ghostResids):
"""
Prepare the GCMC sphere for simulation by loading the coordinates from a
Context object.
Parameters
----------
context : simtk.openmm.Context
Current context of the simulation
ghostResids : list
List of residue IDs corresponding to the ghost waters added
"""
# Load context into sampler
self.context = context
# Load in positions and box vectors from context
state = self.context.getState(getPositions=True, enforcePeriodicBox=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
box_vectors = state.getPeriodicBoxVectors(asNumpy=True)
# Check the symmetry of the box - currently only tolerate cuboidal boxes
# All off-diagonal box vector components must be zero
for i in range(3):
for j in range(3):
if i == j:
continue
if not np.isclose(box_vectors[i, j]._value, 0.0):
self.raiseError("grand only accepts cuboidal simulation cells at this time.")
self.simulation_box = np.array([box_vectors[0, 0]._value,
box_vectors[1, 1]._value,
box_vectors[2, 2]._value]) * unit.nanometer
# Delete ghost waters
self.deleteGhostWaters(ghostResids)
# Count N
self.N = np.sum(self.water_status)
return None
def insertRandomWater(self):
"""
Translate a random ghost to a random point in the simulation box to allow subsequent insertion
Returns
-------
new_positions : simtk.unit.Quantity
Positions following the 'insertion' of the ghost water
insert_water : int
Residue ID of the water to insert
atom_indices : list
List of the atom IDs for this molecule
"""
# Select a ghost water to insert
ghost_wats = self.getWaterStatusResids(0)
# Check that there are any ghosts present
if len(ghost_wats) == 0:
self.raiseError("No ghost water molecules left, so insertion moves cannot occur - add more ghost waters")
insert_water = np.random.choice(ghost_wats)
atom_indices = []
for resid, residue in enumerate(self.topology.residues()):
if resid == insert_water:
for atom in residue.atoms():
atom_indices.append(atom.index)
# Select a point to insert the water (based on O position)
insert_point = np.random.rand(3) * self.simulation_box
# Generate a random rotation matrix
R = random_rotation_matrix()
new_positions = deepcopy(self.positions)
for i, index in enumerate(atom_indices):
# Translate coordinates to an origin defined by the oxygen atom, and normalise
atom_position = self.positions[index] - self.positions[atom_indices[0]]
# Rotate about the oxygen position
if i != 0:
vec_length = np.linalg.norm(atom_position)
atom_position = atom_position / vec_length
# Rotate coordinates & restore length
atom_position = vec_length * np.dot(R, atom_position) * unit.nanometer
# Translate to new position
new_positions[index] = atom_position + insert_point
return new_positions, insert_water, atom_indices
def deleteRandomWater(self):
"""
Choose a random water to be deleted
Returns
-------
delete_water : int
Resid of the water to delete
atom_indices : list
List of the atom IDs for this molecule
"""
# Cannot carry out deletion if there are no GCMC waters on
gcmc_wats = self.getWaterStatusResids(1)
if len(gcmc_wats) == 0:
return None, None
# Select a water residue to delete
delete_water = np.random.choice(gcmc_wats)
atom_indices = []
for resid, residue in enumerate(self.topology.residues()):
if resid == delete_water:
for atom in residue.atoms():
atom_indices.append(atom.index)
return delete_water, atom_indices
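# The Adams value computed in __init__ above, B = mu_ex / kT + ln(V / V0)
# (optionally shifted), can be reproduced with plain numbers. Rough sketch
# only: kT is taken as ~0.593 kcal/mol (about 298 K) and the box edge is an
# illustrative value; the sampler itself derives these from simtk Quantities.
# Not part of the original module.
def _sketch_adams_value(box_edge_angstrom=40.0, mu_ex_kcal=-6.09,
                        standard_volume_angstrom3=30.345, kT_kcal=0.593,
                        adams_shift=0.0):
    """Plain-float version of the B calculation in GCMCSystemSampler.__init__."""
    import math
    volume = box_edge_angstrom ** 3
    B = mu_ex_kcal / kT_kcal + math.log(volume / standard_volume_angstrom3)
    return B + adams_shift  # roughly -2.6 for a 40 A cubic box at these settings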
########################################################################################################################
class StandardGCMCSystemSampler(GCMCSystemSampler):
"""
Class to carry out instantaneous GCMC moves in OpenMM
"""
def __init__(self, system, topology, temperature, adams=None, excessChemicalPotential=-6.09*unit.kilocalories_per_mole,
standardVolume=30.345*unit.angstroms**3, adamsShift=0.0, boxVectors=None,
ghostFile="gcmc-ghost-wats.txt", log='gcmc.log', dcd=None, rst=None, overwrite=False):
"""
Initialise the object to be used for sampling instantaneous water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
adams : float
Adams B value for the simulation (dimensionless). Default is None,
if None, the B value is calculated from the box volume and chemical
potential
excessChemicalPotential : simtk.unit.Quantity
Excess chemical potential of the system that the simulation should be in equilibrium with, default is
-6.09 kcal/mol. This should be the hydration free energy of water, and may need to be changed for specific
simulation parameters.
standardVolume : simtk.unit.Quantity
Standard volume of water - corresponds to the volume per water molecule in bulk. The default value is 30.345 A^3
adamsShift : float
Shift the B value from Bequil, if B isn't explicitly set. Default is 0.0
boxVectors : simtk.unit.Quantity
Box vectors for the simulation cell
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
log : str
Name of the log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Indicates whether to overwrite already existing data
"""
# Initialise base class - don't need any more initialisation for the instantaneous sampler
GCMCSystemSampler.__init__(self, system, topology, temperature, adams=adams,
excessChemicalPotential=excessChemicalPotential, standardVolume=standardVolume,
adamsShift=adamsShift, boxVectors=boxVectors, ghostFile=ghostFile, log=log,
dcd=dcd, rst=rst, overwrite=overwrite)
self.energy = None # Need to save energy
self.logger.info("StandardGCMCSystemSampler object initialised")
def move(self, context, n=1):
"""
Execute a number of GCMC moves on the current system
Parameters
----------
context : simtk.openmm.Context
Current context of the simulation
n : int
Number of moves to execute
"""
# Read in positions
self.context = context
state = self.context.getState(getPositions=True, enforcePeriodicBox=True, getEnergy=True)
self.positions = deepcopy(state.getPositions(asNumpy=True))
self.energy = state.getPotentialEnergy()
# Execute moves
for i in range(n):
# Insert or delete a water, based on random choice
if np.random.randint(2) == 1:
# Attempt to insert a water
self.insertionMove()
else:
# Attempt to delete a water
self.deletionMove()
self.n_moves += 1
self.Ns.append(self.N)
return None
def insertionMove(self):
"""
Carry out a random water insertion move on the current system
"""
# Insert a ghost water to a random site
new_positions, resid, atom_indices = self.insertRandomWater()
# Recouple this water
self.adjustSpecificWater(atom_indices, 1.0)
self.context.setPositions(new_positions)
# Calculate new system energy and acceptance probability
final_energy = self.context.getState(getEnergy=True).getPotentialEnergy()
acc_prob = math.exp(self.B) * math.exp(-(final_energy - self.energy) / self.kT) / (self.N + 1)
self.acceptance_probabilities.append(acc_prob)
if acc_prob < np.random.rand() or np.isnan(acc_prob):
# Need to revert the changes made if the move is to be rejected
# Switch off nonbonded interactions involving this water
self.adjustSpecificWater(atom_indices, 0.0)
self.context.setPositions(self.positions) # Not sure this is necessary...
else:
# Update some variables if move is accepted
self.positions = deepcopy(new_positions)
self.setWaterStatus(resid, 1)
self.N += 1
self.n_accepted += 1
# Update energy
self.energy = final_energy
return None
def deletionMove(self):
"""
Carry out a random water deletion move on the current system
"""
# Choose a random water to be deleted
resid, atom_indices = self.deleteRandomWater()
# Deletion may not be possible
if resid is None:
return None
# Switch water off
self.adjustSpecificWater(atom_indices, 0.0)
# Calculate energy of new state and acceptance probability
final_energy = self.context.getState(getEnergy=True).getPotentialEnergy()
acc_prob = self.N * math.exp(-self.B) * math.exp(-(final_energy - self.energy) / self.kT)
self.acceptance_probabilities.append(acc_prob)
if acc_prob < np.random.rand() or np.isnan(acc_prob):
# Switch the water back on if the move is rejected
self.adjustSpecificWater(atom_indices, 1.0)
else:
# Update some variables if move is accepted
self.setWaterStatus(resid, 0)
self.N -= 1
self.n_accepted += 1
# Update energy
self.energy = final_energy
return None
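# Hedged usage sketch: how a StandardGCMCSystemSampler might be driven
# alongside MD. It assumes `system`, `topology`, `positions` and `box_vectors`
# were prepared elsewhere and that ghost water molecules have already been
# added to the topology, with their residue ids collected in `ghost_resids`
# (the helper used for that step is outside this sketch). Only methods shown
# in this module (initialise, move) and standard OpenMM objects are used; the
# function and argument names here are illustrative, not part of the original
# module.
def _example_gcmc_md_protocol(system, topology, positions, box_vectors, ghost_resids,
                              n_cycles=10, md_steps=500, gcmc_moves=200):
    from simtk.openmm import LangevinIntegrator
    from simtk.openmm.app import Simulation
    # Create the sampler before the Simulation so any changes it makes to the
    # System are reflected in the Context
    sampler = StandardGCMCSystemSampler(system=system, topology=topology,
                                        temperature=300 * unit.kelvin,
                                        boxVectors=box_vectors)
    integrator = LangevinIntegrator(300 * unit.kelvin, 1.0 / unit.picosecond,
                                    0.002 * unit.picoseconds)
    simulation = Simulation(topology, system, integrator)
    simulation.context.setPositions(positions)
    sampler.initialise(simulation.context, ghost_resids)
    for _ in range(n_cycles):
        simulation.step(md_steps)                     # equilibrium dynamics
        sampler.move(simulation.context, gcmc_moves)  # grand canonical moves
    return sampler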
########################################################################################################################
class NonequilibriumGCMCSystemSampler(GCMCSystemSampler):
"""
Class to carry out GCMC moves in OpenMM, using nonequilibrium candidate Monte Carlo (NCMC)
to boost acceptance rates
"""
def __init__(self, system, topology, temperature, integrator, adams=None,
excessChemicalPotential=-6.09*unit.kilocalories_per_mole, standardVolume=30.345*unit.angstroms**3,
adamsShift=0.0, nPertSteps=1, nPropStepsPerPert=1, timeStep=2 * unit.femtoseconds, boxVectors=None,
ghostFile="gcmc-ghost-wats.txt", log='gcmc.log', dcd=None, rst=None, overwrite=False,
lambdas=None):
"""
Initialise the object to be used for sampling NCMC-enhanced water insertion/deletion moves
Parameters
----------
system : simtk.openmm.System
System object to be used for the simulation
topology : simtk.openmm.app.Topology
Topology object for the system to be simulated
temperature : simtk.unit.Quantity
Temperature of the simulation, must be in appropriate units
integrator : simtk.openmm.CustomIntegrator
Integrator to use to propagate the dynamics of the system. Currently want to make sure that this
is the customised Langevin integrator found in openmmtools which uses BAOAB (VRORV) splitting.
adams : float
Adams B value for the simulation (dimensionless). Default is None,
if None, the B value is calculated from the box volume and chemical
potential
excessChemicalPotential : simtk.unit.Quantity
Excess chemical potential of the system that the simulation should be in equilibrium with, default is
-6.09 kcal/mol. This should be the hydration free energy of water, and may need to be changed for specific
simulation parameters.
standardVolume : simtk.unit.Quantity
Standard volume of water - corresponds to the volume per water molecule in bulk. The default value is 30.345 A^3
adamsShift : float
Shift the B value from Bequil, if B isn't explicitly set. Default is 0.0
nPertSteps : int
            Number of perturbation steps over which to shift lambda between 0 and 1 (or vice versa).
        nPropStepsPerPert : int
            Number of propagation steps to carry out for each perturbation step.
timeStep : simtk.unit.Quantity
Time step to use for non-equilibrium integration during the propagation steps
lambdas : list
Series of lambda values corresponding to the pathway over which the molecules are perturbed
boxVectors : simtk.unit.Quantity
Box vectors for the simulation cell
ghostFile : str
Name of a file to write out the residue IDs of ghost water molecules. This is
useful if you want to visualise the sampling, as you can then remove these waters
from view, as they are non-interacting. Default is 'gcmc-ghost-wats.txt'
log : str
Name of the log file to write out
dcd : str
Name of the DCD file to write the system out to
rst : str
Name of the restart file to write out (.pdb or .rst7)
overwrite : bool
Indicates whether to overwrite already existing data
"""
# Initialise base class
GCMCSystemSampler.__init__(self, system, topology, temperature, adams=adams,
excessChemicalPotential=excessChemicalPotential, standardVolume=standardVolume,
adamsShift=adamsShift, boxVectors=boxVectors, ghostFile=ghostFile, log=log, dcd=dcd,
rst=rst, overwrite=overwrite)
# Load in extra NCMC variables
if lambdas is not None:
# Read in set of lambda values, if specified
            assert np.isclose(lambdas[0], 0.0) and np.isclose(lambdas[-1], 1.0), "Lambda series must start at 0 and end at 1"
from satpy import Scene, find_files_and_readers
import sys
import os
import subprocess as sp
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from pyhdf.SD import SD, SDC
from itertools import repeat
import pandas as pd
import pickle
import datetime
from time import time
from pyorbital.orbital import get_observer_look
from pyorbital.astronomy import get_alt_az
sys.path.append(
os.path.dirname(os.path.realpath(__file__))
)
from him8analysis import read_h8_folder, halve_res, quarter_res, generate_band_arrays
from caliop_tools import number_to_bit, custom_feature_conversion, calipso_to_datetime
### Global Variables ###
band_dict={
'1': 0.4703, # all channels are in microns
'2': 0.5105,
'3': 0.6399,
'4': 0.8563,
'5': 1.6098,
'6': 2.257,
'7': 3.8848,
'8': 6.2383,
'9': 6.9395,
'10': 7.3471,
'11': 8.5905,
'12': 9.6347,
'13': 10.4029,
'14': 11.2432,
'15': 12.3828,
'16': 13.2844
}
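# band_dict maps Himawari-8 AHI band numbers (as strings) to their central
# wavelengths in microns. A minimal, illustrative lookup helper (not part of
# the original script):
def _example_band_lookup(band=13):
    """Return the AHI central wavelength (in microns) for a band number."""
    return band_dict[str(int(band))]  # e.g. band 13 -> 10.4029 um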
### General Tools ###
def write_list(input_list, list_filename, list_dir):
"""
Writes a list to a .txt file. Specify the directory it is stored in.
    :param input_list: list type. The list of items to write, one per line.
    :param list_filename: str type. The filename of the .txt file to be written.
    :param list_dir: str type. Full path to the directory the file is stored in.
    :return: None. The list is written to the .txt file in the given directory.
"""
    if list_filename[-4:] != '.txt':  # Append the '.txt' extension if it is missing
list_filename += '.txt'
full_filename = os.path.join(list_dir, list_filename)
print(full_filename)
with open(full_filename, 'w') as f:
f.writelines('%s\n' % item for item in input_list)
print('List stored')
def read_list(list_filename, list_dir):
"""
Reads a list from a .txt file. Specify the directory it is stored in.
:param list_filename: str type. The filename of the .txt file to be read.
:param list_dir: str type. Full path to the directory the file is stored in.
:return: list type. List of strings from the .txt file.
"""
    if list_filename[-4:] != '.txt':  # Append the '.txt' extension if it is missing
list_filename += '.txt'
full_name = os.path.join(list_dir, list_filename)
with open(full_name, 'r') as f: # Open and read the .txt file
list_of_lines = [line.rstrip() for line in f.readlines()] # For each line, remove newline character and store in a list
return list_of_lines
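# A small round-trip sketch for the two helpers above: write a list of folder
# names to a .txt file and read it back. The filename and directory are
# illustrative only; not part of the original script.
def _example_list_roundtrip(list_dir='.'):
    names = ['20200101_0000', '20200101_0010', '20200101_0020']
    write_list(names, 'example_list', list_dir)       # '.txt' is appended automatically
    recovered = read_list('example_list', list_dir)   # newlines are stripped on read
    assert recovered == names
    return recovered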
### Processing tools ###
def find_possible_collocated_him_folders(caliop_overpass):
"""
Will find Himawari folders that fall within the time range of the given CALIOP profile.
:param caliop_overpass: Loaded CALIOP .hdf file to collocate with Himawari data
:return: list of str type. Names of the folders that should collocate with the
given CALIOP profile
"""
cal_time = caliop_overpass.select('Profile_UTC_Time').get()
cal_time = calipso_to_datetime(cal_time)
start = cal_time[0][0]
end = cal_time[-1][-1]
print('Raw Start: %s' % datetime.datetime.strftime(start, '%Y%m%d_%H%M'))
print('Raw End: %s' % datetime.datetime.strftime(end, '%Y%m%d_%H%M'))
cal_lats = caliop_overpass.select('Latitude').get()
cal_lons = caliop_overpass.select('Longitude').get()
hemisphere_mask = (cal_lats <= 81.1) & (cal_lats >= -81.1) & \
(((cal_lons >= 60.6) & (cal_lons <= 180.)) | \
((cal_lons >= -180.) & (cal_lons <= -138.0))) # Due to looking at Eastern Hemisphere
cal_time = cal_time[hemisphere_mask]
print('MASKED START LAT/LON: (%s, %s)' % (cal_lats[hemisphere_mask][0], cal_lons[hemisphere_mask][0]))
print('MASKED END LAT/LON: (%s, %s)' % (cal_lats[hemisphere_mask][-1], cal_lons[hemisphere_mask][-1]))
if len(cal_time) == 0:
return None
print(len(cal_time))
start = cal_time[0]
end = cal_time[-1]
print('Masked Start: %s' % datetime.datetime.strftime(start, '%Y%m%d_%H%M'))
print('Masked End: %s' % datetime.datetime.strftime(end, '%Y%m%d_%H%M'))
start -= datetime.timedelta(minutes=start.minute % 10,
seconds=start.second,
microseconds=start.microsecond)
end -= datetime.timedelta(minutes=end.minute % 10,
seconds=end.second,
microseconds=end.microsecond)
print('First Folder: %s' % start)
print('Last Folder: %s' % end)
folder_names = []
while start <= end:
folder_name = datetime.datetime.strftime(start, '%Y%m%d_%H%M')
folder_names.append(folder_name)
start += datetime.timedelta(minutes=10)
return folder_names
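# The folder names generated above are 10-minute Himawari slots (YYYYMMDD_HHMM).
# The flooring logic can be checked on its own with a standalone timestamp;
# the example time below is purely illustrative. Not part of the original script.
def _example_floor_to_himawari_slot(timestamp=datetime.datetime(2020, 1, 1, 3, 47, 23)):
    floored = timestamp - datetime.timedelta(minutes=timestamp.minute % 10,
                                             seconds=timestamp.second,
                                             microseconds=timestamp.microsecond)
    return datetime.datetime.strftime(floored, '%Y%m%d_%H%M')  # -> '20200101_0340'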
def get_him_folders(him_names, data_dir):
"""
Finds the Himawari folders given in the list in the mdss on NCI.
:param him_names: list of str types of Himawari folder names.
:param data_dir: str type. Full path to the directory where the
data will be stored.
:return: Saves and un-tars Himawari data from mdss into a readable
folder system for further analysis
"""
for name in him_names:
year = name[:4]
month = name[4:6]
day = name[6:8]
filename = 'HS_H08_%s_FLDK.tar' % name
path = os.path.join('satellite/raw/ahi/FLDK', year, month, day, filename)
if sp.getoutput('mdss -P rr5 ls %s' % path) == path:
print('%s available' % name)
destination = os.path.join(data_dir, name)
if not os.path.isdir(destination):
os.mkdir(destination)
os.system('mdss -P rr5 get %s %s' % (path, destination))
else:
print('%s unavailable' % name)
def clear_him_folders(him_names, data_dir):
"""
Finds the Himawari folders given in the list in the mdss on NCI.
:param him_names: list of str types of Himawari folder names.
:param data_dir: str type. Full path to the directory where the
data will be stored.
:return: Removes the Himawari data folders w/n the him_name list
from /g/data/k10/dr1709/ahi/ directory.
"""
for name in him_names:
destination = os.path.join(data_dir, name)
if os.path.isdir(destination):
os.system('rm -r %s' % destination)
def define_collocation_area(geo_lons, geo_lats, central_geo_lon,
lidar_lons, lidar_lats, spatial_tolerance):
### Shift meridian to be defined by geostationary satellite ###
shifted_geo_lons = geo_lons - central_geo_lon # For geostationary satellite coordinates
shifted_geo_lons[shifted_geo_lons < -180.] += 360.
shifted_geo_lons[shifted_geo_lons > 180.] -= 360.
shifted_lidar_lons = lidar_lons - central_geo_lon # For active satellite coordinates
shifted_lidar_lons[shifted_lidar_lons < -180.] += 360.
shifted_lidar_lons[shifted_lidar_lons > 180.] -= 360.
### Find limits defined by active satellite ###
min_lidar_lat, max_lidar_lat = np.nanmin(lidar_lats), np.nanmax(lidar_lats)
    min_lidar_lon, max_lidar_lon = np.nanmin(shifted_lidar_lons), np.nanmax(shifted_lidar_lons)
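# The meridian shift at the top of define_collocation_area (the function is
# truncated above) re-centres longitudes on the geostationary sub-satellite
# longitude and wraps them back into [-180, 180]. A standalone numpy sketch;
# the 140.7 degree value is illustrative only. Not part of the original script.
def _example_shift_longitudes(lons=np.array([-175.0, 100.0, 170.0]), central_geo_lon=140.7):
    shifted = lons - central_geo_lon
    shifted[shifted < -180.] += 360.
    shifted[shifted > 180.] -= 360.
    return shifted  # array([ 44.3, -40.7,  29.3])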
# -*- coding: utf-8 -*-
"""
Module to generate surrogates of a spike train by randomising its spike times
in different ways (see [1]). Different methods destroy different features of
the original data:
* randomise_spikes:
randomly reposition all spikes inside the time interval (t_start, t_stop).
Keeps spike count, generates Poisson spike trains with time-stationary
firing rate
* dither_spikes:
dither each spike time around original position by a random amount;
keeps spike count and firing rates computed on a slow temporal scale;
destroys ISIs, making them more exponentially distributed
* dither_spike_train:
dither the whole input spike train (i.e. all spikes equally) by a random
amount; keeps spike count, ISIs, and firing rates computed on a slow
temporal scale
* jitter_spikes:
discretise the full time interval (t_start, t_stop) into time segments
and locally randomise the spike times (see randomise_spikes) inside each
segment. Keeps spike count inside each segment and creates locally Poisson
spike trains with locally time-stationary rates
* shuffle_isis:
shuffle the inter-spike intervals (ISIs) of the spike train randomly,
keeping the first spike time fixed and generating the others from the
new sequence of ISIs. Keeps spike count and ISIs, flattens the firing rate
profile
[1] Louis et al (2010) Surrogate Spike Train Generation Through Dithering in
Operational Time. Front Comput Neurosci. 2010; 4: 127.
:original implementation by: <NAME> [<EMAIL>]
:copyright: Copyright 2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
import numpy as np
import quantities as pq
import neo
import elephant.statistics as es
def dither_spikes(spiketrain, dither, n=1, decimals=None, edges=True):
"""
Generates surrogates of a spike train by spike dithering.
The surrogates are obtained by uniformly dithering times around the
original position. The dithering is performed independently for each
surrogate.
The surrogates retain the :attr:`t_start` and :attr:`t_stop` of the
original `SpikeTrain` object. Spikes moved beyond this range are lost or
moved to the range's ends, depending on the parameter edge.
Parameters
----------
spiketrain : neo.SpikeTrain
The spike train from which to generate the surrogates
dither : quantities.Quantity
Amount of dithering. A spike at time t is placed randomly within
]t-dither, t+dither[.
n : int (optional)
Number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
Number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
edges : bool (optional)
For surrogate spikes falling outside the range
`[spiketrain.t_start, spiketrain.t_stop)`, whether to drop them out
(for edges = True) or set that to the range's closest end
(for edges = False).
Default: True
Returns
-------
list of neo.SpikeTrain
A list of `neo.SpikeTrain`, each obtained from :attr:`spiketrain` by
randomly dithering its spikes. The range of the surrogate spike trains
is the same as :attr:`spiketrain`.
Examples
--------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print dither_spikes(st, dither = 20*pq.ms) # doctest: +SKIP
[<SpikeTrain(array([ 96.53801903, 248.57047376, 601.48865767,
815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print dither_spikes(st, dither = 20*pq.ms, n=2) # doctest: +SKIP
[<SpikeTrain(array([ 104.24942044, 246.0317873 , 584.55938657,
818.84446913]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 111.36693058, 235.15750163, 618.87388515,
786.1807108 ]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print dither_spikes(st, dither = 20*pq.ms, decimals=0) # doctest: +SKIP
[<SpikeTrain(array([ 81., 242., 595., 799.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Transform spiketrain into a Quantity object (needed for matrix algebra)
data = spiketrain.view(pq.Quantity)
# Main: generate the surrogates
surr = data.reshape((1, len(data))) + 2 * dither * np.random.random_sample(
(n, len(data))) - dither
# Round the surrogate data to decimal position, if requested
if decimals is not None:
surr = surr.round(decimals)
if edges is False:
# Move all spikes outside [spiketrain.t_start, spiketrain.t_stop] to
# the range's ends
surr = np.minimum(np.maximum(surr.base,
(spiketrain.t_start / spiketrain.units).base),
(spiketrain.t_stop / spiketrain.units).base) * spiketrain.units
else:
# Leave out all spikes outside [spiketrain.t_start, spiketrain.t_stop]
tstart, tstop = (spiketrain.t_start / spiketrain.units).base, \
(spiketrain.t_stop / spiketrain.units).base
surr = [s[np.all([s >= tstart, s < tstop], axis=0)] * spiketrain.units
for s in surr.base]
# Return the surrogates as SpikeTrains
return [neo.SpikeTrain(s,
t_start=spiketrain.t_start,
t_stop=spiketrain.t_stop).rescale(spiketrain.units)
for s in surr]
def randomise_spikes(spiketrain, n=1, decimals=None):
"""
Generates surrogates of a spike trains by spike time randomisation.
The surrogates are obtained by keeping the spike count of the original
`SpikeTrain` object, but placing them randomly into the interval
`[spiketrain.t_start, spiketrain.t_stop]`.
This generates independent Poisson neo.SpikeTrain objects (exponentially
distributed inter-spike intervals) while keeping the spike count as in
:attr:`spiketrain`.
Parameters
----------
spiketrain : neo.SpikeTrain
The spike train from which to generate the surrogates
n : int (optional)
Number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
Number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of neo.SpikeTrain object(s)
A list of `neo.SpikeTrain` objects, each obtained from :attr:`spiketrain`
by randomly dithering its spikes. The range of the surrogate spike trains
is the same as :attr:`spiketrain`.
Examples
--------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print randomise_spikes(st) # doctest: +SKIP
[<SpikeTrain(array([ 131.23574603, 262.05062963, 549.84371387,
940.80503832]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print randomise_spikes(st, n=2) # doctest: +SKIP
[<SpikeTrain(array([ 84.53274955, 431.54011743, 733.09605806,
852.32426583]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 197.74596726, 528.93517359, 567.44599968,
775.97843799]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print randomise_spikes(st, decimals=0) # doctest: +SKIP
[<SpikeTrain(array([ 29., 667., 720., 774.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Create surrogate spike trains as rows of a Quantity array
sts = ((spiketrain.t_stop - spiketrain.t_start) *
np.random.random(size=(n, len(spiketrain))) +
spiketrain.t_start).rescale(spiketrain.units)
# Round the surrogate data to decimal position, if requested
if decimals is not None:
sts = sts.round(decimals)
# Convert the Quantity array to a list of SpikeTrains, and return them
return [neo.SpikeTrain(np.sort(st), t_start=spiketrain.t_start, t_stop=spiketrain.t_stop)
for st in sts]
def shuffle_isis(spiketrain, n=1, decimals=None):
"""
Generates surrogates of a neo.SpikeTrain object by inter-spike-interval
(ISI) shuffling.
The surrogates are obtained by randomly sorting the ISIs of the given input
:attr:`spiketrain`. This generates independent `SpikeTrain` object(s) with
same ISI distribution and spike count as in :attr:`spiketrain`, while
destroying temporal dependencies and firing rate profile.
Parameters
----------
spiketrain : neo.SpikeTrain
The spike train from which to generate the surrogates
n : int (optional)
Number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
Number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
Returns
-------
list of SpikeTrain
A list of spike trains, each obtained from `spiketrain` by random ISI
shuffling. The range of the surrogate `neo.SpikeTrain` objects is the
same as :attr:`spiketrain`.
Examples
--------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>> print shuffle_isis(st) # doctest: +SKIP
[<SpikeTrain(array([ 200., 350., 700., 800.]) * ms,
[0.0 ms, 1000.0 ms])>]
>>> print shuffle_isis(st, n=2) # doctest: +SKIP
[<SpikeTrain(array([ 100., 300., 450., 800.]) * ms,
[0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 200., 350., 700., 800.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
if len(spiketrain) > 0:
isi0 = spiketrain[0] - spiketrain.t_start
ISIs = np.hstack([isi0, es.isi(spiketrain)])
# Round the ISIs to decimal position, if requested
if decimals is not None:
ISIs = ISIs.round(decimals)
# Create list of surrogate spike trains by random ISI permutation
sts = []
for i in range(n):
surr_times = np.cumsum(np.random.permutation(ISIs)) *\
spiketrain.units + spiketrain.t_start
sts.append(neo.SpikeTrain(
surr_times, t_start=spiketrain.t_start,
t_stop=spiketrain.t_stop))
else:
sts = []
empty_train = neo.SpikeTrain([]*spiketrain.units,
t_start=spiketrain.t_start,
t_stop=spiketrain.t_stop)
for i in range(n):
sts.append(empty_train)
return sts
def dither_spike_train(spiketrain, shift, n=1, decimals=None, edges=True):
"""
Generates surrogates of a neo.SpikeTrain by spike train shifting.
The surrogates are obtained by shifting the whole spike train by a
random amount (independent for each surrogate). Thus, ISIs and temporal
correlations within the spike train are kept. For small shifts, the
firing rate profile is also kept with reasonable accuracy.
The surrogates retain the :attr:`t_start` and :attr:`t_stop` of the
:attr:`spiketrain`. Spikes moved beyond this range are lost or moved to
the range's ends, depending on the parameter edge.
Parameters
----------
spiketrain : neo.SpikeTrain
The spike train from which to generate the surrogates
shift : quantities.Quantity
Amount of shift. spiketrain is shifted by a random amount uniformly
drawn from the range ]-shift, +shift[.
n : int (optional)
Number of surrogates to be generated.
Default: 1
decimals : int or None (optional)
Number of decimal points for every spike time in the surrogates
If None, machine precision is used.
Default: None
edges : bool
For surrogate spikes falling outside the range `[spiketrain.t_start,
spiketrain.t_stop)`, whether to drop them out (for edges = True) or set
that to the range's closest end (for edges = False).
Default: True
Returns
-------
list of SpikeTrain
A list of spike trains, each obtained from spiketrain by randomly
dithering its spikes. The range of the surrogate spike trains is the
same as :attr:`spiketrain`.
Examples
--------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([100, 250, 600, 800]*pq.ms, t_stop=1*pq.s)
>>>
>>> print dither_spike_train(st, shift = 20*pq.ms) # doctest: +SKIP
[<SpikeTrain(array([ 96.53801903, 248.57047376, 601.48865767,
815.67209811]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print dither_spike_train(st, shift = 20*pq.ms, n=2) # doctest: +SKIP
[<SpikeTrain(array([ 92.89084054, 242.89084054, 592.89084054,
792.89084054]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 84.61079043, 234.61079043, 584.61079043,
784.61079043]) * ms, [0.0 ms, 1000.0 ms])>]
>>> print dither_spike_train(st, shift = 20*pq.ms, decimals=0) # doctest: +SKIP
[<SpikeTrain(array([ 82., 232., 582., 782.]) * ms,
[0.0 ms, 1000.0 ms])>]
"""
# Transform spiketrain into a Quantity object (needed for matrix algebra)
data = spiketrain.view(pq.Quantity)
# Main: generate the surrogates by spike train shifting
surr = data.reshape((1, len(data))) + 2 * shift * \
np.random.random_sample((n, 1)) - shift
# Round the surrogate data to decimal position, if requested
if decimals is not None:
surr = surr.round(decimals)
if edges is False:
# Move all spikes outside [spiketrain.t_start, spiketrain.t_stop] to
# the range's ends
surr = np.minimum(np.maximum(surr.base,
(spiketrain.t_start / spiketrain.units).base),
(spiketrain.t_stop / spiketrain.units).base) * spiketrain.units
else:
# Leave out all spikes outside [spiketrain.t_start, spiketrain.t_stop]
tstart, tstop = (spiketrain.t_start / spiketrain.units).base,\
(spiketrain.t_stop / spiketrain.units).base
surr = [s[np.all([s >= tstart, s < tstop], axis=0)] * spiketrain.units
for s in surr.base]
# Return the surrogates as SpikeTrains
return [neo.SpikeTrain(s, t_start=spiketrain.t_start,
t_stop=spiketrain.t_stop).rescale(spiketrain.units)
for s in surr]
def jitter_spikes(spiketrain, binsize, n=1):
"""
Generates surrogates of a :attr:`spiketrain` by spike jittering.
The surrogates are obtained by defining adjacent time bins spanning the
:attr:`spiketrain` range, and random re-positioning (independently for each
surrogate) each spike in the time bin it falls into.
    The surrogates retain the :attr:`t_start` and :attr:`t_stop` of the
    :attr:`spiketrain`. Note that within each time bin the surrogate
    `neo.SpikeTrain` objects are locally Poissonian (the inter-spike intervals
    are exponentially distributed).
Parameters
----------
spiketrain : neo.SpikeTrain
The spike train from which to generate the surrogates
binsize : quantities.Quantity
Size of the time bins within which to randomise the spike times.
        Note: the last bin extends to `spiketrain.t_stop` and might have a
        width different from `binsize`.
n : int (optional)
Number of surrogates to be generated.
Default: 1
Returns
-------
list of SpikeTrain
A list of spike trains, each obtained from `spiketrain` by randomly
replacing its spikes within bins of user-defined width. The range of the
surrogate spike trains is the same as `spiketrain`.
Examples
--------
>>> import quantities as pq
>>> import neo
>>>
>>> st = neo.SpikeTrain([80, 150, 320, 480]*pq.ms, t_stop=1*pq.s)
    >>> print(jitter_spikes(st, binsize=100 * pq.ms))  # doctest: +SKIP
[<SpikeTrain(array([ 98.82898293, 178.45805954, 346.93993867,
461.34268507]) * ms, [0.0 ms, 1000.0 ms])>]
    >>> print(jitter_spikes(st, binsize=100 * pq.ms, n=2))  # doctest: +SKIP
[<SpikeTrain(array([ 97.15720041, 199.06945744, 397.51928207,
402.40065162]) * ms, [0.0 ms, 1000.0 ms])>,
<SpikeTrain(array([ 80.74513157, 173.69371317, 338.05860962,
495.48869981]) * ms, [0.0 ms, 1000.0 ms])>]
    >>> print(jitter_spikes(st, binsize=100 * pq.ms))  # doctest: +SKIP
[<SpikeTrain(array([ 4.55064897e-01, 1.31927046e+02, 3.57846265e+02,
4.69370604e+02]) * ms, [0.0 ms, 1000.0 ms])>]
"""
# Define standard time unit; all time Quantities are converted to
# scalars after being rescaled to this unit, to use the power of numpy
std_unit = binsize.units
# Compute bin edges for the jittering procedure
    # !: the last bin extends to spiketrain.t_stop and might have
    # size != binsize
start_dl = spiketrain.t_start.rescale(std_unit).magnitude
stop_dl = spiketrain.t_stop.rescale(std_unit).magnitude
bin_edges = start_dl + np.arange(start_dl, stop_dl, binsize.magnitude)
bin_edges = np.hstack([bin_edges, stop_dl])
# Create n surrogates with spikes randomly placed in the interval (0,1)
surr_poiss01 = np.random.random_sample((n, len(spiketrain)))
# Compute the bin id of each spike
bin_ids = np.array(
(spiketrain.view(pq.Quantity) /
binsize).rescale(pq.dimensionless).magnitude, dtype=int)
# Compute the size of each time bin (as a numpy array)
    bin_sizes_dl = np.diff(bin_edges)
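    # NOTE: the original snippet is truncated at this point. The lines below
    # are a hedged sketch (not necessarily the verbatim library code) of how
    # the jittered spike times could be assembled from the quantities above:
    # each spike stays in its bin, at the bin's left edge plus a uniform
    # fraction (surr_poiss01) of that bin's width.
    surr = bin_edges[bin_ids] + surr_poiss01 * bin_sizes_dl[bin_ids]
    # Return the surrogates as SpikeTrains in the original units
    return [neo.SpikeTrain(s * std_unit,
                           t_start=spiketrain.t_start,
                           t_stop=spiketrain.t_stop).rescale(spiketrain.units)
            for s in surr]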
import types
import warnings
import pickle
import re
from copy import deepcopy
from functools import partial, wraps
from inspect import signature
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import joblib
from . import IS_PYPY
from .. import config_context
from ._testing import _get_args
from ._testing import assert_raise_message
from ._testing import assert_array_equal
from ._testing import assert_array_almost_equal
from ._testing import assert_allclose
from ._testing import assert_allclose_dense_sparse
from ._testing import assert_array_less
from ._testing import set_random_state
from ._testing import SkipTest
from ._testing import ignore_warnings
from ._testing import create_memmap_backed_data
from ._testing import raises
from . import is_scalar_nan
from ..linear_model import LinearRegression
from ..linear_model import LogisticRegression
from ..linear_model import RANSACRegressor
from ..linear_model import Ridge
from ..base import (
clone,
ClusterMixin,
is_classifier,
is_regressor,
is_outlier_detector,
RegressorMixin,
_is_pairwise,
)
from ..metrics import accuracy_score, adjusted_rand_score, f1_score
from ..random_projection import BaseRandomProjection
from ..feature_selection import SelectKBest
from ..pipeline import make_pipeline
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..exceptions import SkipTestWarning
from ..model_selection import train_test_split
from ..model_selection import ShuffleSplit
from ..model_selection._validation import _safe_split
from ..metrics.pairwise import rbf_kernel, linear_kernel, pairwise_distances
from ..utils.fixes import threadpool_info
from ..utils.validation import check_is_fitted
from . import shuffle
from ._tags import (
_DEFAULT_TAGS,
_safe_tags,
)
from .validation import has_fit_parameter, _num_samples
from ..preprocessing import StandardScaler
from ..preprocessing import scale
from ..datasets import (
load_iris,
make_blobs,
make_multilabel_classification,
make_regression,
)
REGRESSION_DATASET = None
CROSS_DECOMPOSITION = ["PLSCanonical", "PLSRegression", "CCA", "PLSSVD"]
def _yield_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
pairwise = _is_pairwise(estimator)
yield check_no_attributes_set_in_init
yield check_estimators_dtypes
yield check_fit_score_takes_y
if has_fit_parameter(estimator, "sample_weight"):
yield check_sample_weights_pandas_series
yield check_sample_weights_not_an_array
yield check_sample_weights_list
if not pairwise:
# We skip pairwise because the data is not pairwise
yield check_sample_weights_shape
yield check_sample_weights_not_overwritten
yield partial(check_sample_weights_invariance, kind="ones")
yield partial(check_sample_weights_invariance, kind="zeros")
yield check_estimators_fit_returns_self
yield partial(check_estimators_fit_returns_self, readonly_memmap=True)
    # Check that all estimators yield informative messages when
    # trained on empty datasets
if not tags["no_validation"]:
yield check_complex_data
yield check_dtype_object
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION:
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if not tags["allow_nan"] and not tags["no_validation"]:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if pairwise:
# Check that pairwise estimator throws error on non-square input
yield check_nonsquare_error
yield check_estimators_overwrite_params
if hasattr(estimator, "sparsify"):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
yield check_estimator_get_tags_default_keys
def _yield_classifier_checks(classifier):
tags = _safe_tags(classifier)
# test classifiers can handle non-array data and pandas objects
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_classifier_multioutput
# basic consistency testing
yield check_classifiers_train
yield partial(check_classifiers_train, readonly_memmap=True)
yield partial(check_classifiers_train, readonly_memmap=True, X_dtype="float32")
yield check_classifiers_regression_target
if tags["multilabel"]:
yield check_classifiers_multilabel_representation_invariance
yield check_classifiers_multilabel_output_format_predict
yield check_classifiers_multilabel_output_format_predict_proba
yield check_classifiers_multilabel_output_format_decision_function
if not tags["no_validation"]:
yield check_supervised_y_no_nan
if not tags["multioutput_only"]:
yield check_supervised_y_2d
if tags["requires_fit"]:
yield check_estimators_unfitted
if "class_weight" in classifier.get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=FutureWarning)
def check_supervised_y_no_nan(name, estimator_orig):
    # Check that the estimator raises an error when y contains NaN or inf values.
estimator = clone(estimator_orig)
rng = np.random.RandomState(888)
X = rng.standard_normal(size=(10, 5))
for value in [np.nan, np.inf]:
y = np.full(10, value)
y = _enforce_estimator_tags_y(estimator, y)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# In scikit-learn we want the error message to mention the input
# name and be specific about the kind of unexpected value.
if np.isinf(value):
match = (
r"Input (y|Y) contains infinity or a value too large for"
r" dtype\('float64'\)."
)
else:
match = r"Input (y|Y) contains NaN."
else:
# Do not impose a particular error message to third-party libraries.
match = None
err_msg = (
f"Estimator {name} should have raised error on fitting array y with inf"
" value."
)
with raises(ValueError, match=match, err_msg=err_msg):
estimator.fit(X, y)
def _yield_regressor_checks(regressor):
tags = _safe_tags(regressor)
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield partial(check_regressors_train, readonly_memmap=True)
yield partial(check_regressors_train, readonly_memmap=True, X_dtype="float32")
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
if tags["multioutput"]:
yield check_regressor_multioutput
yield check_regressors_no_decision_function
if not tags["no_validation"] and not tags["multioutput_only"]:
yield check_supervised_y_2d
yield check_supervised_y_no_nan
name = regressor.__class__.__name__
if name != "CCA":
# check that the regressor handles int input
yield check_regressors_int
if tags["requires_fit"]:
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(transformer):
tags = _safe_tags(transformer)
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not tags["no_validation"]:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
if tags["preserves_dtype"]:
yield check_transformer_preserve_dtypes
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, key="stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = [
"Isomap",
"KernelPCA",
"LocallyLinearEmbedding",
"RandomizedLasso",
"LogisticRegressionCV",
]
name = transformer.__class__.__name__
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(clusterer):
yield check_clusterer_compute_labels_predict
name = clusterer.__class__.__name__
if name not in ("WardAgglomeration", "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield partial(check_clustering, readonly_memmap=True)
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_outliers_checks(estimator):
# checks for outlier detectors that have a fit_predict method
if hasattr(estimator, "fit_predict"):
yield check_outliers_fit_predict
# checks for estimators that can be used on a test set
if hasattr(estimator, "predict"):
yield check_outliers_train
yield partial(check_outliers_train, readonly_memmap=True)
# test outlier detectors can handle non-array data
yield check_classifier_data_not_an_array
# test if NotFittedError is raised
if _safe_tags(estimator, key="requires_fit"):
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(estimator):
name = estimator.__class__.__name__
tags = _safe_tags(estimator)
if "2darray" not in tags["X_types"]:
warnings.warn(
"Can't test estimator {} which requires input of type {}".format(
name, tags["X_types"]
),
SkipTestWarning,
)
return
if tags["_skip_test"]:
warnings.warn(
"Explicit SKIP via _skip_test tag for estimator {}.".format(name),
SkipTestWarning,
)
return
for check in _yield_checks(estimator):
yield check
if is_classifier(estimator):
for check in _yield_classifier_checks(estimator):
yield check
if is_regressor(estimator):
for check in _yield_regressor_checks(estimator):
yield check
if hasattr(estimator, "transform"):
for check in _yield_transformer_checks(estimator):
yield check
if isinstance(estimator, ClusterMixin):
for check in _yield_clustering_checks(estimator):
yield check
if is_outlier_detector(estimator):
for check in _yield_outliers_checks(estimator):
yield check
yield check_parameters_default_constructible
yield check_methods_sample_order_invariance
yield check_methods_subset_invariance
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_get_params_invariance
yield check_set_params
yield check_dict_unchanged
yield check_dont_overwrite_parameters
yield check_fit_idempotent
yield check_fit_check_is_fitted
if not tags["no_validation"]:
yield check_n_features_in
yield check_fit1d
yield check_fit2d_predict1d
if tags["requires_y"]:
yield check_requires_y_none
if tags["requires_positive_X"]:
yield check_fit_non_negative
def _get_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyword arguments.
`_get_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`.
Returns
-------
id : str or None
See Also
--------
check_estimator
"""
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v) for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
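# Hedged illustration of the ids produced by _get_check_estimator_ids, assuming
# it is called on a partial check and on an estimator instance (the concrete
# arguments below are examples only):
#
#   >>> _get_check_estimator_ids(
#   ...     partial(check_classifiers_train, readonly_memmap=True))  # doctest: +SKIP
#   'check_classifiers_train(readonly_memmap=True)'
#   >>> _get_check_estimator_ids(LogisticRegression(C=10))  # doctest: +SKIP
#   'LogisticRegression(C=10)'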
def _construct_instance(Estimator):
"""Construct Estimator instance if possible."""
required_parameters = getattr(Estimator, "_required_parameters", [])
if len(required_parameters):
if required_parameters in (["estimator"], ["base_estimator"]):
            # `RANSACRegressor` will raise an error with any model other
            # than `LinearRegression` if we don't fix the `min_samples`
            # parameter. For the common tests, we enforce using
            # `LinearRegression`, which is the default estimator in
            # `RANSACRegressor`, instead of `Ridge`.
if issubclass(Estimator, RANSACRegressor):
estimator = Estimator(LinearRegression())
elif issubclass(Estimator, RegressorMixin):
estimator = Estimator(Ridge())
else:
estimator = Estimator(LogisticRegression(C=1))
elif required_parameters in (["estimators"],):
# Heterogeneous ensemble classes (i.e. stacking, voting)
if issubclass(Estimator, RegressorMixin):
estimator = Estimator(
estimators=[("est1", Ridge(alpha=0.1)), ("est2", Ridge(alpha=1))]
)
else:
estimator = Estimator(
estimators=[
("est1", LogisticRegression(C=0.1)),
("est2", LogisticRegression(C=1)),
]
)
else:
msg = (
f"Can't instantiate estimator {Estimator.__name__} "
f"parameters {required_parameters}"
)
# raise additional warning to be shown by pytest
warnings.warn(msg, SkipTestWarning)
raise SkipTest(msg)
else:
estimator = Estimator()
return estimator
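# Hedged illustration of the construction rules above (estimator names are
# examples only): _construct_instance(Ridge) simply returns Ridge(), while a
# meta-estimator declaring ["estimator"] or ["base_estimator"] in its
# _required_parameters, such as RANSACRegressor in the branch above, is
# returned wrapping a fixed inner model (LinearRegression or Ridge).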
def _maybe_mark_xfail(estimator, check, pytest):
# Mark (estimator, check) pairs as XFAIL if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_skip(), but this one is used by
# @parametrize_with_checks() instead of check_estimator()
should_be_marked, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_marked:
return estimator, check
else:
return pytest.param(estimator, check, marks=pytest.mark.xfail(reason=reason))
def _maybe_skip(estimator, check):
# Wrap a check so that it's skipped if needed (see conditions in
# _should_be_skipped_or_marked())
# This is similar to _maybe_mark_xfail(), but this one is used by
# check_estimator() instead of @parametrize_with_checks which requires
# pytest
should_be_skipped, reason = _should_be_skipped_or_marked(estimator, check)
if not should_be_skipped:
return check
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
@wraps(check)
def wrapped(*args, **kwargs):
raise SkipTest(
f"Skipping {check_name} for {estimator.__class__.__name__}: {reason}"
)
return wrapped
def _should_be_skipped_or_marked(estimator, check):
# Return whether a check should be skipped (when using check_estimator())
# or marked as XFAIL (when using @parametrize_with_checks()), along with a
# reason.
# Currently, a check should be skipped or marked if
# the check is in the _xfail_checks tag of the estimator
check_name = check.func.__name__ if isinstance(check, partial) else check.__name__
xfail_checks = _safe_tags(estimator, key="_xfail_checks") or {}
if check_name in xfail_checks:
return True, xfail_checks[check_name]
return False, "placeholder reason that will never be used"
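# Hedged illustration: an estimator whose tags contain, for example,
#   _xfail_checks = {"check_methods_subset_invariance": "flaky on this data"}
# would have that check marked as XFAIL by @parametrize_with_checks and skipped
# by check_estimator(), with the dictionary value reported as the reason.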
def parametrize_with_checks(estimators):
"""Pytest specific decorator for parametrizing estimator checks.
The `id` of each check is set to be a pprint version of the estimator
and the name of the check with its keyword arguments.
    This allows using `pytest -k` to specify which tests to run::
pytest test_check_estimators.py -k check_estimators_fit_returns_self
Parameters
----------
    estimators : list of estimator instances
        Estimators to generate checks for.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24. Pass an instance instead.
.. versionadded:: 0.24
Returns
-------
decorator : `pytest.mark.parametrize`
See Also
--------
check_estimator : Check if estimator adheres to scikit-learn conventions.
Examples
--------
>>> from sklearn.utils.estimator_checks import parametrize_with_checks
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.tree import DecisionTreeRegressor
>>> @parametrize_with_checks([LogisticRegression(),
... DecisionTreeRegressor()])
... def test_sklearn_compatible_estimator(estimator, check):
... check(estimator)
"""
import pytest
if any(isinstance(est, type) for est in estimators):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
def checks_generator():
for estimator in estimators:
name = type(estimator).__name__
for check in _yield_all_checks(estimator):
check = partial(check, name)
yield _maybe_mark_xfail(estimator, check, pytest)
return pytest.mark.parametrize(
"estimator, check", checks_generator(), ids=_get_check_estimator_ids
)
def check_estimator(Estimator, generate_only=False):
"""Check if estimator adheres to scikit-learn conventions.
This function will run an extensive test-suite for input validation,
    shapes, etc., making sure that the estimator complies with `scikit-learn`
conventions as detailed in :ref:`rolling_your_own_estimator`.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Setting `generate_only=True` returns a generator that yields (estimator,
check) tuples where the check can be called independently from each
other, i.e. `check(estimator)`. This allows all checks to be run
independently and report the checks that are failing.
scikit-learn provides a pytest specific decorator,
:func:`~sklearn.utils.parametrize_with_checks`, making it easier to test
multiple estimators.
Parameters
----------
Estimator : estimator object
Estimator instance to check.
.. versionchanged:: 0.24
Passing a class was deprecated in version 0.23, and support for
classes was removed in 0.24.
generate_only : bool, default=False
When `False`, checks are evaluated when `check_estimator` is called.
When `True`, `check_estimator` returns a generator that yields
(estimator, check) tuples. The check is run by calling
`check(estimator)`.
.. versionadded:: 0.22
Returns
-------
checks_generator : generator
Generator that yields (estimator, check) tuples. Returned when
`generate_only=True`.
See Also
--------
parametrize_with_checks : Pytest specific decorator for parametrizing estimator
checks.
"""
if isinstance(Estimator, type):
msg = (
"Passing a class was deprecated in version 0.23 "
"and isn't supported anymore from 0.24."
"Please pass an instance instead."
)
raise TypeError(msg)
estimator = Estimator
name = type(estimator).__name__
def checks_generator():
for check in _yield_all_checks(estimator):
check = _maybe_skip(estimator, check)
yield estimator, partial(check, name)
if generate_only:
return checks_generator()
for estimator, check in checks_generator():
try:
check(estimator)
except SkipTest as exception:
# SkipTest is thrown when pandas can't be imported, or by checks
# that are in the xfail_checks tag
warnings.warn(str(exception), SkipTestWarning)
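# Hedged usage sketch for the two modes described in the docstring above
# (LogisticRegression is only an illustrative choice of estimator):
#
#   >>> check_estimator(LogisticRegression())  # doctest: +SKIP
#   >>> for estimator, check in check_estimator(
#   ...         LogisticRegression(), generate_only=True):  # doctest: +SKIP
#   ...     check(estimator)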
def _regression_dataset():
global REGRESSION_DATASET
if REGRESSION_DATASET is None:
X, y = make_regression(
n_samples=200,
n_features=10,
n_informative=1,
bias=5.0,
noise=20,
random_state=42,
)
X = StandardScaler().fit_transform(X)
REGRESSION_DATASET = X, y
return REGRESSION_DATASET
def _set_checking_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
name = estimator.__class__.__name__
if "n_iter" in params and name != "TSNE":
estimator.set_params(n_iter=5)
if "max_iter" in params:
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR, LinearSVC
if estimator.__class__.__name__ in ["LinearSVR", "LinearSVC"]:
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == "NMF":
estimator.set_params(max_iter=500)
# MLP
if estimator.__class__.__name__ in ["MLPClassifier", "MLPRegressor"]:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if name == "MeanShift":
# In the case of check_fit2d_1sample, bandwidth is set to None and
# is thus estimated. De facto it is 0.0 as a single sample is provided
# and this makes the test fails. Hence we give it a placeholder value.
estimator.set_params(bandwidth=1.0)
if name == "TruncatedSVD":
# TruncatedSVD doesn't run with n_components = n_features
# This is ugly :-/
estimator.n_components = 1
if name == "LassoLarsIC":
# Noise variance estimation does not work when `n_samples < n_features`.
# We need to provide the noise variance explicitly.
estimator.set_params(noise_variance=1.0)
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = min(estimator.n_clusters, 2)
if hasattr(estimator, "n_best"):
estimator.n_best = 1
if name == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=0.5)
if name == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
        # Due to the Johnson-Lindenstrauss lemma and the often very small
        # number of samples, the number of components of the random matrix
        # projection will probably be greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=2)
if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
if name in ("HistGradientBoostingClassifier", "HistGradientBoostingRegressor"):
# The default min_samples_leaf (20) isn't appropriate for small
# datasets (only very shallow trees are built) that the checks use.
estimator.set_params(min_samples_leaf=5)
if name == "DummyClassifier":
# the default strategy prior would output constant predictions and fail
# for check_classifiers_predictions
estimator.set_params(strategy="stratified")
# Speed-up by reducing the number of CV or splits for CV estimators
loo_cv = ["RidgeCV", "RidgeClassifierCV"]
if name not in loo_cv and hasattr(estimator, "cv"):
estimator.set_params(cv=3)
if hasattr(estimator, "n_splits"):
estimator.set_params(n_splits=3)
if name == "OneHotEncoder":
estimator.set_params(handle_unknown="ignore")
if name in CROSS_DECOMPOSITION:
estimator.set_params(n_components=1)
class _NotAnArray:
"""An object that is convertible to an array.
Parameters
----------
data : array-like
The data.
"""
def __init__(self, data):
self.data = np.asarray(data)
def __array__(self, dtype=None):
return self.data
def __array_function__(self, func, types, args, kwargs):
if func.__name__ == "may_share_memory":
return True
raise TypeError("Don't want to call array_function {}!".format(func.__name__))
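# Hedged usage sketch: _NotAnArray is not an ndarray itself, but np.asarray
# falls back to its __array__ method, e.g.
#
#   >>> np.asarray(_NotAnArray([[1, 2], [3, 4]]))  # doctest: +SKIP
#   array([[1, 2],
#          [3, 4]])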
def _is_pairwise_metric(estimator):
"""Returns True if estimator accepts pairwise metric.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
    out : bool
        True if the estimator's `metric` parameter is set to "precomputed",
        False otherwise.
"""
metric = getattr(estimator, "metric", None)
return bool(metric == "precomputed")
def _pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel):
if _is_pairwise_metric(estimator):
return pairwise_distances(X, metric="euclidean")
if _is_pairwise(estimator):
return kernel(X, X)
return X
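# Hedged illustration: for a non-pairwise estimator X is returned unchanged,
# e.g. _pairwise_estimator_convert_X(X, Ridge()) keeps the (n_samples,
# n_features) shape of X; for a pairwise estimator it becomes a square
# (n_samples, n_samples) matrix, either euclidean distances (precomputed
# metric) or kernel(X, X) (linear_kernel by default).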
def _generate_sparse_matrix(X_csr):
"""Generate sparse matrices with {32,64}bit indices of diverse format.
Parameters
----------
X_csr: CSR Matrix
Input matrix in CSR format.
Returns
-------
    out : iterator of (str, matrix) tuples
        Matrices in the formats ['csr', 'dok', 'lil', 'dia', 'bsr', 'csc',
        'coo', 'coo_64', 'csc_64', 'csr_64'], each paired with its format name.
"""
assert X_csr.format == "csr"
yield "csr", X_csr.copy()
for sparse_format in ["dok", "lil", "dia", "bsr", "csc", "coo"]:
yield sparse_format, X_csr.asformat(sparse_format)
    # Generate large indices matrix only if it's supported by scipy
X_coo = X_csr.asformat("coo")
X_coo.row = X_coo.row.astype("int64")
X_coo.col = X_coo.col.astype("int64")
yield "coo_64", X_coo
for sparse_format in ["csc", "csr"]:
X = X_csr.asformat(sparse_format)
X.indices = X.indices.astype("int64")
X.indptr = X.indptr.astype("int64")
yield sparse_format + "_64", X
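# Hedged usage sketch: iterating over the generator yields (format, matrix)
# pairs, e.g.
#
#   >>> X_csr = sparse.csr_matrix(np.eye(3))
#   >>> [fmt for fmt, _ in _generate_sparse_matrix(X_csr)]  # doctest: +SKIP
#   ['csr', 'dok', 'lil', 'dia', 'bsr', 'csc', 'coo', 'coo_64', 'csc_64', 'csr_64']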
def check_estimator_sparse_data(name, estimator_orig):
rng = np.random.RandomState(0)
X = rng.rand(40, 3)
X[X < 0.8] = 0
X = _pairwise_estimator_convert_X(X, estimator_orig)
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(int)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
tags = _safe_tags(estimator_orig)
for matrix_format, X in _generate_sparse_matrix(X_csr):
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
if name in ["Scaler", "StandardScaler"]:
estimator.set_params(with_mean=False)
# fit and predict
if "64" in matrix_format:
err_msg = (
f"Estimator {name} doesn't seem to support {matrix_format} "
"matrix, and is not failing gracefully, e.g. by using "
"check_array(X, accept_large_sparse=False)"
)
else:
err_msg = (
f"Estimator {name} doesn't seem to fail gracefully on sparse "
"data: error message should state explicitly that sparse "
"input is not supported if this is not the case."
)
with raises(
(TypeError, ValueError),
match=["sparse", "Sparse"],
may_pass=True,
err_msg=err_msg,
):
with ignore_warnings(category=FutureWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
if tags["multioutput_only"]:
assert pred.shape == (X.shape[0], 1)
else:
assert pred.shape == (X.shape[0],)
if hasattr(estimator, "predict_proba"):
probs = estimator.predict_proba(X)
if tags["binary_only"]:
expected_probs_shape = (X.shape[0], 2)
else:
expected_probs_shape = (X.shape[0], 4)
assert probs.shape == expected_probs_shape
@ignore_warnings(category=FutureWarning)
def check_sample_weights_pandas_series(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = clone(estimator_orig)
try:
import pandas as pd
X = np.array(
[
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
]
)
X = pd.DataFrame(_pairwise_estimator_convert_X(X, estimator_orig))
y = pd.Series([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = pd.Series([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = pd.DataFrame(y)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError(
"Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name)
)
except ImportError:
raise SkipTest(
"pandas is not installed: not testing for "
"input of type pandas.Series to class weight."
)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_not_an_array(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type _NotAnArray in the 'fit' function.
estimator = clone(estimator_orig)
X = np.array(
[
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[3, 1],
[3, 2],
[3, 3],
[3, 4],
]
)
X = _NotAnArray(_pairwise_estimator_convert_X(X, estimator_orig))
y = _NotAnArray([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 2, 2])
weights = _NotAnArray([1] * 12)
if _safe_tags(estimator, key="multioutput_only"):
y = _NotAnArray(y.data.reshape(-1, 1))
estimator.fit(X, y, sample_weight=weights)
@ignore_warnings(category=(FutureWarning))
def check_sample_weights_list(name, estimator_orig):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)), estimator_orig)
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator, y)
sample_weight = [3] * n_samples
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=FutureWarning)
def check_sample_weights_shape(name, estimator_orig):
# check that estimators raise an error if sample_weight
# shape mismatches the input
estimator = clone(estimator_orig)
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
]
)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2])
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y, sample_weight=np.ones(len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones(2 * len(y)))
with raises(ValueError):
estimator.fit(X, y, sample_weight=np.ones((len(y), 2)))
@ignore_warnings(category=FutureWarning)
def check_sample_weights_invariance(name, estimator_orig, kind="ones"):
# For kind="ones" check that the estimators yield same results for
# unit weights and no weights
# For kind="zeros" check that setting sample_weight to 0 is equivalent
# to removing corresponding samples.
estimator1 = clone(estimator_orig)
estimator2 = clone(estimator_orig)
set_random_state(estimator1, random_state=0)
set_random_state(estimator2, random_state=0)
X1 = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.float64,
)
y1 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
if kind == "ones":
X2 = X1
y2 = y1
sw2 = np.ones(shape=len(y1))
err_msg = (
f"For {name} sample_weight=None is not equivalent to sample_weight=ones"
)
elif kind == "zeros":
# Construct a dataset that is very different to (X, y) if weights
# are disregarded, but identical to (X, y) given weights.
X2 = np.vstack([X1, X1 + 1])
y2 = np.hstack([y1, 3 - y1])
sw2 = np.ones(shape=len(y1) * 2)
sw2[len(y1) :] = 0
X2, y2, sw2 = shuffle(X2, y2, sw2, random_state=0)
err_msg = (
f"For {name}, a zero sample_weight is not equivalent to removing the sample"
)
else: # pragma: no cover
raise ValueError
y1 = _enforce_estimator_tags_y(estimator1, y1)
y2 = _enforce_estimator_tags_y(estimator2, y2)
estimator1.fit(X1, y=y1, sample_weight=None)
estimator2.fit(X2, y=y2, sample_weight=sw2)
for method in ["predict", "predict_proba", "decision_function", "transform"]:
if hasattr(estimator_orig, method):
X_pred1 = getattr(estimator1, method)(X1)
X_pred2 = getattr(estimator2, method)(X1)
assert_allclose_dense_sparse(X_pred1, X_pred2, err_msg=err_msg)
def check_sample_weights_not_overwritten(name, estimator_orig):
# check that estimators don't override the passed sample_weight parameter
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
X = np.array(
[
[1, 3],
[1, 3],
[1, 3],
[1, 3],
[2, 1],
[2, 1],
[2, 1],
[2, 1],
[3, 3],
[3, 3],
[3, 3],
[3, 3],
[4, 1],
[4, 1],
[4, 1],
[4, 1],
],
dtype=np.float64,
)
y = np.array([1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2], dtype=int)
y = _enforce_estimator_tags_y(estimator, y)
sample_weight_original = np.ones(y.shape[0])
sample_weight_original[0] = 10.0
sample_weight_fit = sample_weight_original.copy()
estimator.fit(X, y, sample_weight=sample_weight_fit)
err_msg = "{name} overwrote the original `sample_weight` given during fit"
assert_allclose(sample_weight_fit, sample_weight_original, err_msg=err_msg)
@ignore_warnings(category=(FutureWarning, UserWarning))
def check_dtype_object(name, estimator_orig):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = _pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig)
X = X.astype(object)
tags = _safe_tags(estimator_orig)
y = (X[:, 0] * 4).astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
with raises(Exception, match="Unknown label type", may_pass=True):
estimator.fit(X, y.astype(object))
if "string" not in tags["X_types"]:
X[0, 0] = {"foo": "bar"}
msg = "argument must be a string.* number"
with raises(TypeError, match=msg):
estimator.fit(X, y)
else:
# Estimators supporting string will not call np.asarray to convert the
# data to numeric and therefore, the error will not be raised.
# Checking for each element dtype in the input array will be costly.
# Refer to #11401 for full discussion.
estimator.fit(X, y)
def check_complex_data(name, estimator_orig):
rng = np.random.RandomState(42)
# check that estimators raise an exception on providing complex data
X = rng.uniform(size=10) + 1j * rng.uniform(size=10)
X = X.reshape(-1, 1)
# Something both valid for classification and regression
y = rng.randint(low=0, high=2, size=10) + 1j
estimator = clone(estimator_orig)
set_random_state(estimator, random_state=0)
with raises(ValueError, match="Complex data not supported"):
estimator.fit(X, y)
@ignore_warnings
def check_dict_unchanged(name, estimator_orig):
    # This estimator raises
    # "ValueError: Found array with 0 feature(s) (shape=(23, 0)) while a
    # minimum of 1 is required."
if name in ["SpectralCoclustering"]:
return
rnd = np.random.RandomState(0)
if name in ["RANSACRegressor"]:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function", "predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert estimator.__dict__ == dict_before, (
"Estimator changes __dict__ during %s" % method
)
def _is_public_parameter(attr):
return not (attr.startswith("_") or attr.endswith("_"))
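# e.g. _is_public_parameter("alpha") is True, while _is_public_parameter("coef_")
# and _is_public_parameter("_cache") are False (trailing and leading underscores
# mark fitted attributes and private state, respectively).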
@ignore_warnings(category=FutureWarning)
def check_dont_overwrite_parameters(name, estimator_orig):
# check that fit method only changes or sets private attributes
if hasattr(estimator_orig.__init__, "deprecated_original"):
# to not check deprecated classes
return
estimator = clone(estimator_orig)
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [
key for key in dict_after_fit.keys() if _is_public_parameter(key)
]
attrs_added_by_fit = [
key for key in public_keys_after_fit if key not in dict_before_fit.keys()
]
# check that fit doesn't add any public attribute
assert not attrs_added_by_fit, (
"Estimator adds public attribute(s) during"
" the fit method."
" Estimators are only allowed to add private attributes"
" either started with _ or ended"
" with _ but %s added"
% ", ".join(attrs_added_by_fit)
)
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [
key
for key in public_keys_after_fit
if (dict_before_fit[key] is not dict_after_fit[key])
]
assert not attrs_changed_by_fit, (
"Estimator changes public attribute(s) during"
" the fit method. Estimators are only allowed"
" to change attributes started"
" or ended with _, but"
" %s changed"
% ", ".join(attrs_changed_by_fit)
)
@ignore_warnings(category=FutureWarning)
def check_fit2d_predict1d(name, estimator_orig):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function", "predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(
ValueError, "Reshape your data", getattr(estimator, method), X[0]
)
def _apply_on_subsets(func, X):
# apply function on the whole set and on mini batches
result_full = func(X)
n_features = X.shape[1]
result_by_batch = [func(batch.reshape(1, n_features)) for batch in X]
# func can output tuple (e.g. score_samples)
if type(result_full) == tuple:
result_full = result_full[0]
result_by_batch = list(map(lambda x: x[0], result_by_batch))
if sparse.issparse(result_full):
result_full = result_full.A
result_by_batch = [x.A for x in result_by_batch]
return np.ravel(result_full), np.ravel(result_by_batch)
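# Hedged usage sketch (mirroring check_methods_subset_invariance below): for a
# fitted estimator `est`,
#
#   result_full, result_by_batch = _apply_on_subsets(est.predict, X)
#
# compares predictions on the whole X with row-by-row predictions; the caller
# then asserts that the two raveled results are allclose.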
@ignore_warnings(category=FutureWarning)
def check_methods_subset_invariance(name, estimator_orig):
    # check that methods give invariant results whether applied
    # to the whole set or to mini-batches
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in [
"predict",
"transform",
"decision_function",
"score_samples",
"predict_proba",
]:
msg = ("{method} of {name} is not invariant when applied to a subset.").format(
method=method, name=name
)
if hasattr(estimator, method):
result_full, result_by_batch = _apply_on_subsets(
getattr(estimator, method), X
)
assert_allclose(result_full, result_by_batch, atol=1e-7, err_msg=msg)
@ignore_warnings(category=FutureWarning)
def check_methods_sample_order_invariance(name, estimator_orig):
    # check that methods give invariant results when applied
    # to the same data in a different sample order
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(np.int64)
if _safe_tags(estimator_orig, key="binary_only"):
y[y == 2] = 1
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 2
set_random_state(estimator, 1)
estimator.fit(X, y)
idx = np.random.permutation(X.shape[0])
for method in [
"predict",
"transform",
"decision_function",
"score_samples",
"predict_proba",
]:
msg = (
"{method} of {name} is not invariant when applied to a dataset"
"with different sample order."
).format(method=method, name=name)
if hasattr(estimator, method):
assert_allclose_dense_sparse(
getattr(estimator, method)(X)[idx],
getattr(estimator, method)(X[idx]),
atol=1e-9,
err_msg=msg,
)
@ignore_warnings
def check_fit2d_1sample(name, estimator_orig):
    # Check that fitting a 2d array with only one sample either works or
    # raises an informative error. The error message should either mention
    # the number of samples or the number of classes.
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
# min_cluster_size cannot be less than the data size for OPTICS.
if name == "OPTICS":
estimator.set_params(min_samples=1)
msgs = [
"1 sample",
"n_samples = 1",
"n_samples=1",
"one sample",
"1 class",
"one class",
]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit2d_1feature(name, estimator_orig):
    # check fitting a 2d array with only 1 feature either works or raises
    # an informative error message
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = X[:, 0].astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
# ensure two labels in subsample for RandomizedLogisticRegression
if name == "RandomizedLogisticRegression":
estimator.sample_fraction = 1
# ensure non skipped trials for RANSACRegressor
if name == "RANSACRegressor":
estimator.residual_threshold = 0.5
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator, 1)
msgs = [r"1 feature\(s\)", "n_features = 1", "n_features=1"]
with raises(ValueError, match=msgs, may_pass=True):
estimator.fit(X, y)
@ignore_warnings
def check_fit1d(name, estimator_orig):
# check fitting 1d X array raises a ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(int)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
with raises(ValueError):
estimator.fit(X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_general(name, transformer, readonly_memmap=False):
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X -= X.min()
X = _pairwise_estimator_convert_X(X, transformer)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
_check_transformer(name, transformer, X, y)
@ignore_warnings(category=FutureWarning)
def check_transformer_data_not_an_array(name, transformer):
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non-negative data, for things
    # like NMF
X -= X.min() - 0.1
X = _pairwise_estimator_convert_X(X, transformer)
this_X = _NotAnArray(X)
this_y = _NotAnArray(np.asarray(y))
_check_transformer(name, transformer, this_X, this_y)
# try the same with some list
_check_transformer(name, transformer, X.tolist(), y.tolist())
@ignore_warnings(category=FutureWarning)
def check_transformers_unfitted(name, transformer):
X, y = _regression_dataset()
transformer = clone(transformer)
with raises(
(AttributeError, ValueError),
err_msg=(
"The unfitted "
f"transformer {name} does not raise an error when "
"transform is called. Perhaps use "
"check_is_fitted in transform."
),
):
transformer.transform(X)
def _check_transformer(name, transformer_orig, X, y):
n_samples, n_features = np.asarray(X).shape
transformer = clone(transformer_orig)
set_random_state(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[np.asarray(y), np.asarray(y)]
y_[::2, 1] *= 2
if isinstance(X, _NotAnArray):
y_ = _NotAnArray(y_)
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert x_pred.shape[0] == n_samples
else:
# check for consistent n_samples
assert X_pred.shape[0] == n_samples
if hasattr(transformer, "transform"):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if _safe_tags(transformer_orig, key="non_deterministic"):
msg = name + " is non deterministic"
raise SkipTest(msg)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_allclose_dense_sparse(
x_pred,
x_pred2,
atol=1e-2,
err_msg="fit_transform and transform outcomes not consistent in %s"
% transformer,
)
assert_allclose_dense_sparse(
x_pred,
x_pred3,
atol=1e-2,
err_msg="consecutive fit_transform outcomes not consistent in %s"
% transformer,
)
else:
assert_allclose_dense_sparse(
X_pred,
X_pred2,
err_msg="fit_transform and transform outcomes not consistent in %s"
% transformer,
atol=1e-2,
)
assert_allclose_dense_sparse(
X_pred,
X_pred3,
atol=1e-2,
err_msg="consecutive fit_transform outcomes not consistent in %s"
% transformer,
)
assert _num_samples(X_pred2) == n_samples
assert _num_samples(X_pred3) == n_samples
# raises error on malformed input for transform
if (
hasattr(X, "shape")
and not _safe_tags(transformer, key="stateless")
and X.ndim == 2
and X.shape[1] > 1
):
# If it's not an array, it does not have a 'T' property
with raises(
ValueError,
err_msg=(
f"The transformer {name} does not raise an error "
"when the number of features in transform is different from "
"the number of features in fit."
),
):
transformer.transform(X[:, :-1])
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
if _safe_tags(estimator_orig, key="non_deterministic"):
msg = name + " is non deterministic"
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_allclose_dense_sparse(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
X = _pairwise_estimator_convert_X(X, estimator_orig)
y = np.arange(n_samples) % 3
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
if args[0] == "self":
# if_delegate_has_method makes methods into functions
# with an explicit "self", so need to shift arguments
args = args[1:]
assert args[1] in ["y", "Y"], (
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, type(estimator).__name__, args)
)
@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_32 = _pairwise_estimator_convert_X(X_train_32, estimator_orig)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = _enforce_estimator_tags_y(estimator_orig, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_transformer_preserve_dtypes(name, transformer_orig):
    # Check that dtypes are preserved: if the input X has a given dtype,
    # X_transformed should have the same dtype.
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
cluster_std=0.1,
)
X = StandardScaler().fit_transform(X)
X -= X.min()
X = _pairwise_estimator_convert_X(X, transformer_orig)
for dtype in _safe_tags(transformer_orig, key="preserves_dtype"):
X_cast = X.astype(dtype)
transformer = clone(transformer_orig)
set_random_state(transformer)
X_trans = transformer.fit_transform(X_cast, y)
if isinstance(X_trans, tuple):
            # cross-decomposition returns a tuple of (x_scores, y_scores)
# when given y with fit_transform; only check the first element
X_trans = X_trans[0]
# check that the output dtype is preserved
assert X_trans.dtype == dtype, (
f"Estimator transform dtype: {X_trans.dtype} - "
f"original/expected dtype: {dtype.__name__}"
)
@ignore_warnings(category=FutureWarning)
def check_estimators_empty_data_messages(name, estimator_orig):
e = clone(estimator_orig)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
err_msg = (
f"The estimator {name} does not raise a ValueError when an "
"empty data is used to train. Perhaps use check_array in train."
)
with raises(ValueError, err_msg=err_msg):
e.fit(X_zero_samples, [])
X_zero_features = np.empty(0).reshape(12, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = _enforce_estimator_tags_y(e, np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]))
msg = r"0 feature\(s\) \(shape=\(\d*, 0\)\) while a minimum of \d* " "is required."
with raises(ValueError, match=msg):
e.fit(X_zero_features, y)
@ignore_warnings(category=FutureWarning)
def check_estimators_nan_inf(name, estimator_orig):
    # Check that the estimator raises an error when X contains NaN or inf values.
rnd = np.random.RandomState(0)
X_train_finite = _pairwise_estimator_convert_X(
rnd.uniform(size=(10, 3)), estimator_orig
)
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = _enforce_estimator_tags_y(estimator_orig, y)
error_string_fit = f"Estimator {name} doesn't check for NaN and inf in fit."
error_string_predict = f"Estimator {name} doesn't check for NaN and inf in predict."
error_string_transform = (
f"Estimator {name} doesn't check for NaN and inf in transform."
)
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
estimator = clone(estimator_orig)
set_random_state(estimator, 1)
# try to fit
with raises(ValueError, match=["inf", "NaN"], err_msg=error_string_fit):
estimator.fit(X_train, y)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_predict,
):
estimator.predict(X_train)
# transform
if hasattr(estimator, "transform"):
with raises(
ValueError,
match=["inf", "NaN"],
err_msg=error_string_transform,
):
estimator.transform(X_train)
@ignore_warnings
def check_nonsquare_error(name, estimator_orig):
"""Test that error is thrown when non-square data provided."""
X, y = make_blobs(n_samples=20, n_features=10)
estimator = clone(estimator_orig)
with raises(
ValueError,
err_msg=(
f"The pairwise estimator {name} does not raise an error on non-square data"
),
):
estimator.fit(X, y)
@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
"""Test that we can pickle all estimators."""
check_methods = ["predict", "transform", "decision_function", "predict_proba"]
X, y = make_blobs(
n_samples=30,
centers=[[0, 0, 0], [1, 1, 1]],
random_state=0,
n_features=2,
cluster_std=0.1,
)
    # some estimators can't handle feature values less than 0
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
tags = _safe_tags(estimator_orig)
# include NaN values when the estimator should deal with them
if tags["allow_nan"]:
# set randomly 10 elements to np.nan
rng = np.random.RandomState(42)
mask = rng.choice(X.size, 10, replace=False)
X.reshape(-1)[mask] = np.nan
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
set_random_state(estimator)
estimator.fit(X, y)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
module_name = estimator.__module__
if module_name.startswith("sklearn.") and not (
"test_" in module_name or module_name.endswith("_testing")
):
# strict check for sklearn estimators that are not implemented in test
# modules.
assert b"version" in pickled_estimator
unpickled_estimator = pickle.loads(pickled_estimator)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_allclose_dense_sparse(result[method], unpickled_result)
@ignore_warnings(category=FutureWarning)
def check_estimators_partial_fit_n_features(name, estimator_orig):
    # check that an error is raised if the number of features
    # changes between calls to partial_fit.
if not hasattr(estimator_orig, "partial_fit"):
return
estimator = clone(estimator_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
y = _enforce_estimator_tags_y(estimator_orig, y)
try:
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
except NotImplementedError:
return
with raises(
ValueError,
err_msg=(
f"The estimator {name} does not raise an error when the "
"number of features changes between calls to partial_fit."
),
):
estimator.partial_fit(X[:, :-1], y)
@ignore_warnings(category=FutureWarning)
def check_classifier_multioutput(name, estimator):
n_samples, n_labels, n_classes = 42, 5, 3
tags = _safe_tags(estimator)
estimator = clone(estimator)
X, y = make_multilabel_classification(
random_state=42, n_samples=n_samples, n_labels=n_labels, n_classes=n_classes
)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples, n_classes), (
"The shape of the prediction for multioutput data is "
"incorrect. Expected {}, got {}.".format((n_samples, n_labels), y_pred.shape)
)
assert y_pred.dtype.kind == "i"
if hasattr(estimator, "decision_function"):
decision = estimator.decision_function(X)
assert isinstance(decision, np.ndarray)
assert decision.shape == (n_samples, n_classes), (
"The shape of the decision function output for "
"multioutput data is incorrect. Expected {}, got {}.".format(
(n_samples, n_classes), decision.shape
)
)
dec_pred = (decision > 0).astype(int)
dec_exp = estimator.classes_[dec_pred]
assert_array_equal(dec_exp, y_pred)
if hasattr(estimator, "predict_proba"):
y_prob = estimator.predict_proba(X)
if isinstance(y_prob, list) and not tags["poor_score"]:
for i in range(n_classes):
assert y_prob[i].shape == (n_samples, 2), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}.".format(
(n_samples, 2), y_prob[i].shape
)
)
assert_array_equal(
np.argmax(y_prob[i], axis=1).astype(int), y_pred[:, i]
)
elif not tags["poor_score"]:
assert y_prob.shape == (n_samples, n_classes), (
"The shape of the probability for multioutput data is"
" incorrect. Expected {}, got {}.".format(
(n_samples, n_classes), y_prob.shape
)
)
assert_array_equal(y_prob.round().astype(int), y_pred)
if hasattr(estimator, "decision_function") and hasattr(estimator, "predict_proba"):
for i in range(n_classes):
y_proba = estimator.predict_proba(X)[:, i]
y_decision = estimator.decision_function(X)
assert_array_equal(rankdata(y_proba), rankdata(y_decision[:, i]))
@ignore_warnings(category=FutureWarning)
def check_regressor_multioutput(name, estimator):
estimator = clone(estimator)
n_samples = n_features = 10
if not _is_pairwise_metric(estimator):
n_samples = n_samples + 1
X, y = make_regression(
random_state=42, n_targets=5, n_samples=n_samples, n_features=n_features
)
X = _pairwise_estimator_convert_X(X, estimator)
estimator.fit(X, y)
y_pred = estimator.predict(X)
assert y_pred.dtype == np.dtype("float64"), (
"Multioutput predictions by a regressor are expected to be"
" floating-point precision. Got {} instead".format(y_pred.dtype)
)
assert y_pred.shape == y.shape, (
"The shape of the prediction for multioutput data is incorrect."
" Expected {}, got {}."
)
@ignore_warnings(category=FutureWarning)
def check_clustering(name, clusterer_orig, readonly_memmap=False):
clusterer = clone(clusterer_orig)
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
rng = np.random.RandomState(7)
X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
if readonly_memmap:
X, y, X_noise = create_memmap_backed_data([X, y, X_noise])
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
if hasattr(clusterer, "n_clusters"):
clusterer.set_params(n_clusters=3)
set_random_state(clusterer)
if name == "AffinityPropagation":
clusterer.set_params(preference=-100)
clusterer.set_params(max_iter=100)
# fit
clusterer.fit(X)
# with lists
clusterer.fit(X.tolist())
pred = clusterer.labels_
assert pred.shape == (n_samples,)
assert adjusted_rand_score(pred, y) > 0.4
if _safe_tags(clusterer, key="non_deterministic"):
return
set_random_state(clusterer)
with warnings.catch_warnings(record=True):
pred2 = clusterer.fit_predict(X)
assert_array_equal(pred, pred2)
# fit_predict(X) and labels_ should be of type int
assert pred.dtype in [np.dtype("int32"), np.dtype("int64")]
assert pred2.dtype in [np.dtype("int32"), np.dtype("int64")]
# Add noise to X to test the possible values of the labels
labels = clusterer.fit_predict(X_noise)
# There should be at least one sample in every cluster. Equivalently
# labels_ should contain all the consecutive values between its
# min and its max.
labels_sorted = np.unique(labels)
assert_array_equal(
labels_sorted, np.arange(labels_sorted[0], labels_sorted[-1] + 1)
)
# Labels are expected to start at 0 (no noise) or -1 (if noise)
assert labels_sorted[0] in [0, -1]
# Labels should be less than n_clusters - 1
if hasattr(clusterer, "n_clusters"):
n_clusters = getattr(clusterer, "n_clusters")
assert n_clusters - 1 >= labels_sorted[-1]
# else labels should be less than max(labels_) which is necessarily true
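# Hypothetical illustration of the label convention asserted above (not part of
# scikit-learn's checks): cluster labels must be the consecutive integers
# between their min and max, starting at 0 (no noise) or -1 (noise, as in DBSCAN).
def _demo_cluster_label_convention():
    labels = np.array([-1, 0, 0, 1, 2, 2, -1])
    labels_sorted = np.unique(labels)
    assert_array_equal(labels_sorted, np.arange(labels_sorted[0], labels_sorted[-1] + 1))
    assert labels_sorted[0] in [0, -1]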
@ignore_warnings(category=FutureWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
"""Check that predict is invariant of compute_labels."""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = clone(clusterer_orig)
set_random_state(clusterer)
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
@ignore_warnings(category=FutureWarning)
def check_classifiers_one_label(name, classifier_orig):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = "Classifier can't predict when only one class is present."
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=FutureWarning):
classifier = clone(classifier_orig)
with raises(
ValueError, match="class", may_pass=True, err_msg=error_string_fit
) as cm:
classifier.fit(X_train, y)
if cm.raised_and_matched:
# ValueError was raised with proper error message
return
assert_array_equal(classifier.predict(X_test), y, err_msg=error_string_predict)
def _create_memmap_backed_data(numpy_arrays):
# OpenBLAS is known to segfault with unaligned data on the Prescott architecture
# See: https://github.com/scipy/scipy/issues/14886
has_prescott_openblas = any(
True
for info in threadpool_info()
if info["internal_api"] == "openblas"
# Prudently assume Prescott might be the architecture if it is unknown.
and info.get("architecture", "prescott").lower() == "prescott"
)
return [
create_memmap_backed_data(array, aligned=has_prescott_openblas)
for array in numpy_arrays
]
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(
name, classifier_orig, readonly_memmap=False, X_dtype="float64"
):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m = X_m.astype(X_dtype)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
if name in ["BernoulliNB", "MultinomialNB", "ComplementNB", "CategoricalNB"]:
X_m -= X_m.min()
X_b -= X_b.min()
if readonly_memmap:
X_m, y_m, X_b, y_b = _create_memmap_backed_data([X_m, y_m, X_b, y_b])
problems = [(X_b, y_b)]
tags = _safe_tags(classifier_orig)
if not tags["binary_only"]:
problems.append((X_m, y_m))
for (X, y) in problems:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = clone(classifier_orig)
X = _pairwise_estimator_convert_X(X, classifier)
y = _enforce_estimator_tags_y(classifier, y)
set_random_state(classifier)
# raises error on malformed input for fit
if not tags["no_validation"]:
with raises(
ValueError,
err_msg=(
f"The classifier {name} does not raise an error when "
"incorrect/malformed input data for fit is passed. The number "
"of training examples is not the same as the number of "
"labels. Perhaps use check_X_y in fit."
),
):
classifier.fit(X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert hasattr(classifier, "classes_")
y_pred = classifier.predict(X)
assert y_pred.shape == (n_samples,)
# training set performance
if not tags["poor_score"]:
assert accuracy_score(y, y_pred) > 0.83
# raises error on malformed input for predict
msg_pairwise = (
"The classifier {} does not raise an error when shape of X in "
" {} is not equal to (n_test_samples, n_training_samples)"
)
msg = (
"The classifier {} does not raise an error when the number of "
"features in {} is different from the number of features in "
"fit."
)
if not tags["no_validation"]:
if _is_pairwise(classifier):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict"),
):
classifier.predict(X.reshape(-1, 1))
else:
with raises(ValueError, err_msg=msg.format(name, "predict")):
classifier.predict(X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
if n_classes == 2:
if not tags["multioutput_only"]:
assert decision.shape == (n_samples,)
else:
assert decision.shape == (n_samples, 1)
dec_pred = (decision.ravel() > 0).astype(int)
assert_array_equal(dec_pred, y_pred)
else:
assert decision.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input for decision_function
if not tags["no_validation"]:
if _is_pairwise(classifier):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "decision_function"),
):
classifier.decision_function(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "decision_function"),
):
classifier.decision_function(X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert y_prob.shape == (n_samples, n_classes)
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1), np.ones(n_samples))
if not tags["no_validation"]:
# raises error on malformed input for predict_proba
if _is_pairwise(classifier_orig):
with raises(
ValueError,
err_msg=msg_pairwise.format(name, "predict_proba"),
):
classifier.predict_proba(X.reshape(-1, 1))
else:
with raises(
ValueError,
err_msg=msg.format(name, "predict_proba"),
):
classifier.predict_proba(X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
def check_outlier_corruption(num_outliers, expected_outliers, decision):
# Check for deviation from the precise given contamination level that may
# be due to ties in the anomaly scores.
if num_outliers < expected_outliers:
start = num_outliers
end = expected_outliers + 1
else:
start = expected_outliers
end = num_outliers + 1
# ensure that all values in the 'critical area' are tied,
# leading to the observed discrepancy between provided
# and actual contamination levels.
sorted_decision = np.sort(decision)
msg = (
"The number of predicted outliers is not equal to the expected "
"number of outliers and this difference is not explained by the "
"number of ties in the decision_function values"
)
assert len(np.unique(sorted_decision[start:end])) == 1, msg
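# Hypothetical illustration of the tie check above (not part of scikit-learn's
# checks): three outliers are predicted where two are expected, and the check
# passes only because the decision values in the critical slice [2:4] are tied.
def _demo_outlier_corruption_check():
    decision = np.array([-2.0, -1.0, 0.5, 0.5, 1.0, 2.0])
    check_outlier_corruption(num_outliers=3, expected_outliers=2, decision=decision)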
def check_outliers_train(name, estimator_orig, readonly_memmap=True):
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
if readonly_memmap:
X = create_memmap_backed_data(X)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X)
# with lists
estimator.fit(X.tolist())
y_pred = estimator.predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == "i"
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
decision = estimator.decision_function(X)
scores = estimator.score_samples(X)
for output in [decision, scores]:
assert output.dtype == np.dtype("float")
assert output.shape == (n_samples,)
# raises error on malformed input for predict
with raises(ValueError):
estimator.predict(X.T)
# decision_function agrees with predict
dec_pred = (decision >= 0).astype(int)
dec_pred[dec_pred == 0] = -1
assert_array_equal(dec_pred, y_pred)
# raises error on malformed input for decision_function
with raises(ValueError):
estimator.decision_function(X.T)
# decision_function is a translation of score_samples
y_dec = scores - estimator.offset_
assert_allclose(y_dec, decision)
# raises error on malformed input for score_samples
with raises(ValueError):
estimator.score_samples(X.T)
# contamination parameter (not for OneClassSVM which has the nu parameter)
if hasattr(estimator, "contamination") and not hasattr(estimator, "novelty"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'. This is true for the training set and cannot thus be
# checked as follows for estimators with a novelty parameter such as
# LocalOutlierFactor (tested in check_outliers_fit_predict)
expected_outliers = 30
contamination = expected_outliers / n_samples
estimator.set_params(contamination=contamination)
estimator.fit(X)
y_pred = estimator.predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method, i.e. all estimators except LOF which is already
# excluded from this if branch.
if num_outliers != expected_outliers:
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
# raises error when contamination is a scalar and not in [0,1]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit(X)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_representation_invariance(name, classifier_orig):
X, y = make_multilabel_classification(
n_samples=100,
n_features=2,
n_classes=5,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, y_train = X[:80], y[:80]
X_test = X[80:]
y_train_list_of_lists = y_train.tolist()
y_train_list_of_arrays = list(y_train)
classifier = clone(classifier_orig)
set_random_state(classifier)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
y_pred_list_of_lists = classifier.fit(X_train, y_train_list_of_lists).predict(
X_test
)
y_pred_list_of_arrays = classifier.fit(X_train, y_train_list_of_arrays).predict(
X_test
)
assert_array_equal(y_pred, y_pred_list_of_arrays)
assert_array_equal(y_pred, y_pred_list_of_lists)
assert y_pred.dtype == y_pred_list_of_arrays.dtype
assert y_pred.dtype == y_pred_list_of_lists.dtype
assert type(y_pred) == type(y_pred_list_of_arrays)
assert type(y_pred) == type(y_pred_list_of_lists)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_predict(name, classifier_orig):
"""Check the output of the `predict` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train, y_test = y[:-test_size], y[-test_size:]
classifier.fit(X_train, y_train)
response_method_name = "predict"
predict_method = getattr(classifier, response_method_name, None)
if predict_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_method(X_test)
# y_pred.shape -> y_test.shape with the same dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.predict is expected to output a NumPy array. Got "
f"{type(y_pred)} instead."
)
assert y_pred.shape == y_test.shape, (
f"{name}.predict outputs a NumPy array of shape {y_pred.shape} "
f"instead of {y_test.shape}."
)
assert y_pred.dtype == y_test.dtype, (
f"{name}.predict does not output the same dtype than the targets. "
f"Got {y_pred.dtype} instead of {y_test.dtype}."
)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_predict_proba(name, classifier_orig):
"""Check the output of the `predict_proba` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
classifier.fit(X_train, y_train)
response_method_name = "predict_proba"
predict_proba_method = getattr(classifier, response_method_name, None)
if predict_proba_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = predict_proba_method(X_test)
# y_pred.shape -> 2 possibilities:
# - list of length n_outputs of shape (n_samples, 2);
# - ndarray of shape (n_samples, n_outputs).
# dtype should be floating
if isinstance(y_pred, list):
assert len(y_pred) == n_outputs, (
f"When {name}.predict_proba returns a list, the list should "
"be of length n_outputs and contain NumPy arrays. Got length "
f"of {len(y_pred)} instead of {n_outputs}."
)
for pred in y_pred:
assert pred.shape == (test_size, 2), (
f"When {name}.predict_proba returns a list, this list "
"should contain NumPy arrays of shape (n_samples, 2). Got "
f"NumPy arrays of shape {pred.shape} instead of "
f"{(test_size, 2)}."
)
assert pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a list, it should "
"contain NumPy arrays with floating dtype. Got "
f"{pred.dtype} instead."
)
# check that we have the correct probabilities
err_msg = (
f"When {name}.predict_proba returns a list, each NumPy "
"array should contain probabilities for each class and "
"thus each row should sum to 1 (or close to 1 due to "
"numerical errors)."
)
assert_allclose(pred.sum(axis=1), 1, err_msg=err_msg)
elif isinstance(y_pred, np.ndarray):
assert y_pred.shape == (test_size, n_outputs), (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected shape is (n_samples, n_outputs). Got {y_pred.shape}"
f" instead of {(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"When {name}.predict_proba returns a NumPy array, the "
f"expected data type is floating. Got {y_pred.dtype} instead."
)
err_msg = (
f"When {name}.predict_proba returns a NumPy array, this array "
"is expected to provide probabilities of the positive class "
"and should therefore contain values between 0 and 1."
)
assert_array_less(0, y_pred, err_msg=err_msg)
assert_array_less(y_pred, 1, err_msg=err_msg)
else:
raise ValueError(
f"Unknown returned type {type(y_pred)} by {name}."
"predict_proba. A list or a Numpy array is expected."
)
@ignore_warnings(category=FutureWarning)
def check_classifiers_multilabel_output_format_decision_function(name, classifier_orig):
"""Check the output of the `decision_function` method for classifiers supporting
multilabel-indicator targets."""
classifier = clone(classifier_orig)
set_random_state(classifier)
n_samples, test_size, n_outputs = 100, 25, 5
X, y = make_multilabel_classification(
n_samples=n_samples,
n_features=2,
n_classes=n_outputs,
n_labels=3,
length=50,
allow_unlabeled=True,
random_state=0,
)
X = scale(X)
X_train, X_test = X[:-test_size], X[-test_size:]
y_train = y[:-test_size]
classifier.fit(X_train, y_train)
response_method_name = "decision_function"
decision_function_method = getattr(classifier, response_method_name, None)
if decision_function_method is None:
raise SkipTest(f"{name} does not have a {response_method_name} method.")
y_pred = decision_function_method(X_test)
# y_pred.shape -> y_test.shape with floating dtype
assert isinstance(y_pred, np.ndarray), (
f"{name}.decision_function is expected to output a NumPy array."
f" Got {type(y_pred)} instead."
)
assert y_pred.shape == (test_size, n_outputs), (
f"{name}.decision_function is expected to provide a NumPy array "
f"of shape (n_samples, n_outputs). Got {y_pred.shape} instead of "
f"{(test_size, n_outputs)}."
)
assert y_pred.dtype.kind == "f", (
f"{name}.decision_function is expected to output a floating dtype."
f" Got {y_pred.dtype} instead."
)
@ignore_warnings(category=FutureWarning)
def check_estimators_fit_returns_self(name, estimator_orig, readonly_memmap=False):
"""Check if self is returned when calling fit."""
X, y = make_blobs(random_state=0, n_samples=21)
# some want non-negative input
X -= X.min()
X = _pairwise_estimator_convert_X(X, estimator_orig)
estimator = clone(estimator_orig)
y = _enforce_estimator_tags_y(estimator, y)
if readonly_memmap:
X, y = create_memmap_backed_data([X, y])
set_random_state(estimator)
assert estimator.fit(X, y) is estimator
@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise a NotFittedError.
"""
# Common test for Regressors, Classifiers and Outlier detection estimators
X, y = _regression_dataset()
estimator = clone(estimator_orig)
for method in (
"decision_function",
"predict",
"predict_proba",
"predict_log_proba",
):
if hasattr(estimator, method):
with raises(NotFittedError):
getattr(estimator, method)(X)
@ignore_warnings(category=FutureWarning)
def check_supervised_y_2d(name, estimator_orig):
tags = _safe_tags(estimator_orig)
rnd = np.random.RandomState(0)
n_samples = 30
X = _pairwise_estimator_convert_X(rnd.uniform(size=(n_samples, 3)), estimator_orig)
y = np.arange(n_samples) % 3
y = _enforce_estimator_tags_y(estimator_orig, y)
estimator = clone(estimator_orig)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % ", ".join(
[str(w_x) for w_x in w]
)
if not tags["multioutput"]:
# check that we warned if we don't support multi-output
assert len(w) > 0, msg
assert (
"DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected"
in msg
)
assert_allclose(y_pred.ravel(), y_pred_2d.ravel())
@ignore_warnings
def check_classifiers_predictions(X, y, name, classifier_orig):
classes = np.unique(y)
classifier = clone(classifier_orig)
if name == "BernoulliNB":
X = X > X.mean()
set_random_state(classifier)
classifier.fit(X, y)
y_pred = classifier.predict(X)
if hasattr(classifier, "decision_function"):
decision = classifier.decision_function(X)
assert isinstance(decision, np.ndarray)
if len(classes) == 2:
dec_pred = (decision.ravel() > 0).astype(int)
dec_exp = classifier.classes_[dec_pred]
assert_array_equal(
dec_exp,
y_pred,
err_msg=(
"decision_function does not match "
"classifier for %r: expected '%s', got '%s'"
)
% (
classifier,
", ".join(map(str, dec_exp)),
", ".join(map(str, y_pred)),
),
)
elif getattr(classifier, "decision_function_shape", "ovr") == "ovr":
decision_y = np.argmax(decision, axis=1).astype(int)
y_exp = classifier.classes_[decision_y]
assert_array_equal(
y_exp,
y_pred,
err_msg=(
"decision_function does not match "
"classifier for %r: expected '%s', got '%s'"
)
% (classifier, ", ".join(map(str, y_exp)), ", ".join(map(str, y_pred))),
)
# training set performance
if name != "ComplementNB":
# This is a pathological data set for ComplementNB.
# For some specific cases 'ComplementNB' predicts less classes
# than expected
assert_array_equal(np.unique(y), np.unique(y_pred))
assert_array_equal(
classes,
classifier.classes_,
err_msg="Unexpected classes_ attribute for %r: expected '%s', got '%s'"
% (
classifier,
", ".join(map(str, classes)),
", ".join(map(str, classifier.classes_)),
),
)
def _choose_check_classifiers_labels(name, y, y_names):
# Semisupervised classifiers use -1 as the indicator for an unlabeled
# sample.
return (
y
if name in ["LabelPropagation", "LabelSpreading", "SelfTrainingClassifier"]
else y_names
)
def check_classifiers_classes(name, classifier_orig):
X_multiclass, y_multiclass = make_blobs(
n_samples=30, random_state=0, cluster_std=0.1
)
X_multiclass, y_multiclass = shuffle(X_multiclass, y_multiclass, random_state=7)
X_multiclass = StandardScaler().fit_transform(X_multiclass)
# We need to make sure that we have non negative data, for things
# like NMF
X_multiclass -= X_multiclass.min() - 0.1
X_binary = X_multiclass[y_multiclass != 2]
y_binary = y_multiclass[y_multiclass != 2]
X_multiclass = _pairwise_estimator_convert_X(X_multiclass, classifier_orig)
X_binary = _pairwise_estimator_convert_X(X_binary, classifier_orig)
labels_multiclass = ["one", "two", "three"]
labels_binary = ["one", "two"]
y_names_multiclass = np.take(labels_multiclass, y_multiclass)
y_names_binary = np.take(labels_binary, y_binary)
problems = [(X_binary, y_binary, y_names_binary)]
if not _safe_tags(classifier_orig, key="binary_only"):
problems.append((X_multiclass, y_multiclass, y_names_multiclass))
for X, y, y_names in problems:
for y_names_i in [y_names, y_names.astype("O")]:
y_ = _choose_check_classifiers_labels(name, y, y_names_i)
check_classifiers_predictions(X, y_, name, classifier_orig)
labels_binary = [-1, 1]
y_names_binary = np.take(labels_binary, y_binary)
y_binary = _choose_check_classifiers_labels(name, y_binary, y_names_binary)
check_classifiers_predictions(X_binary, y_binary, name, classifier_orig)
@ignore_warnings(category=FutureWarning)
def check_regressors_int(name, regressor_orig):
X, _ = _regression_dataset()
X = _pairwise_estimator_convert_X(X[:50], regressor_orig)
rnd =
|
np.random.RandomState(0)
|
numpy.random.RandomState
|
import numpy as np
import random
import math
import json
import pkg_resources
action_to_dxdy = {7: (-1, -1),
2: (1, 0),
9: (-1, 1),
4: (0, -1),
5: (0, 0),
6: (0, 1),
1: (1, -1),
8: (-1, 0),
3: (1, 1)}
def distance(p0, p1): # euclidian distance between 2d-points
return math.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
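# Small sketch of the intended use of the action table above (assumed
# convention: an action index maps to a (dx, dy) step added to a grid cell).
# This demo function is not part of the original game code.
def _demo_action_step():
    pos = (3, 3)
    dx, dy = action_to_dxdy[6]              # action 6 steps one cell in +y
    new_pos = (pos[0] + dx, pos[1] + dy)
    assert new_pos == (3, 4)
    assert distance(pos, new_pos) == 1.0    # single-cell moves have unit length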
class PacmanGame:
def __init__(self, field_shape, nmonsters, ndiamonds, nwalls,
monster_vision_range, max_moves=1000):
assert field_shape[0] * field_shape[1] > 1 + nmonsters + ndiamonds + nwalls, "Field too small to place the player, monsters, diamonds and walls"
self.field_shape = field_shape
self.nmonsters = nmonsters
self.ndiamonds = ndiamonds
self.nwalls = nwalls
self.monster_vision_range = monster_vision_range
self.max_moves = max_moves
self.game_window = None
self.total_score = 0
self.end_game = True
self.n_moves = 0
self.delta_score = 0
self.player = None
self.monsters = None
self.diamonds = None
self.walls = None
self.reset()
def __del__(self):
self.close()
def close(self):
self.close_window()
self.end_game = True
def close_window(self):
if self.game_window is not None:
self.game_window.close()
self.game_window = None
def reset(self):
self.total_score = 0
self.end_game = False
self.n_moves = 0
self.delta_score = 0
# init positions of player, monsters, diamonds and walls
reserved_coords = set()
x = np.random.randint(self.field_shape[0])
y = np.random.randint(self.field_shape[1])
self.player = (x, y)
reserved_coords.add(self.player)
self.monsters = []
for i in range(self.nmonsters):
while (x, y) in reserved_coords:
x = np.random.randint(self.field_shape[0])
y = np.random.randint(self.field_shape[1])
monster_coord = (x, y)
reserved_coords.add(monster_coord)
self.monsters.append(monster_coord)
self.monsters.sort() # sort to keep the state space smaller
self.diamonds = []
for i in range(self.ndiamonds):
while (x, y) in reserved_coords:
x = np.random.randint(self.field_shape[0])
y = np.random.randint(self.field_shape[1])
diamond_coord = (x, y)
reserved_coords.add(diamond_coord)
self.diamonds.append(diamond_coord)
self.diamonds.sort() # sort to keep the state space smaller
self.walls = []
for i in range(self.nwalls):
while (x, y) in reserved_coords:
x = np.random.randint(self.field_shape[0])
y =
|
np.random.randint(self.field_shape[1])
|
numpy.random.randint
|
#%%
import tiledb
from tiledb.libtiledb import ustring
import numpy as np
import unittest, os, time
from tiledb.tests.common import *
class MetadataTest(DiskTestCase):
def test_metadata_basic(self):
path = self.path("test_md_basic")
with tiledb.from_numpy(path, np.ones((5,), np.float64)) as A:
pass
# sanity checks
A = tiledb.open(path)
A.close()
# can't read from a closed array
with self.assertRaises(tiledb.TileDBError):
A.meta['x']
with tiledb.Array(path) as A:
# can't write to a mode='r' array
with self.assertRaises(tiledb.TileDBError):
A.meta['invalid_write'] = 1
# missing key raises KeyError
with self.assertRaises(KeyError):
A.meta['xyz123nokey']
# test invalid input
with tiledb.Array(path, 'w') as A:
# keys must be strings
with self.assertRaises(ValueError):
A.meta[123] = 1
# can't write an int larger than the int64 maximum
with self.assertRaises(OverflowError):
A.meta['bigint'] = np.iinfo(np.int64).max + 1
# can't write mixed-type list
with self.assertRaises(TypeError):
A.meta['mixed_list'] = [1, 2.1]
# can't write mixed-type tuple
with self.assertRaises(TypeError):
A.meta['mixed_list'] = (0, 3.1)
# can't write objects
with self.assertRaises(ValueError):
A.meta['object'] = object
test_vals = {
'int': 10,
'double': 1.000001212,
'bytes': b"0123456789abcdeF0123456789abcdeF",
'str': "abcdefghijklmnopqrstuvwxyz",
'emptystr': "",
'tuple_int': (1,2,3,2,1, int(np.random.randint(0,10000,1)[0]) ),
'list_int': [1,2,3,2,1, int(np.random.randint(0,10000,1)[0]) ],
'tuple_float': (10.0, 11.0, float(np.random.rand(1)[0]) ),
'list_float': [10.0, 11.0, float(np.random.rand(1)[0]) ]
}
def tupleize(v):
if isinstance(v, list):
v = tuple(v)
return v
with tiledb.Array(path, mode='w') as A:
for k,v in test_vals.items():
A.meta[k] = v
with tiledb.Array(path) as A:
for k,v in test_vals.items():
# metadata only has one iterable type: tuple, so we cannot
# perfectly round-trip the input type.
self.assertEqual(A.meta[k], tupleize(v))
# test dict-like functionality
with tiledb.Array(path) as A:
self.assertSetEqual(set(A.meta.keys()), set(test_vals.keys()))
self.assertFalse('gnokey' in A.meta)
self.assertEqual(len(A.meta), len(test_vals))
for k,v in A.meta.items():
self.assertTrue(k in test_vals.keys())
self.assertEqual(tupleize(v), tupleize(test_vals[k]),)
# test a 1 MB blob
blob = np.random.rand(int((1024**2)/8)).tobytes()
with tiledb.Array(path, 'w') as A:
A.meta['bigblob'] = blob
with tiledb.Array(path) as A:
self.assertEqual(A.meta['bigblob'], blob)
self.assertEqual(len(A.meta), len(test_vals)+1)
# test del key
with tiledb.Array(path, 'w') as A:
del A.meta['bigblob']
with tiledb.Array(path) as A:
self.assertTrue('bigblob' not in A.meta)
self.assertEqual(len(A.meta), len(test_vals))
with self.assertRaises(KeyError):
A.meta['bigblob']
# test pop NotImplementedError
with tiledb.Array(path, 'w') as A:
with self.assertRaises(NotImplementedError):
A.meta.pop('nokey', 'hello!')
# Note: this requires a work-around to check all keys
# test empty value
with tiledb.Array(path, 'w') as A:
A.meta['empty_val'] = ()
with tiledb.Array(path) as A:
self.assertTrue('empty_val' in A.meta)
self.assertEqual(A.meta['empty_val'], ())
def test_metadata_consecutive(self):
ctx = tiledb.Ctx({
'sm.vacuum.mode': 'array_meta',
'sm.consolidation.mode': 'array_meta'
})
vfs = tiledb.VFS(ctx=ctx)
path = self.path("test_md_consecutive")
write_count = 100
with tiledb.from_numpy(path,
|
np.ones((5,), np.float64)
|
numpy.ones
|
"""
Plot the kinetic reactions of biomass pyrolysis for the Ranzi 2014 kinetic
scheme.
Reference:
<NAME>, 2014. Chemical Engineering Science, 110, pp 2-12.
"""
import numpy as np
import pandas as pd
# Parameters
# ------------------------------------------------------------------------------
# T = 773 # temperature for rate constants, K
# weight percent (%) cellulose, hemicellulose, lignin for beech wood
# wtcell = 48
# wthemi = 28
# wtlig = 24
# dt = 0.001 # time step, delta t
# tmax = 4 # max time, s
# t = np.linspace(0, tmax, num=int(tmax/dt)) # time vector
# nt = len(t) # total number of time steps
# Functions for Ranzi 2014 Kinetic Scheme
# ------------------------------------------------------------------------------
def ranzicell(wood, wt, T, dt, nt):
"""
Cellulose reactions CELL from Ranzi 2014 paper for biomass pyrolysis.
Parameters
----------
wood = wood concentration, kg/m^3
wt = weight percent wood as cellulose, %
T = temperature, K
dt = time step, s
nt = total number of time steps
Returns
-------
main = mass concentration of main group, (-)
prod = mass concentration of product group, (-)
"""
# vector for initial wood concentration, kg/m^3
pw = np.ones(nt)*wood
# vectors to store main product concentrations, kg/m^3
cell = pw*(wt/100) # initial cellulose conc. in wood
g1 = np.zeros(nt) # G1
cella = np.zeros(nt) # CELLA
lvg = np.zeros(nt) # LVG
g4 = np.zeros(nt) # G4
R = 1.987 # universal gas constant, kcal/kmol*K
# reaction rate constant for each reaction, 1/s
# A = pre-factor (1/s) and E = activation energy (kcal/kmol)
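# Note (added for clarity, not in the original): each K_i below follows the
# Arrhenius form K = A * exp(-E / (R * T)), evaluated at the chosen temperature T.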
K1 = 4e7 *
|
np.exp(-31000 / (R * T))
|
numpy.exp
|
import pytest
import numpy as np
from numpy.linalg import matrix_rank
from scipy.linalg import inv
from nengo.utils.numpy import norm
from nengolib.signal.lyapunov import (
_H2P, state_norm, control_gram, observe_gram, balanced_transformation,
hsvd, l1_norm)
from nengolib.signal import sys2ss, cont2discrete, s, z
from nengolib.synapses import Bandpass, PadeDelay
from nengolib import Lowpass, Alpha
def test_lyapunov():
A = np.asarray([[1, 0.5], [1, 0]])
B = np.asarray([[1, 0], [0, 1]])
P = _H2P(A, B, analog=True)
assert np.allclose(np.dot(A, P) + np.dot(P, A.T) + np.dot(B, B.T), 0)
P = _H2P(A, B, analog=False)
assert np.allclose(np.dot(A, np.dot(P, A.T)) - P + np.dot(B, B.T), 0)
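# Hedged cross-check of the continuous-time case above using SciPy's standard
# solver (assuming scipy >= 0.19 for solve_continuous_lyapunov); this sketch is
# not part of the nengolib test suite.
def _demo_scipy_lyapunov_sketch():
    from scipy.linalg import solve_continuous_lyapunov
    A = np.asarray([[1, 0.5], [1, 0]])
    B = np.asarray([[1, 0], [0, 1]])
    # solves A @ P + P @ A.T = -B @ B.T, the same equation _H2P satisfies above
    P = solve_continuous_lyapunov(A, -np.dot(B, B.T))
    assert np.allclose(np.dot(A, P) + np.dot(P, A.T) + np.dot(B, B.T), 0)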
def test_state_norm(plt):
# Choose a filter, timestep, and number of simulation timesteps
sys = Alpha(0.1)
dt = 0.000001
length = 2000000
assert np.allclose(dt*length, 2.0)
# Check that the power of each state equals the H2-norm of each state
# The analog case is the same after scaling since dt is approx 0.
response = sys.X.impulse(length, dt)
actual = norm(response, axis=0) * dt
assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))
step = int(0.002/dt)
plt.figure()
plt.plot(response[::step, 0], label="$x_0$")
plt.plot(response[::step, 1], label="$x_1$")
plt.plot(np.dot(response[::step], sys.C.T), label="$y$")
plt.legend()
def test_grams():
sys = 0.6*Alpha(0.01) + 0.4*Lowpass(0.05)
A, B, C, D = sys2ss(sys)
P = control_gram(sys)
assert np.allclose(np.dot(A, P) + np.dot(P, A.T), -np.dot(B, B.T))
assert matrix_rank(P) == len(P) # controllable
Q = observe_gram(sys)
assert np.allclose(np.dot(A.T, Q) + np.dot(Q, A), -np.dot(C.T, C))
assert matrix_rank(Q) == len(Q) # observable
def test_balreal():
isys = Lowpass(0.05)
noise = 0.5*Lowpass(0.01) + 0.5*Alpha(0.005)
p = 0.8
sys = p*isys + (1-p)*noise
T, Tinv, S = balanced_transformation(sys)
assert np.allclose(inv(T), Tinv)
assert np.allclose(S, hsvd(sys))
balsys = sys.transform(T, Tinv)
assert balsys == sys
assert np.all(S >= 0)
assert np.all(S[0] > 0.3)
assert np.all(S[1:] < 0.05)
assert np.allclose(sorted(S, reverse=True), S)
P = control_gram(balsys)
Q = observe_gram(balsys)
diag = np.diag_indices(len(P))
offdiag = np.ones_like(P, dtype=bool)
offdiag[diag] = False
offdiag = np.where(offdiag)
assert np.allclose(P[diag], S)
assert np.allclose(P[offdiag], 0)
assert np.allclose(Q[diag], S)
assert np.allclose(Q[offdiag], 0)
@pytest.mark.parametrize("sys", [
PadeDelay(0.1, 4), PadeDelay(0.2, 5, 5), Alpha(0.2)])
def test_hankel(sys):
assert np.allclose(hsvd(sys), balanced_transformation(sys)[2])
def test_l1_norm_known():
# Check that Lowpass has a norm of exactly 1
l1, rtol = l1_norm(Lowpass(0.1))
assert np.allclose(l1, 1)
assert np.allclose(rtol, 0)
# Check that passthrough is handled properly
assert np.allclose(l1_norm(Lowpass(0.1) + 5)[0], 6)
assert np.allclose(l1_norm(Lowpass(0.1) - 5)[0], 6)
# Check that Alpha scaled by a has a norm of approximately abs(a)
for a in (-2, 3):
for desired_rtol in (1e-1, 1e-2, 1e-4, 1e-8):
l1, rtol = l1_norm(a*Alpha(0.1), rtol=desired_rtol)
assert np.allclose(l1, abs(a), rtol=rtol)
assert rtol <= desired_rtol
@pytest.mark.parametrize("sys", [
Bandpass(10, 3), Bandpass(50, 50), PadeDelay(0.02, 4),
PadeDelay(0.2, 4, 4)])
def test_l1_norm_unknown(sys):
# These impulse responses have zero-crossings which makes computing their
# exact L1-norm infeasible without simulation.
dt = 0.0001
length = 500000
response = sys.impulse(length, dt)
assert np.allclose(response[-10:], 0)
l1_est = np.sum(abs(response) * dt)
desired_rtol = 1e-6
l1, rtol = l1_norm(sys, rtol=desired_rtol, max_length=2*length)
assert
|
np.allclose(l1, l1_est, rtol=1e-3)
|
numpy.allclose
|
import numpy as np
from environement_features.discrete_features import DiscreteHighLevelFeatures
class DiscreteFeatures1TeammateV1(DiscreteHighLevelFeatures):
"""
Features:
- position: field regions [0,1,2,3,4,5]
- teammate further : [0, 1]
- goal opening angle: [0, 1]
- teammate goal angle: [0, 1]
- ball_position: [0, 1, 2, 3, 4, 5]
"""
positions_names = {0: "TOP LEFT", 1: "TOP RIGHT", 2: "MID LEFT",
3: "MID RIGHT", 4: "BOTTOM LEFT", 5: "BOTTOM RIGHT"}
teammate_further = {0: "teammate near goal", 1: "teammate further from goal"}
goal_opening_angle_values = {1: "open angle", 0: "closed_angle"}
teammate_goal_angle_values = {1: "open angle", 0: "closed_angle"}
ball_position = {0: "Player Has Ball", 1: "Teammate Has Ball", 2: "Right",
3: "Left", 4: "Down", 5: "Up"}
num_features = 5
features = np.zeros(num_features)
# numpy arrays:
ball_coord = np.array([0, 0])
agent_coord = np.array([0, 0])
teammate_coord = np.array(([0, 0]))
def _has_ball(self) -> int:
"""
:return: 1 if agent can kick, else 0.
:rtype: int
"""
return 1 if self.agent.able_to_kick else 0
def _position_finder(self):
"""
:return: Q Table index of agent position.
Position of agent in terms of quartile block:
0 == Top Left, 1 == Top right,
2 == Mid Left, 3 == Mid Right,
4 == Bottom Left, 5 == Bottom Right
:rtype: int
"""
if -1 < self.agent.y_pos < -0.4:
return 0 if self.agent.x_pos < 0 else 1
elif -0.4 < self.agent.y_pos < 0.4:
return 2 if self.agent.x_pos < 0 else 3
else: # y in [0.4, 1]
return 4 if self.agent.x_pos < 0 else 5
def _teammate_further(self) -> int:
"""
@return: 0 if teammate near goal, 1 otherwise
@rtype: int
"""
goal_coord = np.array([1, 0])
if self.teammate_coord[0] == -2 or self.teammate_coord[1] == -2:
return 1 # invalid teammate position (out of scope)
team_dist = np.linalg.norm(self.teammate_coord - goal_coord)
agent_dist = np.linalg.norm(self.agent_coord - goal_coord)
if team_dist < agent_dist:
return 0
else:
return 1
def _get_ball_position(self) -> int:
# Agent has ball
if self._has_ball():
return 0
# Teammate has ball
elif np.linalg.norm(self.teammate_coord - self.ball_coord) <= 0.1:
return 1
# Ball direction
y_diff = abs(self.agent.y_pos - self.agent.ball_y)
x_diff = abs(self.agent.x_pos - self.agent.ball_x)
if x_diff >= y_diff and x_diff > 0:
if self.agent.x_pos < self.agent.ball_x:
return 2 # RIGHT
else:
return 3 # LEFT
else:
if self.agent.y_pos < self.agent.ball_y:
return 4 # DOWN
else:
return 5 # UP
def get_pos_tuple(self, round_ndigits: int = -1) -> tuple:
""" @return (x axis pos, y axis pos)"""
if round_ndigits >= 0:
x_pos = round(self.agent.x_pos.item(), round_ndigits)
x_pos = abs(x_pos) if x_pos == -0.0 else x_pos
y_pos = round(self.agent.y_pos.item(), round_ndigits)
y_pos = abs(y_pos) if y_pos == -0.0 else y_pos
return x_pos, y_pos
else:
return self.agent.x_pos, self.agent.y_pos
def update_features(self, observation: list):
"""
Features:
- position: field regions [0,1,2,3,4,5]
- teammate further : [0, 1]
- goal opening angle: [0, 1]
- teammate goal angle: [0, 1]
- ball_position: [0, 1, 2, 3, 4, 5]
"""
self._encapsulate_data(observation)
# numpy array coordinates:
self.ball_coord = np.array([self.agent.ball_x, self.agent.ball_y])
self.agent_coord = np.array([self.agent.x_pos, self.agent.y_pos])
self.teammate_coord = np.array([self.teammates[0].x_pos,
self.teammates[0].y_pos])
# features:
self.features[0] = self._position_finder()
self.features[1] = self._teammate_further()
self.features[2] = 1 if abs(self.agent.goal_opening_angle) > 0.2 else 0
self.features[3] = 1 if abs(self.teammates[0].goal_angle) > 0.2 else 0
self.features[4] = self._get_ball_position()
def get_position_name(self):
pos = self.features[0]
return self.positions_names.get(pos)
def get_features(self, _=None):
return self.features
def get_num_features(self) -> int:
return self.num_features
def get_num_states(self):
""" Returns the total number of possible states """
size = len(self.positions_names) # positions
size *= len(self.teammate_further) # teammate relative position
size *= len(self.goal_opening_angle_values) # open_angle
size *= len(self.teammate_goal_angle_values) # teammates open angle
size *= len(self.ball_position)
return size
def get_state_index(self, _=None) -> int:
idx = 0
size = 1
# Agent positions:
idx += self.features[0] * size
size *= len(self.positions_names)
# Teammate relative position:
idx += self.features[1] * size
size *= len(self.teammate_further)
# Agent Open angle:
idx += self.features[2] * size
size *= len(self.goal_opening_angle_values)
# Teammate Open angle:
idx += self.features[3] * size
size *= len(self.teammate_goal_angle_values)
# Ball position
idx += self.features[4] * size
size *= len(self.ball_position)
return int(idx)
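# Added note: get_state_index() is a mixed-radix encoding of the five features,
# idx = f0 + 6*f1 + 12*f2 + 24*f3 + 48*f4, so every feature combination maps to
# a unique index in [0, get_num_states()) with get_num_states() = 6*2*2*2*6 = 288.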
def teammate_has_ball(self) -> bool:
return True if self.features[4] == 1 else False
def teammate_further_from_ball(self) -> bool:
if
|
np.linalg.norm(self.teammate_coord - self.ball_coord)
|
numpy.linalg.norm
|
''' Routely '''
import math
import copy
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.ticker import MultipleLocator
from scipy.interpolate import interp1d
class Route:
"""
Create a Route.
Args:
x (array-like) : List or array of x-coordinates of the route.
y (array-like) : List or array of y-coordinates of the route.
z (dict, optional) : List or array of z data for the route. This does not need to be elevation, but any data corresponding to the route in the x-y plane. Defaults to None.
"""
def __init__(self, x, y, z=None):
self.x = x
self.y = y
self.z = z
self._prep_inputs()
self._check_inputlengths()
self._check_inputvalues()
self.d = self._calculate_distance()
def _prep_inputs(self):
"""
Convert args to arrays if not None.
"""
if self.x is not None:
self.x = np.array(self.x)
if self.y is not None:
self.y = np.array(self.y)
if self.z is not None:
for k in self.z.keys():
self.z[k] = np.array(self.z[k])
def _check_inputlengths(self):
"""
Check input args lengths meet requirements
"""
# Check x and y have more than 1 item, and x and y are equal length
if not len(self.x) > 1:
raise ValueError("Route input 'x' must contain more than 1 item")
if not (len(self.y) > 1):
raise ValueError("Route input 'y' must contain more than 1 item")
if not (len(self.x) == len(self.y)):
raise ValueError("Route inputs 'x' and 'y' must be of equal length")
# Performs checks on z if not empty
if self.z is not None:
for v in self.z.values():
if not (len(v) == len(self.x)):
raise ValueError("Route input 'z' must be of equal length to 'x' and 'y'")
def _check_inputvalues(self):
"""
Check Route argument inputs and raise exceptions where necessary.
"""
# Check x, y and z are int or float dtypes
# ie do not contain any unusable values like strings
# (np.int / np.float are deprecated NumPy aliases, so use np.issubdtype instead)
if not (np.issubdtype(self.x.dtype, np.integer) or np.issubdtype(self.x.dtype, np.floating)):
    raise TypeError("Route input 'x' must be either int or float dtypes")
if not (np.issubdtype(self.y.dtype, np.integer) or np.issubdtype(self.y.dtype, np.floating)):
    raise TypeError("Route input 'y' must be either int or float dtypes")
# Performs checks on z if not empty
if self.z is not None:
    for v in self.z.values():
        if not (np.issubdtype(v.dtype, np.integer) or np.issubdtype(v.dtype, np.floating)):
            raise TypeError("Route input 'z' must be either int or float dtypes")
def copy(self):
return copy.copy(self)
def dataframe(self):
"""
Return route data as a pandas DataFrame with columns 'x', 'y' and 'd' (cumulative distance). z columns are included if z was specified as an input argument.
"""
df = pd.DataFrame({'x':self.x, 'y':self.y, 'd':self.d})
if self.z is not None:
for k, v in self.z.items():
df[k] = v
return df
def bbox(self):
"""Get the bounding box coordinates of the route.
Returns:
tuple: (lower-left corner coordinates, upper-right corner coordinates).
"""
lower = (self.x.min(), self.y.min())
upper = (self.x.max(), self.y.max())
return (lower, upper)
def width(self):
"""Get the width of the route (from min x to max x).
Returns:
float: route width.
"""
return self.x.max() - self.x.min()
def height(self):
"""Get the height of the route (from min y to max y).
Returns:
float: route height.
"""
return self.y.max() - self.y.min()
def size(self):
"""Returns the width and height (w, h) of the route along the x and y axes.
Returns:
tuple: (width, height)
"""
return (self.width(), self.height())
def center(self):
"""Get the center point of the route as defined as the mid-point between the max and min extents on each axis.
Returns:
tuple: (x, y) coordinates of the route center point
"""
xc = (self.x.max() + self.x.min())/2.
yc = (self.y.max() + self.y.min())/2.
return (xc, yc)
def nr_points(self):
"""Get the number of coordinate points that comprise the route.
Returns:
float: number of coordinates.
"""
return len(self.x)
# TODO: close off the route
# def close_off_route(self):
# """Close off the route by ensuring the first and last coordinates are equal.
# """
# return
def plotroute(self, markers=True, equal_aspect=True, equal_lims=True, canvas_style=False):
"""Plot the route (x vs y).
Args:
markers (bool, optional): Choose to display markers. Defaults to True.
equal_aspect (bool, optional): Choose to maintain an equal aspect ratio in the plot. Defaults to True.
equal_lims (bool, optional): Choose to display equal x and y limits. Defaults to True.
canvas_style (bool, optional): Create a canvas-style plot by removing all chart axes. Defaults to False.
"""
if markers:
marker = 'o'
else:
marker = None
fig, ax = plt.subplots()
ax.plot(self.x, self.y, 'k', marker=marker)
fig.tight_layout()
if equal_aspect:
ax.set_aspect('equal', 'box')
# Set equal lims if chosen. If not, let matplotlib set lims automatically
if equal_lims:
# Determine plot limits centered on the route center point
c = self.center()
lim = round((max(self.size())/2) * 1.1, 0) # add approx 10% to the lims
x_lim = [c[0] - lim, c[0] + lim]
y_lim = [c[1] - lim, c[1] + lim]
# Set lims on plot
ax.set_xlim(x_lim)
ax.set_ylim(y_lim)
# Axis formating
if canvas_style is False:
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.grid(True)
elif canvas_style:
ax.set_axis_off()
return ax
def plot_z(self, markers=True):
"""Plot Route z-data (d vs z).
Args:
markers (bool, optional): Choose to display markers. Defaults to True.
"""
# Check if z data is present
if self.z is None:
print('No z data provided')
return
if markers:
marker = 'o'
else:
marker = None
nr_plots = len(self.z)
fig, _ = plt.subplots(nr_plots, sharex=True)
# Use enumerate on fig which works with one axes or multiple axes
for idx, ax in enumerate(fig.axes):
# data and corresponding label
label = list(self.z.keys())[idx]
z_data = list(self.z.values())[idx]
# Plot data and label
ax.plot(self.d, z_data, 'k', marker=marker)
ax.set(xlabel='d', ylabel=label)
ax.grid(True)
ax.label_outer()
fig.tight_layout()
return ax
def _calculate_distance(self):
"""Calculate cumulative distance given Route x and y coordinates lists.
Returns:
array: 1d array of cumulative distance from the start of the Route to the end.
"""
xy = list(zip(self.x, self.y))
dist = [0]
for i in range(1, len(xy)):
dist.append(self.distance_between_two_points(xy[i-1], xy[i]))
return np.array(dist).cumsum()
@staticmethod
def distance_between_two_points(p1, p2):
"""Calulate the Euclidean distance between two (x, y) points.
Args:
p1 (tuple): (x, y) tuple of the first point
p2 (tuple): (x, y) tuple of the second point
Returns:
float: distance between point 1 and point 2
"""
return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
def clean_coordinates(self, duplicates='consecutive'):
"""Clean the coordinate lists by removing duplicate x and y tuples. This is done by finding the index list of unique x and y tuples, and returning the correspondong coordinates for x, y and z data. Two methods for finding duplicates are available: consecutive or any. See args for description.
Args:
duplicates(str, optional): Choose the method for dealing with duplicate coordinate tuples. If "consecutive" then remove consecutive duplicates keeping the first. If "any", remove all duplicate coordinate tuples. Defaults to consecutive.
Returns:
Route: Return a new Route object.
"""
# idx_x = set(np.unique(self.x, return_index=True)[1])
# idx_y = set(np.unique(self.y, return_index=True)[1])
# idx = idx_x.intersection(idx_y)
if duplicates == 'consecutive':
xy = list(zip(self.x, self.y))
idx = [0]
for i, p in list(enumerate(xy))[1:]:
if p != xy[i-1]:
idx.append(i)
elif duplicates == 'any':
idx = set(np.unique(list(zip(self.x, self.y)), axis=0, return_index=True)[1])
else:
raise ValueError("'duplicates' arg not valid see docs for valid options")
new_x = self.x[list(idx)]
new_y = self.y[list(idx)]
if self.z is not None:
# for each z, interpolate and add to the new dict
zz = {}
for k, v in self.z.items():
zz[k] = v[list(idx)]
else:
zz = None
return Route(new_x, new_y, z=zz)
def interpolate(self, kind='equidistant_steps', num=1):
"""
Interpolate Route x and y coordinate lists given various interpolation stategies.
Available strategies include (specify chosen strategy in 'kind' args):
--> 'equidistant_steps': equally spaced steps along the route path from start to end, using np.arange(). Step spacing is specified by the 'num' arg. This is not equidistant steps along the x or y axis, but distance along the path, i.e. between route waypoints. Note: some variation in step size may occur in order to coincide with existing x and y coordinates.
--> 'absolute_steps': the total number of points along the full route as specified by 'num' arg. The spacing of the points will be linear along the length of the route using np.linspace.
Note, in all cases, the total route distance may vary from the original but the start and end coordinates will remain the same.
Args:
kind (str, optional): See docs for options. Defaults to 'equidistant_steps'.
num (int, optional): step value corresponding to chosen 'kind' of interpolation. Defaults to 1.
Returns:
Route: Return a new Route object.
"""
x = self.x
y = self.y
d = self.d
# cubic requires a different calculation, so check the kind first
if not (kind == 'equidistant_steps') | (kind == 'absolute_steps'):
raise ValueError("Keyword argument for 'kind' not recognised. See docs for options.")
if kind == 'equidistant_steps':
# New list of distance points to interpolate Route data against
dist = list(np.arange(d.min(), d.max()+num, step=num))
elif kind == 'absolute_steps':
# New list of distance points to interpolate Route data against
dist = list(np.linspace(d.min(), d.max(), num=num))
# Interpolate x and y wrt to d against the new list of distanced points
xx = np.interp(dist, d, x)
yy = np.interp(dist, d, y)
# interpolate z too if it exists
if self.z is not None:
# Start a new dict of z-axis data
zz = {}
# for each, interpolate and add to the new dict
for k, v in self.z.items():
zz[k] = np.interp(dist, d, v)
else:
zz = None
return Route(xx, yy, z=zz)
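# Minimal usage sketch for interpolate() (illustrative only, assuming a simple
# three-point route; not part of the original module):
# >>> r = Route([0, 3, 6], [0, 4, 8])   # two 5-unit segments, d = [0, 5, 10]
# >>> r.interpolate(kind='equidistant_steps', num=1).nr_points()
# 11
# >>> r.interpolate(kind='absolute_steps', num=21).nr_points()
# 21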
# TODO: Add Univariate Spline
# def add_spline(self):
# return
def smooth(self, smoothing_factor=None):
"""Smooth the route using cubic interpolation by varying the smoothing factor from 0 to 1.
The smoothing factor dictates how much smoothing will be applied. The factor reduces the number of route coordinate points relative to the mean change in distance between coordinates. With a reduced number of points, the route is smoothed using Scipy's cubic interpolation. Consequently, the higher the factor, the fewer coordinate points and the higher the level of smoothing. The smoothing factor must be greater than or equal to 0 and less than 1.0.
Args:
smoothing_factor (float): level of smoothing to apply between 0 (no smoothing) and 1 (max smoothing). Must be less than 1.
Returns:
Route: Return a new Route object.
"""
if smoothing_factor is not None:
# the factor sets the step spacing (not a point count) passed to interpolate
step = int(np.diff(self.d).mean() / (1 - smoothing_factor))
# interpolate first
r = self.interpolate(kind='equidistant_steps', num=step)
else:
# if none, simply interpolate through the existing coord points
r = self.copy()
# clean coords list first. Interpolation cannot handle duplicate values in the list.
r = r.clean_coordinates()
# Use linspace to get a new list of distanced points
dist = np.linspace(r.d.min(), r.d.max(), num=5000)
# interpolation functions for x and y wrt to d
fx = interp1d(r.d, r.x, kind='cubic')
fy = interp1d(r.d, r.y, kind='cubic')
# apply function to distanced points
xx, yy = fx(dist), fy(dist)
# repeat for z if it exists
if self.z is not None:
zz = {}
for k, v in r.z.items():
fz = interp1d(r.d, v, kind='cubic')
zz[k] = fz(dist)
else:
zz = None
return Route(xx, yy, z=zz)
def center_on_origin(self, new_origin=(0, 0)):
"""Translate the Route to the origin, where the Route center point will be equal to the origin.
Args:
new_origin (tuple, optional): New Route origin, which will correspond to the Route's center point. Defaults to (0, 0).
Returns:
Route: Return a new Route object.
"""
center = self.center()
# translate x and y
x_new = self.x - center[0] + new_origin[0]
y_new = self.y - center[1] + new_origin[1]
return Route(x_new, y_new, z=self.z)
def align_to_origin(self, origin=(0, 0), align_corner='bottomleft'):
"""Align a corner of Route extents to the origin.
Args:
origin (tuple, optional): Route origin to align a chosen corner to. Defaults to (0, 0).
align_corner (str, optional): Choose a corner to align. Options: 'bottomleft', 'bottomright', 'topleft', 'topright'. Defaults to 'bottomleft'.
Returns:
Route: Return a new Route object.
"""
# Options: bottomleft, bottomright, topleft, topright
if align_corner == 'bottomleft':
corner = self.bbox()[0]
elif align_corner == 'topright':
corner = self.bbox()[1]
elif align_corner == 'bottomright':
corner = (self.bbox()[1][0], self.bbox()[0][1])
elif align_corner == 'topleft':
corner = (self.bbox()[0][0], self.bbox()[1][1])
else:
raise ValueError("Keyword argument for 'align_corner' not recognised. Please choose one from 'bottomleft', 'bottomright', 'topleft', 'topright'.")
# scale x and y
x_new = self.x - corner[0] + origin[0]
y_new = self.y - corner[1] + origin[1]
return Route(x_new, y_new, z=self.z)
@staticmethod
def _rotate_point(origin, point, angle):
"""Rotate a point counterclockwise by a given angle around a given origin.
Args:
origin (tuple): (x, y) point about which to rotate the point
point (tuple): (x, y) point to rotate
angle (float): angle to rotate point. The angle should be given in radians.
Returns:
tuple: (x, y) coordinates of rotated point
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
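# Added note: the expressions above apply the standard 2-D rotation matrix about
# `origin`:
#   [qx]   [ox]   [cos(a)  -sin(a)] [px - ox]
#   [qy] = [oy] + [sin(a)   cos(a)] [py - oy]
# e.g. rotating (1, 0) by pi/2 about (0, 0) gives (0, 1) up to floating-point error.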
def rotate(self, angle_deg):
"""Rotate Route x and y coordinates clockwise for a given angle in degrees. This does not modify z-axis data.
Args:
angle_deg (float): angle of rotation in degrees.
Returns:
Route: Return a new Route object.
"""
xy = list(zip(self.x, self.y))
c = self.center()
rad = -math.radians(angle_deg)
x_new = []
y_new = []
for x, y in xy:
p = self._rotate_point(c, (x, y), rad)
x_new.append(p[0])
y_new.append(p[1])
return Route(x_new, y_new, z=self.z)
def mirror(self, about_x=False, about_y=False, about_axis=False):
"""Mirror Route x and y coordinates in the x and y planes as may be specified.
Args:
about_x (bool, optional): If True, mirror Route horizontally. Defaults to False.
about_y (bool, optional): If True, mirror Route vertically. Defaults to False.
about_axis (bool, optional): If True, mirror Route about the x or y axis. If False, mirror Route about the Route's center point. Defaults to False.
Returns:
Route: Return a new Route object.
"""
if about_axis:
c = (0, 0)
elif about_axis is False:
c = self.center()
if about_y:
x_new = []
for p in self.x:
x_new.append(c[0] + (c[0] - p))
else:
x_new = self.x
if about_x:
y_new = []
for p in self.y:
y_new.append(c[1] + (c[1] - p))
else:
y_new = self.y
return Route(x_new, y_new, z=self.z)
def fit_to_box(self, box_width, box_height, keep_aspect=True):
"""Scale the Route to fit within a specified bounding box of given width and height. This modifies the x, y and d Route attributes.
Args:
box_width (float): Desired width.
box_height (float): Desired height.
keep_aspect (bool, optional): If True, the route will be scaled equally in both x and y directions, ensuring the new route will fit within the smallest extent. If False, x and y coordinates will be scaled independently such that the modified route will fill the specified width and height. Note: this modifies the aspect ratio of the route. Defaults to True.
Returns:
Route: Return a new Route object.
"""
#Scale factors for width and height
if keep_aspect:
sfactor = max(self.height()/box_height, self.width()/box_width)
sfactor_x = sfactor
sfactor_y = sfactor
elif keep_aspect is False:
sfactor_x = abs(self.width()/box_width)
sfactor_y = abs(self.height()/box_height)
# scale x and y
x_new = self.x/sfactor_x
y_new = self.y/sfactor_y
return Route(x_new, y_new, z=self.z)
def optimise_bbox(self, box_width, box_height):
"""Rotate the route to the most efficient use of space given the width and height of a bounding box. This does not scale the route to fill the space but rather find the best aspect ratio of the route that best matches that of the specified box width and height.
The route is rotated 90 degrees clockwise in steps of one degree about the route's center point.
Args:
box_width (float): box width.
box_height (float): box height.
Returns:
Route: Return a new Route object.
"""
target = box_width/box_height
angles = []
spatial_eff = [] # spatial efficiency
for angle in
|
np.arange(-90, 91, 1)
|
numpy.arange
|
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import AffineTransformer
import numpy as np
import scipy.misc
# Input image retrieved from:
# https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
im = ndimage.imread('data/cat.jpg')
im = im / 255.
im = im.astype('float32')
# input batch
batch_size = 1
batch = np.expand_dims(im, axis=0)
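# np.expand_dims adds a leading batch axis, (H, W, C) -> (1, H, W, C); the batch
# is then replicated batch_size times along that axis with np.tile.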
batch =
|
np.tile(batch, [batch_size, 1, 1, 1])
|
numpy.tile
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 UPennEoR
# Licensed under the MIT License
from functools import reduce
import healpy as hp
import numpy as np
import pyuvdata
import ssht_numba as sshtn
from astropy import _erfa
from astropy import constants as const
from astropy import coordinates as coord
from astropy import units
from astropy.time import Time
from pyuvdata import UVData
from pyuvdata.utils import polstr2num
from scipy import linalg, optimize
from spin1_beam_model.cst_processing import ssht_power_spectrum
from .beam_models import az_shiftflip
# misc.
HERA_LAT = np.radians(-30.72152777777791)
HERA_LON = np.radians(21.428305555555557)
HERA_HEIGHT = 1073.0000000093132 # meters
def coords_to_location(array_lat, array_lon, array_height):
return coord.EarthLocation(
lat=array_lat * units.rad,
lon=array_lon * units.rad,
height=array_height * units.meter,
)
def kernel_cutoff_estimate(max_baseline_length_meters, max_freq_hz, width_estimate=100):
c_mps = const.c.to("m/s").value
ell_peak_est = 2 * np.pi * max_freq_hz * max_baseline_length_meters / c_mps
ell_cutoff = int(np.ceil(ell_peak_est + width_estimate))
if ell_cutoff % 2 != 0:
ell_cutoff += 1
return ell_cutoff
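# Worked example (illustrative): for a 300 m baseline at 200 MHz,
# ell_peak ~ 2*pi * 2e8 * 300 / c ~ 1258, so with the default width of 100
# the returned (even) cutoff is 1358.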
# antenna array stuff
# This method of finding redundancies is aimed at the HERA hexagonal lattices
# since it first finds clusters in baseline length, then for each length
# group finds clusters in angle.
# Not sure how well it would do with general arrays. Might work?
def b_arc(b, precision=3):
b = np.array(b)
if b[0] == 0.0 and b[1] == 0.0:
arc = np.nan
else:
if b[0] == 0.0: # i.e. b[1]/b[0] is -np.inf or np.inf
arc = np.pi / 2.0
else:
b_grp = np.around(np.linalg.norm(b), precision)
arc = np.around(b_grp * np.arctan(b[1] / b[0]), precision)
return arc
def B(b, precision=3):
return np.around(np.linalg.norm(b), precision)
def get_minimal_antenna_set(r_axis, precision=3):
ant_inds = np.arange(0, r_axis.shape[0])
a2u = {}
for (ri, ai) in zip(r_axis, ant_inds):
for (rj, aj) in zip(r_axis, ant_inds):
b_v = ri - rj
b, arc = B(b_v, precision=precision), b_arc(b_v, precision=precision)
a2u[(ai, aj)] = (b, arc)
u2a = {}
for (ai, aj) in a2u:
b, arc = a2u[(ai, aj)]
if b not in u2a:
u2a[b] = {}
if arc not in u2a[b]:
u2a[b][arc] = []
u2a[b][arc].append((ai, aj))
minimal_ant_pairs = []
for b in u2a:
for arc in u2a[b]:
for (i, j) in u2a[b][arc]:
ri = r_axis[i]
rj = r_axis[j]
bv_ij = ri - rj
if bv_ij[0] >= 0.0:
if (bv_ij[1] < 0.0) and (bv_ij[0] == 0.0):
pass
else:
minimal_ant_pairs.append([i, j])
break
return np.array(minimal_ant_pairs), u2a, a2u
def generate_hex_positions(lattice_scale=14.7, u_lim=3, v_lim=3, w_lim=3):
"""
Generates antenna positions on a hexagonal lattice.
The lattice is centered at the origin so that there is always
an antenna with coordinates [0, 0, 0]. The coordinate values are intended to
be in units of meters.
Parameters
----------
lattice_scale : float
The distance between any adjacent points in the lattice.
u_lim, v_lim, w_lim : int
The extent of the array in each hexagonal coordinate.
Returns
-------
r_axis : ndarray, shape (N_antennas, 3)
A list of coordinates for antennas in the array.
"""
u_ang = np.radians(-30.0 + 30.0)
v_ang = np.radians(210.0 + 30.0)
w_ang = np.radians(90.0 + 30.0)
e_u = np.array([np.cos(u_ang), np.sin(u_ang)])
e_v = np.array([np.cos(v_ang), np.sin(v_ang)])
e_w = np.array([np.cos(w_ang), np.sin(w_ang)])
u_axis = np.arange(0, u_lim)
v_axis = np.arange(0, v_lim)
w_axis = np.arange(0, w_lim)
r_vecs = []
for u in u_axis:
for v in v_axis:
for w in w_axis:
r_vecs.append(u * e_u + v * e_v + w * e_w)
r_vecs = np.unique(np.around(r_vecs, 8), axis=0)
r_axis = lattice_scale * np.append(r_vecs, np.zeros((r_vecs.shape[0], 1)), 1)
return r_axis
# coordinates and visibility function parameters
def JD2era(JD):
JD_time = Time(JD, format="jd", scale="ut1")
era = _erfa.era00(JD_time.jd1, JD_time.jd2)
return era
def JD2era_tot(JD):
jd_time = Time(JD, format="jd", scale="ut1")
# from USNO Circular 179, Eqn 2.10
D_U = jd_time.jd - 2451545.0
theta = 2 * np.pi * (0.7790572732640 + 1.00273781191135448 * D_U)
return theta
def era2JD(era, nearby_JD):
def f(jd):
return era - JD2era_tot(jd)
JD_out = optimize.newton(f, nearby_JD, tol=1e-8)
return JD_out
def era_tot2JD(theta):
"""
Parameters
----------
theta: float, array
Total earth rotation angle in radians
Returns
-------
JD: float, array
The Julian Date corresponding to the input total earth rotation angle.
This function (and its inverse) could be written using
the two-part JD form for higher precision; it is unclear if that is necessary at this point.
1 arcsecond of earth rotation is a difference of 7.69738107919693e-07 in JD,
which is getting into the last couple digits of a 64bit JD float.
"""
b = 1.00273781191135448
a = 0.7790572732640
D_U = ((theta / 2 / np.pi) - a) / b
JD = D_U + 2451545.0
return JD
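# Illustrative note (not in the original module): JD2era_tot and era_tot2JD are
# exact algebraic inverses of the same linear relation, e.g.
# era_tot2JD(JD2era_tot(2458000.5)) reproduces 2458000.5 up to floating-point round-off.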
def get_rotations_realistic(era_axis, JD_INIT, array_location):
p1 = np.array([1.0, 0.0, 0.0])
p2 = np.array([0.0, 1.0, 0.0])
p3 = np.array([0.0, 0.0, 1.0])
jd_axis = map(lambda era: era2JD(era, JD_INIT), era_axis)
JDs = Time(jd_axis, format="jd", scale="ut1")
rotations_axis =
|
np.zeros((JDs.size, 3, 3), dtype=np.float)
|
numpy.zeros
|
from __future__ import division, absolute_import, print_function
import numpy as np
import heapq as heapq
import itertools
from traffic_info.map import Map, Edge, Location
from traffic_info.car import Car
# priority queue for astar
class PriorityQueue:
REMOVED = '<removed-task>' # placeholder for a removed task
def __init__(self):
self.elements = []
self.entry_finder = {}
# counter = itertools.count() # unique sequence count
# mark an item as REMOVED
def remove(self, item):
entry = self.entry_finder.pop(item)
entry[-1] = self.REMOVED
# add a new item or update the priority
def put(self, item, priority):
if item in self.entry_finder:
self.entry_finder.pop(item)
# count = next(self.counter)
entry = [priority, item]
self.entry_finder[item] = entry
heapq.heappush(self.elements, entry)
# remove and return the lowest priority item
def get(self):
while self.elements:
priority, item = heapq.heappop(self.elements)
if item in self.entry_finder:
del self.entry_finder[item]
return item
raise KeyError('pop from an empty priority queue')
def getitem(self):
return self.entry_finder
def _calculate_footprint(length, edge, vehicle_number):
"""
This method is used to calculate the footprint of an edge
"""
avg_length = length
lane = edge.width
if lane == 0:
lane = 1
if edge.id in vehicle_number:
n = vehicle_number[edge.id] + 1
else:
n = 1
w = avg_length / (edge.length * lane)
fc = w * n
return fc
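# Worked example (illustrative): with avg_length = 5, edge.length = 100, lane = 2
# and n = 4 vehicles on the edge, w = 5 / (100 * 2) = 0.025 and fc = 0.025 * 4 = 0.1.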
def _calculate_distance(current, goal):
"""
This method is used to calculate the spatial distance between two edges
"""
start = current.position
end = goal.position
distance = np.sqrt(np.sum(np.square(start - end)))
return distance
def _sumFscore(FScore, agenda):
"""
This method is used to calculate the sum of FScore
"""
sum_fscore = 0
entries = agenda.getitem()
for item in entries:
sum_fscore = sum_fscore + FScore[item.id]
return sum_fscore
def _sumRscore(RScore, agenda):
"""
This method is used to calculate the sum of RScore
"""
sum_gscore = 0
entries = agenda.getitem()
for item in entries:
sum_gscore = sum_gscore + RScore[item.id]
return sum_gscore
def _reconstruct_path(came_from, start, goal):
"""
This method is used to construct the path from start edge to goal edge
"""
current = goal
path = []
while current != start:
path.append(current)
current = came_from.get(current)
path.append(start)
path.reverse()
return path
def arstar(graph, estimator, cars):
'''
This is the implementation of the AR* (ARstar) algorithm. Each car's start
location and destination are taken from the car objects themselves.
:param graph: map
:param estimator: Estimator, used to generate estimated travel time
:param cars: list of cars
:return: path id for each car in cars
'''
repulation_weight = 1
car_routes = {}
vehicle_number = {}
for car in cars:
agenda = PriorityQueue()
closeset = []
GScore = {}
HScore = {}
FScore = {}
RScore = {}
came_from = {}
start = car.location
destination = car.destination
begin = start.edge
goal = destination.edge
# GScore[begin.id] = estimator.estimate_time(begin, car, 0)
GScore[begin.id] = begin.length
HScore[begin.id] = _calculate_distance(begin, goal)
FScore[begin.id] = GScore[begin.id] + HScore[begin.id]
agenda.put(begin, FScore[begin.id])
came_from[begin.id] = None
# not sure
sum_edge_length = 0
graph_edges = graph.get_edges()
for edge_id, edge in graph_edges.items():
sum_edge_length = sum_edge_length + edge.length
avg_length = sum_edge_length / len(graph_edges)
RScore[begin.id] = _calculate_footprint(avg_length, begin, vehicle_number)
while agenda:
sumF = _sumFscore(FScore, agenda)
sumR = _sumRscore(RScore, agenda)
items = agenda.getitem()
for item in items:
FScore[item.id] = (1-repulation_weight) * FScore[item.id] / sumF + repulation_weight * RScore[item.id] / sumR
# update edge with new priority FScore
agenda.put(graph.get_edge(item.id), FScore[item.id])
# get the edge with smallest cost,agenda remove current
current = agenda.get()
if current == goal:
break
else:
# closeset add current
closeset.append(current)
for next_id in current.downflow_ids:
next = graph.get_edge(next_id)
if next in closeset:
continue
#tentative_g = GScore[current.id] + estimator.estimate_time(next, car, GScore[current.id])
tentative_g = GScore[current.id] + next.length
tentative_r = RScore[current.id] + _calculate_footprint(avg_length, next, vehicle_number)
items = agenda.getitem()
if next not in items:
came_from[next.id] = current.id
# agenda.put(next, priority)
HScore[next.id] = _calculate_distance(next, goal)
tentative_is_better = True
else:
if tentative_g < GScore[next.id]:
tentative_is_better = True
else:
tentative_is_better = False
if tentative_is_better:
# P[edge]
GScore[next.id] = tentative_g
RScore[next.id] = tentative_r
FScore[next.id] = GScore[next.id] + HScore[next.id]
agenda.put(next, FScore[next.id])
route = _reconstruct_path(came_from, begin.id, goal.id)
for edge_id in route:
if edge_id in vehicle_number:
vehicle_number[edge_id] = vehicle_number[edge_id] + 1
else:
vehicle_number[edge_id] = 1
car_routes[car.id] = route
return car_routes
# used for testing
if __name__ == "__main__":
edge_info = [
("A", 4, np.array([2, 4]), ["B", "D"]),
("B", 4,
|
np.array([6, 4])
|
numpy.array
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
def acoustics2D(iplot=False,kernel_language='Fortran',htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic'):
"""
Example python script for solving the 2d acoustics equations.
"""
if use_petsc:
from clawpack import petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver=pyclaw.ClawSolver2D()
solver.dimensional_split=True
elif solver_type=='sharpclaw':
solver=pyclaw.SharpClawSolver2D()
if kernel_language != 'Fortran':
raise Exception('Unrecognized value of kernel_language for 2D acoustics')
from clawpack.riemann import rp2_acoustics
solver.rp = rp2_acoustics
solver.cfl_max = 0.5
solver.cfl_desired = 0.45
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
# Initialize domain
mx=100; my=100
x = pyclaw.Dimension('x',-1.0,1.0,mx)
y = pyclaw.Dimension('y',-1.0,1.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 3
state = pyclaw.State(domain,num_eqn)
rho = 1.0
bulk = 4.0
cc = np.sqrt(bulk/rho)
zz = rho*cc
state.problem_data['rho']= rho
state.problem_data['bulk']=bulk
state.problem_data['zz']= zz
state.problem_data['cc']=cc
qinit(state)
claw = pyclaw.Controller()
claw.keep_copy = True
claw.solution = pyclaw.Solution(state,domain)
solver.dt_initial=np.min(domain.grid.delta)/state.problem_data['cc']*solver.cfl_desired
claw.solver = solver
claw.outdir = outdir
num_output_times = 10
claw.num_output_times = num_output_times
# Solve
claw.tfinal = 0.12
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)
return claw.frames[-1].state
def qinit(state,width=0.2):
grid = state.grid
x =grid.x.centers
y =grid.y.centers
Y,X = np.meshgrid(y,x)
r = np.sqrt(X**2 + Y**2)
state.q[0,:,:] = (
|
np.abs(r-0.5)
|
numpy.abs
|
# coding: utf-8
import numpy as np
import pandas as pd
import mplleaflet
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# from matplotlib.ticker import FixedLocator, LinearLocator, FormatStrFormatter
# import datetime
# Import data
df_GHCN = pd.read_csv('fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')
df_GHCN.head()
# Mask leap day
mask_leap = df_GHCN.loc[:, 'Date'].str.endswith('02-29', na=False)
df_GHCN = df_GHCN[~mask_leap]
# Create filters
is_2015 = df_GHCN.loc[:, 'Date'].str.startswith('2015', na=False)
is_tmin = df_GHCN.loc[:, 'Element'] == 'TMIN'
is_2015_tmin = is_2015 & is_tmin
is_2015_tmax = is_2015 & ~is_tmin
is_2005_tmin = ~is_2015 & is_tmin
is_2005_tmax = ~is_2015 & ~is_tmin
# Select datasets for plot
tmin_2005 = df_GHCN.loc[is_2005_tmin].groupby(df_GHCN.loc[:, 'Date'].str.replace(r'^\d*-', ''))['Data_Value'].min().reset_index()
tmax_2005 = df_GHCN.loc[is_2005_tmax].groupby(df_GHCN.loc[:, 'Date'].str.replace(r'^\d*-', ''))['Data_Value'].max().reset_index()
tmin_2015 = df_GHCN.loc[is_2015_tmin].groupby(df_GHCN.loc[:, 'Date'].str.replace(r'^\d*-', ''))['Data_Value'].min().reset_index()
tmax_2015 = df_GHCN.loc[is_2015_tmax].groupby(df_GHCN.loc[:, 'Date'].str.replace(r'^\d*-', ''))['Data_Value'].max().reset_index()
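# (Illustrative note: the regex r'^\d*-' strips the leading year from the
# 'YYYY-MM-DD' date strings, so the groupby key is the month-day and the
# min/max above are record extremes per calendar day over 2005-2014 or 2015.)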
# Select outside bandwidth
tlow_2015 = tmin_2015[tmin_2015['Data_Value'] < tmin_2005['Data_Value']]
thigh_2015 = tmax_2015[tmax_2015['Data_Value'] > tmax_2005['Data_Value']]
# Setup plot
plt.rcdefaults() # restore plot defaults
plt.figure(figsize=(20, 6))
fig, ax = plt.gcf(), plt.gca()
rc('mathtext', default='regular')
# Line plots record low and high
plt.plot(tmin_2005.index, tmin_2005.loc[:, 'Data_Value'], '-', c='black', linewidth=.7, label='Extremes 2005-2014')
plt.plot(tmax_2005.index, tmax_2005.loc[:, 'Data_Value'], '-', c='black', linewidth=.7, label='')
# Fill the area between the High and Low temperatures
plt.gca().fill_between(tmin_2005.index, tmin_2005.loc[:, 'Data_Value'], tmax_2005.loc[:, 'Data_Value'],
facecolor='gray', alpha=0.25)
# Scatter outside bandwidth
plt.scatter(tlow_2015.index, tlow_2015.loc[:, 'Data_Value'], c='red', s=25, marker = 'o', alpha=.6, label='Extremes 2015')
plt.scatter(thigh_2015.index, thigh_2015.loc[:, 'Data_Value'], c='red', s=25, marker = 'o', alpha=.6, label='')
# Title, axis, labels, legend
plt.title(r'2015 Temperature Extremes - Outside 2005-2014' +
'\n(Ann Arbor, Michigan, United States)')
plt.suptitle('')
ax.legend(loc=4, frameon=False)
# yticks
ylim = ax.get_ylim()
y_ticks =
|
np.arange((ylim[0]//50 - 1)*50, (ylim[1]//50 + 1)*50, 100)
|
numpy.arange
|
import os
import numpy as np
from astropy.table import Table
import astropy.units as u
import astropy.constants as const
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import mpl_scatter_density # adds projection='scatter_density'
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import extinction
from dustmaps.sfd import SFDQuery
import context
from emlines_estimator import EmLine3Filters
def mag2flux(m, weff):
fnu = np.power(10, -0.4 * (m + 48.6)) * context.fnu_unit
return (fnu * const.c.to("km/s") / weff[:,None]**2).to(context.flam_unit)
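# (Illustrative note: this converts AB magnitudes to f_nu via the standard -48.6
# zero point and then to f_lambda as f_lambda = f_nu * c / lambda_eff**2, with
# the unit bookkeeping delegated to astropy and the context units.)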
def test_sdss_halpha(fname, fluxkey, magkey, magerrprefix, bands,
scatter_density=True, zkey="z", xmin=-17, xmax=-13,
output=None, bands_ebv=None, wline=6562.8 * u.AA):
bands_ebv = ["G", "I"] if bands_ebv is None else bands_ebv
############################################################################
# Reading transmission curves for S-PLUS
filters_dir = os.path.join(os.getcwd(), "filter_curves-master")
filenames = sorted([os.path.join(filters_dir, _) for _ in os.listdir( \
filters_dir)])
filternames = [os.path.split(_)[1].replace(".dat", "") for _ in filenames]
filternames = [_.replace("F0", "F").replace("JAVA", "") for _ in
filternames]
filternames = [_.replace("SDSS", "").upper() for _ in filternames]
fcurves = [np.loadtxt(f) for f in filenames]
wcurves = [curve[:,0] * u.AA for curve in fcurves]
trans = [curve[:,1] for curve in fcurves]
wcurves = dict(zip(filternames, wcurves))
trans = dict(zip(filternames, trans))
halpha = EmLine3Filters(wline, [_.upper() for _ in bands],
wcurves, trans)
############################################################################
# Reading catalog and remove objects out of z-range
wdir = os.path.join(context.home_dir, "catalogs")
filename = os.path.join(wdir, fname)
table = Table.read(filename)
if zkey in table.colnames:
table = table[table[zkey] < 0.02]
############################################################################
# Cleaning table against large errors in magnitudes
magerrs = np.array([table["{}{}{}".format(magerrprefix, band, magkey)].data
for band in bands])
idx = np.where(
|
np.nanmax(magerrs, axis=0)
|
numpy.nanmax
|
""" PIPA data I/O
"""
import numpy as np
from numpy import linalg as LA
import random
import sklearn
import sklearn.metrics
import sklearn.preprocessing
from PIPA_db import Manager
import performance_test_config as config
import CRF_opt
from multiprocessing import Pool, Value, Lock
from multiprocessing import cpu_count
# create CRF optimizer
crf = CRF_opt.CRFOptimizer()
lock = Lock()
shared_counter = Value('i', 0)
total = Value('i', 0)
cls_scores_photo = []
def worker_func(photo_idx):
global cls_scores_photo
global shared_counter, total
with lock:
print('processing photo {0}'.format(photo_idx))
predicts_photo, predicts_photo_before_refine = crf.run_LBP(cls_scores_photo[photo_idx])
with lock:
shared_counter.value += len(predicts_photo)
print('{0}/{1}:({2}) predictions are refined.'.format(shared_counter.value, total.value, photo_idx))
return predicts_photo
def test_pipeline(manager):
# create test splits
photos = manager.get_testing_photos()
print('randomly splitting test photos...')
test_photos_0, test_photos_1 = split_test_photo(photos)
test_detections_0 = Manager.get_detections_from_photos(test_photos_0)
test_detections_1 = Manager.get_detections_from_photos(test_photos_1)
if not config.refine_with_photo_level_context:
# compute feature similarity
feature_similarity = {}
for feature_name in config.features:
similarity = cal_feature_similarity(test_detections_0, test_detections_1, feature_name)
feature_similarity[feature_name] = similarity
similarity = fuse_feature_similarity(feature_similarity)
# save similarity
import cPickle as pickle
pickle.dump(similarity , open('test_similarity.pkl', 'wb'))
# compute accuracy with each modality
module_accuracy = {}
for feature_name in config.features:
module_accuracy[feature_name] = evaluate_accuracy_with_inst_similarity(test_detections_0, test_detections_1, feature_similarity[feature_name])
else:
import cPickle as pickle
similarity = pickle.load(open('test_similarity.pkl', 'rb'))
# use CRF to refine the predicted results
if config.refine_with_photo_level_context:
print('extracting class scores from instance similarity...')
num_identity = manager.get_num_labels_testing()
lbl_map_global_to_test = manager.get_label_mapping_global_to_test()
lbl_map_test_to_global = manager.get_label_mapping_test_to_global()
cls_scores0, cls_scores1 = convert_inst_scores_to_cls_scores(similarity, test_detections_0, test_detections_1, num_identity, lbl_map_global_to_test)
global shared_counter, total, cls_scores_photo
# refine the predictions photo-by-photo for test split 1
print('building binary potential matrix for test split 1')
crf.build_compat_func(test_photos_0, lbl_map_global_to_test)
# preparing tasks
print('preparing tasks...')
shared_counter.value = 0
total.value = len(cls_scores0)
cls_scores_photo = []
cur = 0
for photo in test_photos_1:
cls_scores_photo.append(cls_scores1[cur: cur + len(photo.human_detections)])
cur += len(photo.human_detections)
# map the tasks to workers
print('start multi-processing...')
print('creating worker pool with {0} workers'.format(cpu_count()))
worker_pool = Pool(processes=32)
predicts_1 = worker_pool.map(worker_func, range(len(cls_scores_photo)))
predicts_1 = [item for sublist in predicts_1 for item in sublist]
predicts_1 = [lbl_map_test_to_global[predict] for predict in predicts_1]
predicts_1 = np.array(predicts_1)
# refine the predictions photo-by-photo for test split 0
print('building binary potential matrix for test split 0')
crf.build_compat_func(test_photos_1, lbl_map_global_to_test)
# preparing tasks
print('preparing tasks...')
shared_counter.value = 0
total.value = len(cls_scores0)
cur = 0
for photo in test_photos_0:
cls_scores_photo.append(cls_scores0[cur: cur + len(photo.human_detections)])
cur += len(photo.human_detections)
# map the tasks to workers
print('start multi-processing...')
print('creating worker pool with {0} workers'.format(cpu_count()))
worker_pool = Pool(processes=32)
predicts_0 = worker_pool.map(worker_func, range(len(cls_scores_photo)))
predicts_0 = [item for sublist in predicts_0 for item in sublist]
predicts_0 = [lbl_map_test_to_global[predict] for predict in predicts_0]
predicts_0 = np.array(predicts_0)
# compute accuracy
accuracy = evaluate_accuracy_with_predicts(test_detections_0, test_detections_1, predicts_0, predicts_1)
print('fused accuracy after MRF optimization: {0}'.format(accuracy))
exit(-1)
else:
accuracy = evaluate_accuracy_with_inst_similarity(test_detections_0, test_detections_1, similarity)
return accuracy, module_accuracy
def evaluate_accuracy_with_predicts(test_0, test_1, predicts_0, predicts_1):
identity_set_0 = get_identity_set(test_0)
identity_set_1 = get_identity_set(test_1)
total_count = len(test_0) + len(test_1)
correct = 0
for i in range(len(test_0)):
detection = test_0[i]
gt_label = detection.identity_id
predict = predicts_0[i]
if gt_label not in identity_set_1:
total_count -= 1
if predict == gt_label:
correct += 1
for i in range(len(test_1)):
detection = test_1[i]
gt_label = detection.identity_id
predict = predicts_1[i]
if gt_label not in identity_set_0:
total_count -= 1
if predict == gt_label:
correct += 1
accuracy = float(correct)/total_count
return accuracy
def evaluate_accuracy_with_inst_similarity(test_0, test_1, similarity):
identity_set_0 = get_identity_set(test_0)
identity_set_1 = get_identity_set(test_1)
total_count = len(test_0) + len(test_1)
correct = 0
test_result_0to1 =
|
np.argmax(similarity, axis=1)
|
numpy.argmax
|
# Main Room class using to encapsulate the room acoustics simulator
# Copyright (C) 2019 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
r"""
Room
====
The three main classes are :py:obj:`pyroomacoustics.room.Room`,
:py:obj:`pyroomacoustics.soundsource.SoundSource`, and
:py:obj:`pyroomacoustics.beamforming.MicrophoneArray`. On a high level, a
simulation scenario is created by first defining a room to which a few sound
sources and a microphone array are attached. The actual audio is attached to
the source as raw audio samples.
Then, a simulation method is used to create artificial room impulse responses
(RIR) between the sources and microphones. The current default method is the
image source which considers the walls as perfect reflectors. An experimental
hybrid simulator based on image source method (ISM) [1]_ and ray tracing (RT) [2]_, [3]_, is also available. Ray tracing
better captures the late reflections and can also model effects such as
scattering.
The microphone signals are then created by convolving audio samples associated
to sources with the appropriate RIR. Since the simulation is done on
discrete-time signals, a sampling frequency is specified for the room and the
sources it contains. Microphones can optionally operate at a different sampling
frequency; a rate conversion is done in this case.
Simulating a Shoebox Room with the Image Source Model
-----------------------------------------------------
We will first walk through the steps to simulate a shoebox-shaped room in 3D.
We use the ISM to find all image sources up to a maximum specified order, and
room impulse responses (RIR) are generated from their positions.
The code for the full example can be found in `examples/room_from_rt60.py`.
Create the room
~~~~~~~~~~~~~~~
So-called shoebox rooms are parallelepipedic rooms with 4 or 6 walls (in 2D and
3D respectively), all at right angles. They are defined by a single vector that
contains the lengths of the walls. They have the advantage of being simple to
define and very efficient to simulate. In the following example, we define a
``9m x 7.5m x 3.5m`` room. In addition, we use `Sabine's formula <https://en.wikipedia.org/wiki/Reverberation>`_
to find the wall energy absorption and maximum order of the ISM required
to achieve a desired reverberation time (*RT60*, i.e. the time it takes for
the RIR to decay by 60 dB).
.. code-block:: python
import pyroomacoustics as pra
# The desired reverberation time and dimensions of the room
rt60 = 0.5 # seconds
room_dim = [9, 7.5, 3.5] # meters
# We invert Sabine's formula to obtain the parameters for the ISM simulator
e_absorption, max_order = pra.inverse_sabine(rt60, room_dim)
# Create the room
room = pra.ShoeBox(
room_dim, fs=16000, materials=pra.Material(e_absorption), max_order=max_order
)
The second argument is the sampling frequency at which the RIR will be
generated. Note that the default value of ``fs`` is 8 kHz.
The third argument is the material of the wall, that itself takes the absorption as a parameter.
The fourth and last argument is the maximum number of reflections allowed in the ISM.
.. note::
Note that Sabine's formula is only an approximation and that the actually
simulated RT60 may vary by quite a bit.
.. warning::
Until recently, rooms would take an ``absorption`` parameter that was
actually **not** the energy absorption we use now. The ``absorption``
parameter is now deprecated and will be removed in the future.
Add sources and microphones
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sources are fairly straightforward to create. They take their location as a single
mandatory argument, and a signal and start time as optional arguments. Here we
create a source located at ``[2.5, 3.73, 1.76]`` within the room, that will utter
the content of the wav file ``speech.wav`` starting at ``1.3 s`` into the
simulation. The ``signal`` keyword argument to the
:py:func:`~pyroomacoustics.room.Room.add_source` method should be a
one-dimensional ``numpy.ndarray`` containing the desired sound signal.
.. code-block:: python
# import a mono wavfile as the source signal
# the sampling frequency should match that of the room
from scipy.io import wavfile
_, audio = wavfile.read('speech.wav')
# place the source in the room
room.add_source([2.5, 3.73, 1.76], signal=audio, delay=1.3)
The locations of the microphones in the array should be provided in a numpy
``nd-array`` of size ``(ndim, nmics)``, that is each column contains the
coordinates of one microphone. Note that the microphones' sampling frequency can differ from that
of the room, in which case resampling will occur. Here, we create an array
with two microphones placed at ``[6.3, 4.87, 1.2]`` and ``[6.3, 4.93, 1.2]``.
.. code-block:: python
# define the locations of the microphones
import numpy as np
mic_locs = np.c_[
[6.3, 4.87, 1.2], # mic 1
[6.3, 4.93, 1.2], # mic 2
]
# finally place the array in the room
room.add_microphone_array(mic_locs)
A number of routines exist to create regular array geometries in 2D; an illustrative example follows the list below.
- :py:func:`~pyroomacoustics.beamforming.linear_2D_array`
- :py:func:`~pyroomacoustics.beamforming.circular_2D_array`
- :py:func:`~pyroomacoustics.beamforming.square_2D_array`
- :py:func:`~pyroomacoustics.beamforming.poisson_2D_array`
- :py:func:`~pyroomacoustics.beamforming.spiral_2D_array`
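For instance, a circular array can be built and placed in the 3D room above as in
the following sketch (this assumes the ``circular_2D_array(center, M, phi0, radius)``
signature; a constant height row is appended to obtain 3D coordinates):
.. code-block:: python
import numpy as np
import pyroomacoustics as pra
# 8 microphones on a 5 cm radius circle centered at (6.3, 4.9), at 1.2 m height
R = pra.circular_2D_array(center=[6.3, 4.9], M=8, phi0=0.0, radius=0.05)
R = np.concatenate((R, np.ones((1, 8)) * 1.2), axis=0)
room.add_microphone_array(R)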
Adding source or microphone directivity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The directivity pattern of a source or microphone can be conveniently set
through the ``directivity`` keyword argument.
First, a :py:obj:`pyroomacoustics.directivities.Directivity` object needs to be created. As of
Sep 6, 2021, only frequency-independent directivities from the
`cardioid family <https://en.wikipedia.org/wiki/Microphone#Cardioid,_hypercardioid,_supercardioid,_subcardioid>`_
are supported, namely figure-eight, hypercardioid, cardioid, and subcardioid.
Below is how a :py:obj:`pyroomacoustics.directivities.Directivity` object can be created, for
example a hypercardioid pattern pointing at an azimuth angle of 90 degrees and a colatitude
angle of 15 degrees.
.. code-block:: python
# create directivity object
from pyroomacoustics.directivities import (
DirectivityPattern,
DirectionVector,
CardioidFamily,
)
dir_obj = CardioidFamily(
orientation=DirectionVector(azimuth=90, colatitude=15, degrees=True),
pattern_enum=DirectivityPattern.HYPERCARDIOID,
)
After creating a :py:obj:`pyroomacoustics.directivities.Directivity` object, it is straightforward
to set the directivity of a source, microphone, or microphone array, namely by using the
``directivity`` keyword argument.
For example, to set a source's directivity:
.. code-block:: python
# place the source in the room
room.add_source(position=[2.5, 3.73, 1.76], directivity=dir_obj)
To set a single microphone's directivity:
.. code-block:: python
# place the microphone in the room
room.add_microphone(loc=[2.5, 5, 1.76], directivity=dir_obj)
The same directivity pattern can be used for all microphones in an array:
.. code-block:: python
# place microphone array in the room
import numpy as np
mic_locs = np.c_[
[6.3, 4.87, 1.2], # mic 1
[6.3, 4.93, 1.2], # mic 2
]
room.add_microphone_array(mic_locs, directivity=dir_obj)
Or a different directivity can be used for each microphone by passing a list of
:py:obj:`pyroomacoustics.directivities.Directivity` objects:
.. code-block:: python
# place the microphone array in the room
room.add_microphone_array(mic_locs, directivity=[dir_1, dir_2])
.. warning::
As of Sep 6, 2021, setting directivity patterns for sources and microphones is only supported for
the image source method (ISM). Moreover, source directivities are only supported for
shoebox-shaped rooms.
Create the Room Impulse Response
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
At this point, the RIRs are simply created by invoking the ISM via
:py:func:`~pyroomacoustics.room.Room.image_source_model`. This function will
generate all the image sources up to the order required and use them to
generate the RIRs, which will be stored in the ``rir`` attribute of ``room``.
The attribute ``rir`` is a list of lists, where the outer list runs over microphones
and the inner list over sources.
.. code-block:: python
room.compute_rir()
# plot the RIR between mic 1 and source 0
import matplotlib.pyplot as plt
plt.plot(room.rir[1][0])
plt.show()
.. warning::
The simulator uses a fractional delay filter that introduces a global delay
in the RIR. The delay can be obtained as follows.
.. code-block:: python
global_delay = pra.constants.get("frac_delay_length") // 2
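For instance, the aligned part of a computed RIR can be recovered by dropping this
delay (a minimal sketch reusing the constant and the RIR indexing from above):
.. code-block:: python
global_delay = pra.constants.get("frac_delay_length") // 2
rir_aligned = room.rir[1][0][global_delay:]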
Simulate sound propagation
~~~~~~~~~~~~~~~~~~~~~~~~~~
By calling :py:func:`~pyroomacoustics.room.Room.simulate`, a convolution of the
signal of each source (if not ``None``) will be performed with the
corresponding room impulse response. The output from the convolutions will be summed up
at the microphones. The result is stored in the ``signals`` attribute of ``room.mic_array``
with each row corresponding to one microphone.
.. code-block:: python
room.simulate()
# plot signal at microphone 1
plt.plot(room.mic_array.signals[1,:])
Full Example
~~~~~~~~~~~~
This example is partly extracted from `./examples/room_from_rt60.py`.
.. code-block:: python
import numpy as np
import matplotlib.pyplot as plt
import pyroomacoustics as pra
from scipy.io import wavfile
# The desired reverberation time and dimensions of the room
rt60_tgt = 0.3 # seconds
room_dim = [10, 7.5, 3.5] # meters
# import a mono wavfile as the source signal
# the sampling frequency should match that of the room
fs, audio = wavfile.read("examples/samples/guitar_16k.wav")
# We invert Sabine's formula to obtain the parameters for the ISM simulator
e_absorption, max_order = pra.inverse_sabine(rt60_tgt, room_dim)
# Create the room
room = pra.ShoeBox(
room_dim, fs=fs, materials=pra.Material(e_absorption), max_order=max_order
)
# place the source in the room
room.add_source([2.5, 3.73, 1.76], signal=audio, delay=0.5)
# define the locations of the microphones
mic_locs = np.c_[
[6.3, 4.87, 1.2],  # mic 1
[6.3, 4.93, 1.2],  # mic 2
]
# finally place the array in the room
room.add_microphone_array(mic_locs)
# Run the simulation (this will also build the RIR automatically)
room.simulate()
room.mic_array.to_wav(
f"examples/samples/guitar_16k_reverb_{args.method}.wav",
norm=True,
bitdepth=np.int16,
)
# measure the reverberation time
rt60 = room.measure_rt60()
print("The desired RT60 was {}".format(rt60_tgt))
print("The measured RT60 is {}".format(rt60[1, 0]))
# Create a plot
plt.figure()
# plot one of the RIR. both can also be plotted using room.plot_rir()
rir_1_0 = room.rir[1][0]
plt.subplot(2, 1, 1)
plt.plot(np.arange(len(rir_1_0)) / room.fs, rir_1_0)
plt.title("The RIR from source 0 to mic 1")
plt.xlabel("Time [s]")
# plot signal at microphone 1
plt.subplot(2, 1, 2)
plt.plot(room.mic_array.signals[1, :])
plt.title("Microphone 1 signal")
plt.xlabel("Time [s]")
plt.tight_layout()
plt.show()
Hybrid ISM/Ray Tracing Simulator
--------------------------------
.. warning::
The hybrid simulator has not been thoroughly tested yet and should be used with
care. The exact implementation and default settings may also change in the future.
Currently, the default behavior of :py:obj:`~pyroomacoustics.room.Room`
and :py:obj:`~pyroomacoustics.room.ShoeBox` has been kept as in previous
versions of the package. Bugs and user experience can be reported on
`github <https://github.com/LCAV/pyroomacoustics>`_.
The hybrid ISM/RT simulator uses ISM to simulate the early reflections in the RIR
and RT for the diffuse tail. Our implementation is based on [2]_ and [3]_.
The simulator has the following features.
- Scattering: Wall scattering can be defined by assigning a scattering
coefficient to the walls together with the energy absorption.
- Multi-band: The simulation can be carried out with different parameters for
different `octave bands <https://en.wikipedia.org/wiki/Octave_band>`_. The
octave bands go from 125 Hz to half the sampling frequency.
- Air absorption: The frequency-dependent absorption of the air can be turned on
by providing the keyword argument ``air_absorption=True`` to the room
constructor.
Here is a simple example using the hybrid simulator.
We suggest using ``max_order=3`` with the hybrid simulator.
.. code-block:: python
# Create the room
room = pra.ShoeBox(
room_dim,
fs=16000,
materials=pra.Material(e_absorption),
max_order=3,
ray_tracing=True,
air_absorption=True,
)
# Activate the ray tracing
room.set_ray_tracing()
A few example programs are provided in ``./examples``.
- ``./examples/ray_tracing.py`` demonstrates use of ray tracing for rooms of different sizes
and with different amounts of reverberation
- ``./examples/room_L_shape_3d_rt.py`` shows how to simulate a polyhedral room
- ``./examples/room_from_stl.py`` demonstrates how to import a model from an STL file
Wall Materials
--------------
The wall materials are handled by the
:py:obj:`~pyroomacoustics.parameters.Material` objects. A material is defined
by at least one *absorption* coefficient that represents the ratio of sound
energy absorbed by a wall upon reflection.
A material may have multiple absorption coefficients corresponding to different
absorption values at different octave bands.
When only one coefficient is provided, the absorption is assumed to be uniform at
all frequencies.
In addition, materials may have one or more scattering coefficients
corresponding to the ratio of energy scattered upon reflection.
The materials can be defined by providing the coefficients directly, or they can
be defined by choosing a material from the :doc:`materials database<pyroomacoustics.materials.database>` [2]_.
.. code-block:: python
import pyroomacoustics as pra
m = pra.Material(energy_absorption="hard_surface")
room = pra.ShoeBox([9, 7.5, 3.5], fs=16000, materials=m, max_order=17)
We can use different materials for different walls. In this case, the materials should be
provided in a dictionary. For a shoebox room, this can be done as follows.
.. code-block:: python
import pyroomacoustics as pra
m = pra.make_materials(
ceiling="hard_surface",
floor="6mm_carpet",
east="brickwork",
west="brickwork",
north="brickwork",
south="brickwork",
)
room = pra.ShoeBox(
[9, 7.5, 3.5], fs=16000, materials=m, max_order=17, air_absorption=True, ray_tracing=True
)
.. note::
For shoebox rooms, the walls are labelled as follows:
- ``west``/``east`` for the walls in the y-z plane with small/large x coordinates, respectively
- ``south``/``north`` for the walls in the x-z plane with small/large y coordinates, respectively
- ``floor``/``ceiling`` for the walls in the x-y plane with small/large z coordinates, respectively
Controlling the signal-to-noise ratio
-------------------------------------
It is in general necessary to scale the signals from different sources to
obtain a specific signal-to-noise or signal-to-interference ratio (SNR and SIR,
respectively). This can be done by passing some options to the :py:func:`simulate()`
function. Because the relative amplitude of signals will change at different microphones
due to propagation, it is necessary to choose a reference microphone. By default, this
will be the first microphone in the array (index 0). The simplest choice is to choose
the variance of the noise \\(\\sigma_n^2\\) to achieve a desired SNR with respect
to the cumulative signal from all sources. Assuming that the signals from all sources
are scaled to have the same amplitude (e.g., unit amplitude) at the reference microphone,
the SNR is defined as
.. math::
\mathsf{SNR} = 10 \log_{10} \frac{K}{\sigma_n^2}
where \\(K\\) is the number of sources. For example, an SNR of 10 decibels (dB)
can be obtained using the following code
.. code-block:: python
room.simulate(reference_mic=0, snr=10)
Sometimes, more challenging normalizations are necessary. In that case,
a custom callback function can be provided to simulate. For example,
we can imagine a scenario where we have ``n_src`` sources out of which ``n_tgt``
are the targets, the rest being interferers. We will assume all
targets have unit variance, and all interferers have equal
variance \\(\\sigma_i^2\\) (at the reference microphone). In
addition, there is uncorrelated noise \\(\\sigma_n^2\\) at
every microphone. We will define SNR and SIR with respect
to a single target source:
.. math::
\mathsf{SNR} & = 10 \log_{10} \frac{1}{\sigma_n^2}
\mathsf{SIR} & = 10 \log_{10} \frac{1}{(\mathsf{n_{src}} - \mathsf{n_{tgt}}) \sigma_i^2}
The callback function ``callback_mix`` takes as argument an nd-array
``premix_signals`` of shape ``(n_src, n_mics, n_samples)`` that contains the
microphone signals prior to mixing. The signal propagated from the ``k``-th
source to the ``m``-th microphone is contained in ``premix_signals[k,m,:]``. It
is possible to provide optional arguments to the callback via
``callback_mix_kwargs`` optional argument. Here is the code
implementing the example described.
.. code-block:: python
# the extra arguments are given in a dictionary
callback_mix_kwargs = {
'snr' : 30, # SNR target is 30 decibels
'sir' : 10, # SIR target is 10 decibels
'n_src' : 6,
'n_tgt' : 2,
'ref_mic' : 0,
}
def callback_mix(premix, snr=0, sir=0, ref_mic=0, n_src=None, n_tgt=None):
# first normalize all separate recordings to have unit power at the reference microphone
p_mic_ref = np.std(premix[:,ref_mic,:], axis=1)
premix /= p_mic_ref[:,None,None]
# now compute the power of interference signal needed to achieve desired SIR
sigma_i = np.sqrt(10 ** (- sir / 10) / (n_src - n_tgt))
premix[n_tgt:n_src,:,:] *= sigma_i
# compute noise variance
sigma_n = np.sqrt(10 ** (- snr / 10))
# Mix down the recorded signals
mix = np.sum(premix[:n_src,:], axis=0) + sigma_n * np.random.randn(*premix.shape[1:])
return mix
# Run the simulation
room.simulate(
callback_mix=callback_mix,
callback_mix_kwargs=callback_mix_kwargs,
)
mics_signals = room.mic_array.signals
In addition, it is desirable in some cases to obtain the microphone signals
with individual sources, prior to mixing. For example, this is useful to
evaluate the output from blind source separation algorithms. In this case, the
``return_premix`` argument should be set to ``True``
.. code-block:: python
premix = room.simulate(return_premix=True)
Reverberation Time
------------------
The reverberation time (RT60) is defined as the time needed for the energy of
the RIR to decrease by 60 dB. It is a useful measure of the amount of
reverberation. We provide a method in the
:py:func:`~pyroomacoustics.experimental.rt60.measure_rt60` to measure the RT60
of recorded or simulated RIR.
The method is also directly integrated in the :py:obj:`~pyroomacoustics.room.Room` object as the method :py:func:`~pyroomacoustics.room.Room.measure_rt60`.
.. code-block:: python
# assuming the simulation has already been carried out
rt60 = room.measure_rt60()
for m in range(room.n_mics):
for s in range(room.n_sources):
print(
"RT60 between the {}th mic and {}th source: {:.3f} s".format(m, s, rt60[m, s])
)
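The standalone function can also be applied to a single simulated or recorded
impulse response; a minimal sketch, assuming the simulation above has already been run:
.. code-block:: python
from pyroomacoustics.experimental import measure_rt60
rt60_1_0 = measure_rt60(room.rir[1][0], fs=room.fs)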
References
----------
.. [1] <NAME> and <NAME>, *Image method for efficiently simulating small-room acoustics,* J. Acoust. Soc. Am., vol. 65, no. 4, p. 943, 1979.
.. [2] <NAME>, Auralization, 1st ed. Berlin: Springer-Verlag, 2008, pp. 1-340.
.. [3] <NAME>, Physically based real-time auralization of interactive virtual environments. PhD Thesis, RWTH Aachen University, 2011.
"""
from __future__ import division, print_function
import math
import warnings
import numpy as np
import scipy.spatial as spatial
from . import beamforming as bf
from . import libroom
from .acoustics import OctaveBandsFactory, rt60_eyring, rt60_sabine
from .beamforming import MicrophoneArray
from .directivities import CardioidFamily, source_angle_shoebox
from .experimental import measure_rt60
from .libroom import Wall, Wall2D
from .parameters import Material, Physics, constants, eps, make_materials
from .soundsource import SoundSource
from .utilities import angle_function
def wall_factory(corners, absorption, scattering, name=""):
"""Call the correct method according to wall dimension"""
if corners.shape[0] == 3:
return Wall(corners, absorption, scattering, name,)
elif corners.shape[0] == 2:
return Wall2D(corners, absorption, scattering, name,)
else:
raise ValueError("Rooms can only be 2D or 3D")
def sequence_generation(volume, duration, c, fs, max_rate=10000):
# repeated constant
fpcv = 4 * np.pi * c ** 3 / volume
# initial time
t0 = ((2 * np.log(2)) / fpcv) ** (1.0 / 3.0)
times = [t0]
while times[-1] < t0 + duration:
# uniform random variable
z = np.random.rand()
# rate of the point process at this time
mu = np.minimum(fpcv * (t0 + times[-1]) ** 2, max_rate)
# time interval to next point
dt = np.log(1 / z) / mu
times.append(times[-1] + dt)
# convert from continuous to discrete time
indices = (np.array(times) * fs).astype(int)
seq = np.zeros(indices[-1] + 1)
seq[indices] = np.random.choice([1, -1], size=len(indices))
return seq
def find_non_convex_walls(walls):
"""
Finds the walls that are not in the convex hull
Parameters
----------
walls: list of Wall objects
The walls that compose the room
Returns
-------
list of int
The indices of the walls not in the convex hull
"""
all_corners = []
for wall in walls[1:]:
all_corners.append(wall.corners.T)
X = np.concatenate(all_corners, axis=0)
convex_hull = spatial.ConvexHull(X, incremental=True)
# Now we need to check which walls are on the surface
# of the hull
in_convex_hull = [False] * len(walls)
for i, wall in enumerate(walls):
# We check if the center of the wall is co-linear or co-planar
# with a face of the convex hull
point = np.mean(wall.corners, axis=1)
for simplex in convex_hull.simplices:
if point.shape[0] == 2:
# check if co-linear
p0 = convex_hull.points[simplex[0]]
p1 = convex_hull.points[simplex[1]]
if libroom.ccw3p(p0, p1, point) == 0:
# co-linear point add to hull
in_convex_hull[i] = True
elif point.shape[0] == 3:
# Check if co-planar
p0 = convex_hull.points[simplex[0]]
p1 = convex_hull.points[simplex[1]]
p2 = convex_hull.points[simplex[2]]
normal = np.cross(p1 - p0, p2 - p0)
if np.abs(np.inner(normal, point - p0)) < eps:
# co-planar point found!
in_convex_hull[i] = True
return [i for i in range(len(walls)) if not in_convex_hull[i]]
class Room(object):
"""
A Room object has as attributes a collection of
:py:obj:`pyroomacoustics.wall.Wall` objects, a
:py:obj:`pyroomacoustics.beamforming.MicrophoneArray` array, and a list of
:py:obj:`pyroomacoustics.soundsource.SoundSource`. The room can be two
dimensional (2D), in which case the walls are simply line segments. A factory method
:py:func:`pyroomacoustics.room.Room.from_corners`
can be used to create the room from a polygon. In three dimensions (3D), the
walls are two dimensional polygons, namely a collection of points lying on a
common plane. Creating rooms in 3D is more tedious and for convenience a method
:py:func:`pyroomacoustics.room.Room.extrude` is provided to lift a 2D room
into 3D space by adding vertical walls and parallel floor and ceiling.
The Room is sub-classed by :py:obj:`pyroomacoustics.room.ShoeBox` which
creates a rectangular (2D) or parallelepipedic (3D) room. Such rooms
benefit from an efficient algorithm for the image source method.
:attribute walls: (Wall array) list of walls forming the room
:attribute fs: (int) sampling frequency
:attribute max_order: (int) the maximum computed order for images
:attribute sources: (SoundSource array) list of sound sources
:attribute mics: (MicrophoneArray) array of microphones
:attribute corners: (numpy.ndarray 2xN or 3xN, N=number of walls) array containing a point belonging to each wall, used for calculations
:attribute absorption: (numpy.ndarray size N, N=number of walls) array containing the absorption factor for each wall, used for calculations
:attribute dim: (int) dimension of the room (2 or 3 meaning 2D or 3D)
:attribute wallsId: (int dictionary) stores the mapping "wall name -> wall id (in the array walls)"
Parameters
----------
walls: list of Wall or Wall2D objects
The walls forming the room.
fs: int, optional
The sampling frequency in Hz. Default is 8000.
t0: float, optional
The global starting time of the simulation in seconds. Default is 0.
max_order: int, optional
The maximum reflection order in the image source model. Default is 1,
namely direct sound and first order reflections.
sigma2_awgn: float, optional
The variance of the additive white Gaussian noise added during
simulation. By default, none is added.
sources: list of SoundSource objects, optional
Sources to place in the room. Sources can be added after room creation
with the `add_source` method by providing coordinates.
mics: MicrophoneArray object, optional
The microphone array to place in the room. A single microphone or
microphone array can be added after room creation with the
`add_microphone_array` method.
temperature: float, optional
The air temperature in the room in degree Celsius. By default, set so
that speed of sound is 343 m/s.
humidity: float, optional
The relative humidity of the air in the room (between 0 and 100). By
default set to 0.
air_absorption: bool, optional
If set to True, absorption of sound energy by the air will be
simulated.
ray_tracing: bool, optional
If set to True, the ray tracing simulator will be used along with
image source model.
"""
def __init__(
self,
walls,
fs=8000,
t0=0.0,
max_order=1,
sigma2_awgn=None,
sources=None,
mics=None,
temperature=None,
humidity=None,
air_absorption=False,
ray_tracing=False,
):
self.walls = walls
# Get the room dimension from that of the walls
self.dim = walls[0].dim
# Create a mapping with friendly names for walls
self._wall_mapping()
# initialize everything else
self._var_init(
fs,
t0,
max_order,
sigma2_awgn,
temperature,
humidity,
air_absorption,
ray_tracing,
)
# initialize the C++ room engine
self._init_room_engine()
# add the sources
self.sources = []
if sources is not None and isinstance(sources, list):
for src in sources:
self.add_soundsource(src)
# add the microphone array
if mics is not None:
self.add_microphone_array(mics)
else:
self.mic_array = None
def _var_init(
self,
fs,
t0,
max_order,
sigma2_awgn,
temperature,
humidity,
air_absorption,
ray_tracing,
):
self.fs = fs
if t0 != 0.0:
raise NotImplementedError(
"Global simulation delay not " "implemented (aka t0)"
)
self.t0 = t0
self.max_order = max_order
self.sigma2_awgn = sigma2_awgn
self.octave_bands = OctaveBandsFactory(fs=self.fs)
# Keep track of the state of the simulator
self.simulator_state = {
"ism_needed": (self.max_order >= 0),
"rt_needed": ray_tracing,
"air_abs_needed": air_absorption,
"ism_done": False,
"rt_done": False,
"rir_done": False,
}
# make it clear the room (C++) engine is not ready yet
self.room_engine = None
if temperature is None and humidity is None:
# default to package wide setting when nothing is provided
self.physics = Physics().from_speed(constants.get("c"))
else:
# use formulas when temperature and/or humidity are provided
self.physics = Physics(temperature=temperature, humidity=humidity)
self.set_sound_speed(self.physics.get_sound_speed())
self.air_absorption = None
if air_absorption:
self.set_air_absorption()
# default values for ray tracing parameters
self._set_ray_tracing_options(use_ray_tracing=ray_tracing)
# in the beginning, nothing has been simulated yet
self.visibility = None
# initialize the attribute for the impulse responses
self.rir = None
def _init_room_engine(self, *args):
args = list(args)
if len(args) == 0:
# This is a polygonal room
# find the non convex walls
obstructing_walls = find_non_convex_walls(self.walls)
args += [self.walls, obstructing_walls]
# for shoebox rooms, the required arguments are passed to
# the function
# initialize the C++ room engine
args += [
[],
self.c, # speed of sound
self.max_order,
self.rt_args["energy_thres"],
self.rt_args["time_thres"],
self.rt_args["receiver_radius"],
self.rt_args["hist_bin_size"],
self.simulator_state["ism_needed"] and self.simulator_state["rt_needed"],
]
# Create the real room object
if self.dim == 2:
self.room_engine = libroom.Room2D(*args)
else:
self.room_engine = libroom.Room(*args)
def _update_room_engine_params(self):
# Now, if it exists, set the parameters of room engine
if self.room_engine is not None:
self.room_engine.set_params(
self.c, # speed of sound
self.max_order,
self.rt_args["energy_thres"],
self.rt_args["time_thres"],
self.rt_args["receiver_radius"],
self.rt_args["hist_bin_size"],
(
self.simulator_state["ism_needed"]
and self.simulator_state["rt_needed"]
),
)
@property
def is_multi_band(self):
multi_band = False
for w in self.walls:
if len(w.absorption) > 1:
multi_band = True
return multi_band
def set_ray_tracing(
self,
n_rays=None,
receiver_radius=0.5,
energy_thres=1e-7,
time_thres=10.0,
hist_bin_size=0.004,
):
"""
Activates the ray tracer.
Parameters
----------
n_rays: int, optional
The number of rays to shoot in the simulation
receiver_radius: float, optional
The radius of the sphere around the microphone in which to
integrate the energy (default: 0.5 m)
energy_thres: float, optional
The energy threshold at which rays are stopped (default: 1e-7)
time_thres: float, optional
The maximum time of flight of rays (default: 10 s)
hist_bin_size: float
The time granularity of bins in the energy histogram (default: 4 ms)
"""
self._set_ray_tracing_options(
use_ray_tracing=True,
n_rays=n_rays,
receiver_radius=receiver_radius,
energy_thres=energy_thres,
time_thres=time_thres,
hist_bin_size=hist_bin_size,
)
def _set_ray_tracing_options(
self,
use_ray_tracing,
n_rays=None,
receiver_radius=0.5,
energy_thres=1e-7,
time_thres=10.0,
hist_bin_size=0.004,
is_init=False,
):
"""
Base method to set all ray tracing related options
"""
if use_ray_tracing:
if hasattr(self, "mic_array") and self.mic_array is not None:
if self.mic_array.directivity is not None:
raise NotImplementedError(
"Directivity not supported with ray tracing."
)
if hasattr(self, "sources"):
for source in self.sources:
if source.directivity is not None:
raise NotImplementedError(
"Directivity not supported with ray tracing."
)
self.simulator_state["rt_needed"] = use_ray_tracing
self.rt_args = {}
self.rt_args["energy_thres"] = energy_thres
self.rt_args["time_thres"] = time_thres
self.rt_args["receiver_radius"] = receiver_radius
self.rt_args["hist_bin_size"] = hist_bin_size
# set the histogram bin size so that it is an integer number of samples
self.rt_args["hist_bin_size_samples"] = math.floor(
self.fs * self.rt_args["hist_bin_size"]
)
self.rt_args["hist_bin_size"] = self.rt_args["hist_bin_size_samples"] / self.fs
if n_rays is None:
n_rays_auto_flag = True
# We follow Vorlaender 2008, Eq. (11.12) to set the default number of rays
# It depends on the mean hit rate we want to target
target_mean_hit_count = 20
# This is the multiplier for a single hit in average
k1 = self.get_volume() / (
np.pi
* (self.rt_args["receiver_radius"] ** 2)
* self.c
* self.rt_args["hist_bin_size"]
)
n_rays = int(target_mean_hit_count * k1)
if self.simulator_state["rt_needed"] and n_rays > 100000:
import warnings
warnings.warn(
"The number of rays used for ray tracing is larger than"
"100000 which may result in slow simulation. The number"
"of rays was automatically chosen to provide accurate"
"room impulse response based on the room volume and the"
"receiver radius around the microphones. The number of"
"rays may be reduced by increasing the size of the"
"receiver. This tends to happen especially for large"
"rooms with small receivers. The receiver is a sphere"
"around the microphone and its radius (in meters) may be"
"specified by providing the `receiver_radius` keyword"
"argument to the `set_ray_tracing` method."
)
self.rt_args["n_rays"] = n_rays
self._update_room_engine_params()
def unset_ray_tracing(self):
"""Deactivates the ray tracer"""
self.simulator_state["rt_needed"] = False
self._update_room_engine_params()
def set_air_absorption(self, coefficients=None):
"""
Activates or deactivates air absorption in the simulation.
Parameters
----------
coefficients: list of float
List of air absorption coefficients, one per octave band
"""
self.simulator_state["air_abs_needed"] = True
if coefficients is None:
self.air_absorption = self.octave_bands(**self.physics.get_air_absorption())
else:
# ignore temperature and humidity if coefficients are provided
self.air_absorption = self.physics().get_air_absorption()
def unset_air_absorption(self):
"""Deactivates air absorption in the simulation"""
self.simulator_state["air_abs_needed"] = False
def set_sound_speed(self, c):
"""Sets the speed of sound unconditionnaly"""
self.c = c
self._update_room_engine_params()
def _wall_mapping(self):
# mapping between wall names and indices
self.wallsId = {}
for i in range(len(self.walls)):
if self.walls[i].name is not None:
self.wallsId[self.walls[i].name] = i
@classmethod
def from_corners(
cls,
corners,
absorption=None,
fs=8000,
t0=0.0,
max_order=1,
sigma2_awgn=None,
sources=None,
mics=None,
materials=None,
**kwargs
):
"""
Creates a 2D room by giving an array of corners.
Parameters
----------
corners: (np.array dim 2xN, N>2)
list of corners, must be anti-clockwise oriented
absorption: float array or float
list of absorption factor for each wall or single value
for all walls
Returns
-------
Instance of a 2D room
"""
# make sure the corners are wrapped in an ndarray
corners = np.array(corners)
n_walls = corners.shape[1]
if corners.shape[0] != 2 or n_walls < 3:
raise ValueError("Arg corners must be more than two 2D points.")
# We want to make sure the corners are ordered counter-clockwise
if libroom.area_2d_polygon(corners) <= 0:
corners = corners[:, ::-1]
############################
# BEGIN COMPATIBILITY CODE #
############################
if absorption is None:
absorption = 0.0
absorption_compatibility_request = False
else:
absorption_compatibility_request = True
absorption = np.array(absorption, dtype="float64")
if absorption.ndim == 0:
absorption = absorption * np.ones(n_walls)
elif absorption.ndim >= 1 and n_walls != len(absorption):
raise ValueError(
"Arg absorption must be the same size as corners or must be a single value."
)
        ############################
        #  END COMPATIBILITY CODE  #
        ############################
if materials is not None:
if absorption_compatibility_request:
import warnings
warnings.warn(
"Because materials were specified, deprecated absorption parameter is ignored.",
DeprecationWarning,
)
if not isinstance(materials, list):
materials = [materials] * n_walls
if len(materials) != n_walls:
raise ValueError("One material per wall is necessary.")
for i in range(n_walls):
assert isinstance(
materials[i], Material
), "Material not specified using correct class"
elif absorption_compatibility_request:
import warnings
warnings.warn(
"Using absorption parameter is deprecated. In the future, use materials instead."
)
# Fix the absorption
# 1 - a1 == sqrt(1 - a2) <-- a1 is former incorrect absorption, a2 is the correct definition based on energy
# <=> a2 == 1 - (1 - a1) ** 2
correct_absorption = 1.0 - (1.0 - absorption) ** 2
materials = make_materials(*correct_absorption)
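            # Worked example (illustrative): a legacy absorption of a1 = 0.1
            # maps to the energy absorption a2 = 1 - (1 - 0.1) ** 2 = 0.19.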
else:
# In this case, no material is provided, use totally reflective walls, no scattering
materials = [Material(0.0, 0.0)] * n_walls
# Resample material properties at octave bands
octave_bands = OctaveBandsFactory(fs=fs)
if not Material.all_flat(materials):
for mat in materials:
mat.resample(octave_bands)
# Create the walls
walls = []
for i in range(n_walls):
walls.append(
wall_factory(
np.array([corners[:, i], corners[:, (i + 1) % n_walls]]).T,
materials[i].absorption_coeffs,
materials[i].scattering_coeffs,
"wall_" + str(i),
)
)
return cls(
walls,
fs=fs,
t0=t0,
max_order=max_order,
sigma2_awgn=sigma2_awgn,
sources=sources,
mics=mics,
**kwargs
)
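    # Minimal usage sketch for `from_corners` (hypothetical values; assumes the
    # package is imported as `pra`), illustrative only:
    #
    #   >>> import numpy as np
    #   >>> import pyroomacoustics as pra
    #   >>> corners = np.array([[0, 0], [6, 0], [6, 4], [0, 4]]).T   # shape (2, 4)
    #   >>> room = pra.Room.from_corners(
    #   ...     corners, fs=16000, max_order=3, materials=pra.Material(0.2, 0.1)
    #   ... )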
def extrude(
self, height, v_vec=None, absorption=None, materials=None,
):
"""
Creates a 3D room by extruding a 2D polygon.
        The polygon is typically the floor of the room and will have z-coordinate zero. The ceiling
        is a translated copy of the floor, placed at the given height along the extrusion direction.
Parameters
----------
height : float
The extrusion height
v_vec : array-like 1D length 3, optional
A unit vector. An orientation for the extrusion direction. The
ceiling will be placed as a translation of the floor with respect
to this vector (The default is [0,0,1]).
absorption : float or array-like, optional
Absorption coefficients for all the walls. If a scalar, then all the walls
will have the same absorption. If an array is given, it should have as many elements
as there will be walls, that is the number of vertices of the polygon plus two. The two
last elements are for the floor and the ceiling, respectively.
It is recommended to use materials instead of absorption parameter. (Default: 1)
materials : dict
Absorption coefficients for floor and ceiling. This parameter overrides absorption.
(Default: {"floor": 1, "ceiling": 1})
"""
if self.dim != 2:
raise ValueError("Can only extrude a 2D room.")
# default orientation vector is pointing up
if v_vec is None:
v_vec = np.array([0.0, 0.0, 1.0])
        # check that the walls are ordered counterclockwise
        # that should be the case if the room was created with the from_corners function
nw = len(self.walls)
floor_corners = np.zeros((2, nw))
floor_corners[:, 0] = self.walls[0].corners[:, 0]
ordered = True
for iw, wall in enumerate(self.walls[1:]):
if not np.allclose(self.walls[iw].corners[:, 1], wall.corners[:, 0]):
ordered = False
floor_corners[:, iw + 1] = wall.corners[:, 0]
if not np.allclose(self.walls[-1].corners[:, 1], self.walls[0].corners[:, 0]):
ordered = False
if not ordered:
            raise ValueError(
                "The wall list should be ordered counter-clockwise, which is the case "
                "if the room is created with Room.from_corners"
            )
# make sure the floor_corners are ordered anti-clockwise (for now)
if libroom.area_2d_polygon(floor_corners) <= 0:
floor_corners = np.fliplr(floor_corners)
walls = []
for i in range(nw):
corners = np.array(
[
np.r_[floor_corners[:, i], 0],
np.r_[floor_corners[:, (i + 1) % nw], 0],
np.r_[floor_corners[:, (i + 1) % nw], 0] + height * v_vec,
np.r_[floor_corners[:, i], 0] + height * v_vec,
]
).T
walls.append(
wall_factory(
corners,
self.walls[i].absorption,
self.walls[i].scatter,
name=str(i),
)
)
############################
# BEGIN COMPATIBILITY CODE #
############################
        if absorption is None:
            absorption = 0.0
            absorption_compatibility_request = False
        else:
            absorption_compatibility_request = True
##########################
# END COMPATIBILITY CODE #
##########################
if materials is not None:
if absorption_compatibility_request:
import warnings
warnings.warn(
"Because materials were specified, "
"deprecated absorption parameter is ignored.",
DeprecationWarning,
)
if not isinstance(materials, dict):
materials = {"floor": materials, "ceiling": materials}
for mat in materials.values():
assert isinstance(
mat, Material
), "Material not specified using correct class"
elif absorption_compatibility_request:
import warnings
warnings.warn(
"absorption parameter is deprecated for Room.extrude",
DeprecationWarning,
)
absorption = np.array(absorption)
if absorption.ndim == 0:
absorption = absorption * np.ones(2)
elif absorption.ndim == 1 and absorption.shape[0] != 2:
raise ValueError(
"The size of the absorption array must be 2 for extrude, "
"for the floor and ceiling"
)
            materials = make_materials(
                floor=(absorption[0], 0.0), ceiling=(absorption[1], 0.0),
            )
else:
# In this case, no material is provided, use totally reflective walls, no scattering
new_mat = Material(0.0, 0.0)
materials = {"floor": new_mat, "ceiling": new_mat}
new_corners = {}
new_corners["floor"] = np.pad(floor_corners, ((0, 1), (0, 0)), mode="constant")
new_corners["ceiling"] = (new_corners["floor"].T + height * v_vec).T
        # we need the floor corners to be ordered clockwise (for the normal to point outward)
new_corners["floor"] = np.fliplr(new_corners["floor"])
for key in ["floor", "ceiling"]:
walls.append(
wall_factory(
new_corners[key],
materials[key].absorption_coeffs,
materials[key].scattering_coeffs,
name=key,
)
)
self.walls = walls
self.dim = 3
# Update the real room object
self._init_room_engine()
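    # Typical follow-up to `from_corners` (sketch, continuing the hypothetical
    # `room` from the example above):
    #
    #   >>> room.extrude(2.5, materials=pra.Material(0.2, 0.1))   # 2D floor -> 3D room of height 2.5 m
    #   >>> room.dim
    #   3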
def plot(
self,
img_order=None,
freq=None,
figsize=None,
no_axis=False,
mic_marker_size=10,
plot_directivity=True,
ax=None,
**kwargs
):
"""Plots the room with its walls, microphones, sources and images"""
try:
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Circle, Polygon, Wedge
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
fig = None
if self.dim == 2:
fig = plt.figure(figsize=figsize)
if no_axis is True:
if ax is None:
ax = fig.add_axes([0, 0, 1, 1], aspect="equal", **kwargs)
ax.axis("off")
rect = fig.patch
rect.set_facecolor("gray")
rect.set_alpha(0.15)
else:
if ax is None:
ax = fig.add_subplot(111, aspect="equal", **kwargs)
# draw room
corners = np.array([wall.corners[:, 0] for wall in self.walls]).T
polygons = [Polygon(corners.T, True)]
p = PatchCollection(
polygons,
cmap=matplotlib.cm.jet,
facecolor=np.array([1, 1, 1]),
edgecolor=np.array([0, 0, 0]),
)
ax.add_collection(p)
if self.mic_array is not None:
for i in range(self.mic_array.nmic):
ax.scatter(
self.mic_array.R[0][i],
self.mic_array.R[1][i],
marker="x",
linewidth=0.5,
s=mic_marker_size,
c="k",
)
if plot_directivity and self.mic_array.directivity is not None:
azimuth_plot = np.linspace(
start=0, stop=360, num=361, endpoint=True
)
ax = self.mic_array.directivity[i].plot_response(
azimuth=azimuth_plot,
degrees=True,
ax=ax,
offset=self.mic_array.R[:, i],
)
# draw the beam pattern of the beamformer if requested (and available)
if (
freq is not None
and isinstance(self.mic_array, bf.Beamformer)
and (
self.mic_array.weights is not None
or self.mic_array.filters is not None
)
):
freq = np.array(freq)
if freq.ndim == 0:
freq = np.array([freq])
# define a new set of colors for the beam patterns
newmap = plt.get_cmap("autumn")
desat = 0.7
try:
# this is for matplotlib >= 2.0.0
ax.set_prop_cycle(
color=[
newmap(k) for k in desat * np.linspace(0, 1, len(freq))
]
)
except:
# keep this for backward compatibility
ax.set_color_cycle(
[newmap(k) for k in desat * np.linspace(0, 1, len(freq))]
)
phis = np.arange(360) * 2 * np.pi / 360.0
newfreq = np.zeros(freq.shape)
H = np.zeros((len(freq), len(phis)), dtype=complex)
for i, f in enumerate(freq):
newfreq[i], H[i] = self.mic_array.response(phis, f)
# normalize max amplitude to one
H = np.abs(H) ** 2 / np.abs(H).max() ** 2
# a normalization factor according to room size
norm = np.linalg.norm(
(corners - self.mic_array.center), axis=0
).max()
# plot all the beam patterns
for f, h in zip(newfreq, H):
x = np.cos(phis) * h * norm + self.mic_array.center[0, 0]
y = np.sin(phis) * h * norm + self.mic_array.center[1, 0]
ax.plot(x, y, "-", linewidth=0.5)
# define some markers for different sources and colormap for damping
markers = ["o", "s", "v", "."]
cmap = plt.get_cmap("YlGnBu")
# use this to check some image sources were drawn
has_drawn_img = False
# draw the scatter of images
for i, source in enumerate(self.sources):
# draw source
ax.scatter(
source.position[0],
source.position[1],
c=[cmap(1.0)],
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(1.0),
)
if plot_directivity and source.directivity is not None:
azimuth_plot = np.linspace(
start=0, stop=360, num=361, endpoint=True
)
ax = source.directivity.plot_response(
azimuth=azimuth_plot,
degrees=True,
ax=ax,
offset=source.position,
)
# draw images
if img_order is None:
img_order = 0
elif img_order == "max":
img_order = self.max_order
I = source.orders <= img_order
if len(I) > 0:
has_drawn_img = True
val = (np.log2(np.mean(source.damping, axis=0)[I]) + 10.0) / 10.0
# plot the images
ax.scatter(
source.images[0, I],
source.images[1, I],
c=cmap(val),
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(val),
)
# When no image source has been drawn, we need to use the bounding box
# to set correctly the limits of the plot
if not has_drawn_img or img_order == 0:
bbox = self.get_bbox()
ax.set_xlim(bbox[0, :])
ax.set_ylim(bbox[1, :])
return fig, ax
if self.dim == 3:
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as a3
import scipy as sp
if ax is None:
fig = plt.figure(figsize=figsize)
ax = a3.Axes3D(fig)
# plot the walls
for w in self.walls:
tri = a3.art3d.Poly3DCollection([w.corners.T], alpha=0.5)
tri.set_color(colors.rgb2hex(sp.rand(3)))
tri.set_edgecolor("k")
ax.add_collection3d(tri)
# define some markers for different sources and colormap for damping
markers = ["o", "s", "v", "."]
cmap = plt.get_cmap("YlGnBu")
# use this to check some image sources were drawn
has_drawn_img = False
# draw the scatter of images
for i, source in enumerate(self.sources):
# draw source
ax.scatter(
source.position[0],
source.position[1],
source.position[2],
c=[cmap(1.0)],
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(1.0),
)
if plot_directivity and source.directivity is not None:
azimuth_plot = np.linspace(
start=0, stop=360, num=361, endpoint=True
)
colatitude_plot = np.linspace(
start=0, stop=180, num=180, endpoint=True
)
ax = source.directivity.plot_response(
azimuth=azimuth_plot,
colatitude=colatitude_plot,
degrees=True,
ax=ax,
offset=source.position,
)
# draw images
if img_order is None:
img_order = self.max_order
I = source.orders <= img_order
if len(I) > 0:
has_drawn_img = True
val = (np.log2(np.mean(source.damping, axis=0)[I]) + 10.0) / 10.0
# plot the images
ax.scatter(
source.images[0, I],
source.images[1, I],
source.images[2, I],
c=cmap(val),
s=20,
marker=markers[i % len(markers)],
edgecolor=cmap(val),
)
# When no image source has been drawn, we need to use the bounding box
# to set correctly the limits of the plot
if not has_drawn_img or img_order == 0:
bbox = self.get_bbox()
ax.set_xlim3d(bbox[0, :])
ax.set_ylim3d(bbox[1, :])
ax.set_zlim3d(bbox[2, :])
# draw the microphones
if self.mic_array is not None:
for i in range(self.mic_array.nmic):
ax.scatter(
self.mic_array.R[0][i],
self.mic_array.R[1][i],
self.mic_array.R[2][i],
marker="x",
linewidth=0.5,
s=mic_marker_size,
c="k",
)
if plot_directivity and self.mic_array.directivity is not None:
azimuth_plot = np.linspace(
start=0, stop=360, num=361, endpoint=True
)
colatitude_plot = np.linspace(
start=0, stop=180, num=180, endpoint=True
)
ax = self.mic_array.directivity[i].plot_response(
azimuth=azimuth_plot,
colatitude=colatitude_plot,
degrees=True,
ax=ax,
offset=self.mic_array.R[:, i],
)
return fig, ax
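    # Plotting sketch (requires matplotlib; hypothetical `room` as above):
    #
    #   >>> fig, ax = room.plot(img_order=1)   # walls, mics, sources and 1st-order images
    #   >>> fig.savefig("room_layout.png")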
def plot_rir(self, select=None, FD=False):
"""
Plot room impulse responses. Compute if not done already.
Parameters
----------
select: list of tuples OR int
List of RIR pairs `(mic, src)` to plot, e.g. `[(0,0), (0,1)]`. Or
`int` to plot RIR from particular microphone to all sources. Note
that microphones and sources are zero-indexed. Default is to plot
all microphone-source pairs.
FD: bool
Whether to plot in the frequency domain, namely the transfer
function. Default is False.
"""
n_src = len(self.sources)
n_mic = self.mic_array.M
if select is None:
pairs = [(r, s) for r in range(n_mic) for s in range(n_src)]
elif isinstance(select, int):
pairs = [(select, s) for s in range(n_src)]
elif isinstance(select, list):
pairs = select
else:
raise ValueError('Invalid type for "select".')
if not self.simulator_state["rir_done"]:
self.compute_rir()
# for plotting
n_mic = len(list(set(pair[0] for pair in pairs)))
n_src = len(list(set(pair[1] for pair in pairs)))
r_plot = dict()
s_plot = dict()
for k, r in enumerate(list(set(pair[0] for pair in pairs))):
r_plot[r] = k
for k, s in enumerate(list(set(pair[1] for pair in pairs))):
s_plot[s] = k
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn("Matplotlib is required for plotting")
return
from . import utilities as u
plt.figure()
for k, _pair in enumerate(pairs):
r = _pair[0]
s = _pair[1]
h = self.rir[r][s]
if select is None: # matrix plot
plt.subplot(n_mic, n_src, r_plot[r] * n_src + s_plot[s] + 1)
else: # one column
plt.subplot(len(pairs), 1, k + 1)
if not FD:
plt.plot(np.arange(len(h)) / float(self.fs), h)
else:
u.real_spectrum(h)
plt.title("RIR: mic" + str(r) + " source" + str(s))
if r == n_mic - 1:
if not FD:
plt.xlabel("Time [s]")
else:
plt.xlabel("Normalized frequency")
plt.tight_layout()
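    # Sketch: plotting a subset of the impulse responses (illustrative):
    #
    #   >>> room.plot_rir(select=[(0, 0), (1, 0)])   # (mic, src) pairs
    #   >>> room.plot_rir(select=0, FD=True)         # mic 0 to all sources, transfer function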
def add(self, obj):
"""
Adds a sound source or microphone to a room
Parameters
----------
obj: :py:obj:`~pyroomacoustics.soundsource.SoundSource` or :py:obj:`~pyroomacoustics.beamforming.Microphone` object
The object to add
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
"""
if isinstance(obj, SoundSource):
if obj.dim != self.dim:
raise ValueError(
(
"The Room and SoundSource objects must be of the same "
"dimensionality. The Room is {}D but the SoundSource "
"is {}D"
).format(self.dim, obj.dim)
)
if not self.is_inside(np.array(obj.position)):
raise ValueError("The source must be added inside the room.")
self.sources.append(obj)
elif isinstance(obj, MicrophoneArray):
if obj.dim != self.dim:
raise ValueError(
(
"The Room and MicrophoneArray objects must be of the same "
"dimensionality. The Room is {}D but the MicrophoneArray "
"is {}D"
).format(self.dim, obj.dim)
)
if "mic_array" not in self.__dict__ or self.mic_array is None:
self.mic_array = obj
else:
self.mic_array.append(obj)
            # microphones need to be added to the room_engine
for m in range(len(obj)):
self.room_engine.add_mic(obj.R[:, None, m])
else:
raise TypeError(
"The add method from Room only takes SoundSource or "
"MicrophoneArray objects as parameter"
)
return self
def add_microphone(self, loc, fs=None, directivity=None):
"""
Adds a single microphone in the room.
Parameters
----------
loc: array_like or ndarray
The location of the microphone. The length should be the same as the room dimension.
fs: float, optional
The sampling frequency of the microphone, if different from that of the room.
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
"""
if self.simulator_state["rt_needed"] and directivity is not None:
raise NotImplementedError("Directivity not supported with ray tracing.")
        # make sure the location is a numpy array
loc = np.array(loc)
# if array, make it a 2D array as expected
if loc.ndim == 1:
loc = loc[:, None]
if fs is None:
fs = self.fs
return self.add(MicrophoneArray(loc, fs, directivity))
def add_microphone_array(self, mic_array, directivity=None):
"""
Adds a microphone array (i.e. several microphones) in the room.
Parameters
----------
mic_array: array_like or ndarray or MicrophoneArray object
The array can be provided as an array of size ``(dim, n_mics)``,
where ``dim`` is the dimension of the room and ``n_mics`` is the
number of microphones in the array.
As an alternative, a
:py:obj:`~pyroomacoustics.beamforming.MicrophoneArray` can be
provided.
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
"""
if self.simulator_state["rt_needed"] and directivity is not None:
raise NotImplementedError("Directivity not supported with ray tracing.")
if not isinstance(mic_array, MicrophoneArray):
# if the type is not a microphone array, try to parse a numpy array
mic_array = MicrophoneArray(mic_array, self.fs, directivity)
else:
# if the type is microphone array
if directivity is not None:
mic_array.set_directivity(directivity)
if self.simulator_state["rt_needed"] and mic_array.directivity is not None:
raise NotImplementedError("Directivity not supported with ray tracing.")
return self.add(mic_array)
def add_source(self, position, signal=None, delay=0, directivity=None):
"""
Adds a sound source given by its position in the room. Optionally
a source signal and a delay can be provided.
Parameters
-----------
position: ndarray, shape: (2,) or (3,)
The location of the source in the room
signal: ndarray, shape: (n_samples,), optional
The signal played by the source
delay: float, optional
A time delay until the source signal starts
in the simulation
Returns
-------
:py:obj:`~pyroomacoustics.room.Room`
The room is returned for further tweaking.
"""
if self.simulator_state["rt_needed"] and directivity is not None:
raise NotImplementedError("Directivity not supported with ray tracing.")
if directivity is not None:
from pyroomacoustics import ShoeBox
if not isinstance(self, ShoeBox):
raise NotImplementedError(
"Source directivity only supported for ShoeBox room."
)
if isinstance(position, SoundSource):
if directivity is not None:
assert isinstance(directivity, CardioidFamily)
return self.add(SoundSource(position, directivity=directivity))
else:
return self.add(position)
else:
if directivity is not None:
assert isinstance(directivity, CardioidFamily)
return self.add(
SoundSource(
position, signal=signal, delay=delay, directivity=directivity
)
)
else:
return self.add(SoundSource(position, signal=signal, delay=delay))
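    # Sketch of the add_* helpers (hypothetical positions and signal, 3D room):
    #
    #   >>> import numpy as np
    #   >>> signal = np.random.randn(16000)                      # 1 s of noise at fs = 16 kHz
    #   >>> room.add_source([2.0, 1.5, 1.7], signal=signal, delay=0.5)
    #   >>> room.add_microphone([3.5, 2.0, 1.2])
    #   >>> room.add_microphone_array(np.c_[[4.0, 2.0, 1.2], [4.1, 2.0, 1.2]])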
def add_soundsource(self, sndsrc, directivity=None):
"""
Adds a :py:obj:`pyroomacoustics.soundsource.SoundSource` object to the room.
Parameters
----------
sndsrc: :py:obj:`~pyroomacoustics.soundsource.SoundSource` object
The SoundSource object to add to the room
"""
if directivity is not None:
sndsrc.set_directivity(directivity)
return self.add(sndsrc)
def image_source_model(self):
if not self.simulator_state["ism_needed"]:
return
self.visibility = []
for source in self.sources:
n_sources = self.room_engine.image_source_model(source.position)
if n_sources > 0:
# Copy to python managed memory
source.images = self.room_engine.sources.copy()
source.orders = self.room_engine.orders.copy()
source.orders_xyz = self.room_engine.orders_xyz.copy()
source.walls = self.room_engine.gen_walls.copy()
source.damping = self.room_engine.attenuations.copy()
source.generators = -np.ones(source.walls.shape)
self.visibility.append(self.room_engine.visible_mics.copy())
# We need to check that microphones are indeed in the room
for m in range(self.mic_array.R.shape[1]):
# if not, it's not visible from anywhere!
if not self.is_inside(self.mic_array.R[:, m]):
self.visibility[-1][m, :] = 0
# Update the state
self.simulator_state["ism_done"] = True
def ray_tracing(self):
if not self.simulator_state["rt_needed"]:
return
# this will be a list of lists with
# shape (n_mics, n_src, n_directions, n_bands, n_time_bins)
self.rt_histograms = [[] for r in range(self.mic_array.M)]
for s, src in enumerate(self.sources):
self.room_engine.ray_tracing(self.rt_args["n_rays"], src.position)
for r in range(self.mic_array.M):
self.rt_histograms[r].append([])
for h in self.room_engine.microphones[r].histograms:
# get a copy of the histogram
self.rt_histograms[r][s].append(h.get_hist())
# reset all the receivers' histograms
self.room_engine.reset_mics()
# update the state
self.simulator_state["rt_done"] = True
def compute_rir(self):
"""
Compute the room impulse response between every source and microphone.
"""
if self.simulator_state["ism_needed"] and not self.simulator_state["ism_done"]:
self.image_source_model()
if self.simulator_state["rt_needed"] and not self.simulator_state["rt_done"]:
self.ray_tracing()
self.rir = []
volume_room = self.get_volume()
for m, mic in enumerate(self.mic_array.R.T):
self.rir.append([])
for s, src in enumerate(self.sources):
"""
Compute the room impulse response between the source
and the microphone whose position is given as an
argument.
"""
# fractional delay length
fdl = constants.get("frac_delay_length")
fdl2 = fdl // 2
# default, just in case both ism and rt are disabled (should never happen)
N = fdl
if self.simulator_state["ism_needed"]:
# compute azimuth and colatitude angles for receiver
if self.mic_array.directivity is not None:
angle_function_array = angle_function(src.images, mic)
azimuth = angle_function_array[0]
colatitude = angle_function_array[1]
# compute azimuth and colatitude angles for source
if self.sources[s].directivity is not None:
azimuth_s, colatitude_s = source_angle_shoebox(
image_source_loc=src.images,
wall_flips=abs(src.orders_xyz),
mic_loc=mic,
)
# compute the distance from image sources
dist = np.sqrt(np.sum((src.images - mic[:, None]) ** 2, axis=0))
time = dist / self.c
t_max = time.max()
N = int(math.ceil(t_max * self.fs))
else:
t_max = 0.0
if self.simulator_state["rt_needed"]:
# get the maximum length from the histograms
nz_bins_loc = np.nonzero(self.rt_histograms[m][s][0].sum(axis=0))[0]
if len(nz_bins_loc) == 0:
n_bins = 0
else:
n_bins = nz_bins_loc[-1] + 1
t_max = np.maximum(t_max, n_bins * self.rt_args["hist_bin_size"])
# the number of samples needed
# round up to multiple of the histogram bin size
# add the lengths of the fractional delay filter
hbss = int(self.rt_args["hist_bin_size_samples"])
N = int(math.ceil(t_max * self.fs / hbss) * hbss)
# this is where we will compose the RIR
ir = np.zeros(N + fdl)
# This is the distance travelled wrt time
distance_rir = np.arange(N) / self.fs * self.c
# this is the random sequence for the tail generation
seq = sequence_generation(volume_room, N / self.fs, self.c, self.fs)
seq = seq[:N]
# Do band-wise RIR construction
is_multi_band = self.is_multi_band
bws = self.octave_bands.get_bw() if is_multi_band else [self.fs / 2]
rir_bands = []
for b, bw in enumerate(bws):
ir_loc = np.zeros_like(ir)
# IS method
if self.simulator_state["ism_needed"]:
alpha = src.damping[b, :] / dist
if self.mic_array.directivity is not None:
alpha *= self.mic_array.directivity[m].get_response(
azimuth=azimuth,
colatitude=colatitude,
frequency=bw,
degrees=False,
)
if self.sources[s].directivity is not None:
alpha *= self.sources[s].directivity.get_response(
azimuth=azimuth_s,
colatitude=colatitude_s,
frequency=bw,
degrees=False,
)
# Use the Cython extension for the fractional delays
from .build_rir import fast_rir_builder
vis = self.visibility[s][m, :].astype(np.int32)
                        # we add the delay due to the fractional delay filter to
                        # the arrival times to avoid problems when the propagation
                        # time is shorter than the delay due to the filter
                        # hence: time + fdl2
time_adjust = time + fdl2 / self.fs
fast_rir_builder(ir_loc, time_adjust, alpha, vis, self.fs, fdl)
if is_multi_band:
ir_loc = self.octave_bands.analysis(ir_loc, band=b)
ir += ir_loc
# Ray Tracing
if self.simulator_state["rt_needed"]:
if is_multi_band:
seq_bp = self.octave_bands.analysis(seq, band=b)
else:
seq_bp = seq.copy()
# interpolate the histogram and multiply the sequence
seq_bp_rot = seq_bp.reshape((-1, hbss))
new_n_bins = seq_bp_rot.shape[0]
hist = self.rt_histograms[m][s][0][b, :new_n_bins]
normalization = np.linalg.norm(seq_bp_rot, axis=1)
indices = normalization > 0.0
seq_bp_rot[indices, :] /= normalization[indices, None]
seq_bp_rot *= np.sqrt(hist[:, None])
# Normalize the band power
# The bands should normally sum up to fs / 2
seq_bp *= np.sqrt(bw / self.fs * 2.0)
ir_loc[fdl2 : fdl2 + N] += seq_bp
# keep for further processing
rir_bands.append(ir_loc)
# Do Air absorption
if self.simulator_state["air_abs_needed"]:
# In case this was not multi-band, do the band pass filtering
if len(rir_bands) == 1:
rir_bands = self.octave_bands.analysis(rir_bands[0]).T
# Now apply air absorption
for band, air_abs in zip(rir_bands, self.air_absorption):
air_decay = np.exp(-0.5 * air_abs * distance_rir)
band[fdl2 : N + fdl2] *= air_decay
# Sum up all the bands
np.sum(rir_bands, axis=0, out=ir)
self.rir[-1].append(ir)
self.simulator_state["rir_done"] = True
def simulate(
self,
snr=None,
reference_mic=0,
callback_mix=None,
callback_mix_kwargs={},
return_premix=False,
recompute_rir=False,
):
r"""
Simulates the microphone signal at every microphone in the array
Parameters
----------
reference_mic: int, optional
The index of the reference microphone to use for SNR computations.
The default reference microphone is the first one (index 0)
snr: float, optional
The target signal-to-noise ratio (SNR) in decibels at the reference microphone.
When this option is used the argument
:py:attr:`pyroomacoustics.room.Room.sigma2_awgn` is ignored. The variance of
every source at the reference microphone is normalized to one and
the variance of the noise \\(\\sigma_n^2\\) is chosen
.. math::
\mathsf{SNR} = 10 \log_{10} \frac{ K }{ \sigma_n^2 }
The value of :py:attr:`pyroomacoustics.room.Room.sigma2_awgn` is also set
to \\(\\sigma_n^2\\) automatically
callback_mix: func, optional
A function that will perform the mix, it takes as first argument
an array of shape ``(n_sources, n_mics, n_samples)`` that contains
the source signals convolved with the room impulse response prior
to mixture at the microphone. It should return an array of shape
``(n_mics, n_samples)`` containing the mixed microphone signals.
If such a function is provided, the ``snr`` option is ignored
and :py:attr:`pyroomacoustics.room.Room.sigma2_awgn` is set to ``None``.
callback_mix_kwargs: dict, optional
A dictionary that contains optional arguments for ``callback_mix``
function
return_premix: bool, optional
If set to ``True``, the function will return an array of shape
``(n_sources, n_mics, n_samples)`` containing the microphone
signals with individual sources, convolved with the room impulse
response but prior to mixing
recompute_rir: bool, optional
If set to ``True``, the room impulse responses will be recomputed
prior to simulation
Returns
-------
Nothing or an array of shape ``(n_sources, n_mics, n_samples)``
Depends on the value of ``return_premix`` option
"""
# import convolution routine
from scipy.signal import fftconvolve
# Throw an error if we are missing some hardware in the room
if len(self.sources) == 0:
raise ValueError("There are no sound sources in the room.")
if self.mic_array is None:
raise ValueError("There is no microphone in the room.")
# compute RIR if necessary
if self.rir is None or len(self.rir) == 0 or recompute_rir:
self.compute_rir()
# number of mics and sources
M = self.mic_array.M
S = len(self.sources)
# compute the maximum signal length
from itertools import product
max_len_rir = np.array(
[len(self.rir[i][j]) for i, j in product(range(M), range(S))]
).max()
f = lambda i: len(self.sources[i].signal) + np.floor(
self.sources[i].delay * self.fs
)
max_sig_len = np.array([f(i) for i in range(S)]).max()
L = int(max_len_rir) + int(max_sig_len) - 1
if L % 2 == 1:
L += 1
# the array that will receive all the signals
premix_signals = np.zeros((S, M, L))
# compute the signal at every microphone in the array
for m in np.arange(M):
for s in np.arange(S):
sig = self.sources[s].signal
if sig is None:
continue
d = int(np.floor(self.sources[s].delay * self.fs))
h = self.rir[m][s]
premix_signals[s, m, d : d + len(sig) + len(h) - 1] += fftconvolve(
h, sig
)
if callback_mix is not None:
# Execute user provided callback
signals = callback_mix(premix_signals, **callback_mix_kwargs)
self.sigma2_awgn = None
elif snr is not None:
            # Normalize all signals so that they have unit variance at the reference microphone
denom = np.std(premix_signals[:, reference_mic, :], axis=1)
premix_signals /= denom[:, None, None]
signals = np.sum(premix_signals, axis=0)
# Compute the variance of the microphone noise
self.sigma2_awgn = 10 ** (-snr / 10) * S
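            # e.g. with snr = 10 dB and S = 2 unit-variance sources this gives
            # sigma2_awgn = 10 ** (-1) * 2 = 0.2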
else:
            signals = np.sum(premix_signals, axis=0)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the HRSOrder class.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy import units as u
class HRSOrder(object):
"""A class describing a single order for a High Resolutoin Spectrograph
observation.
Parameters
-----------
order: integer
Order of the HRS observations
region: list, tuple, or `~numpy.ndarray`
region is an object that contains coordinates for pixels in
the image which are part of this order. It should be a list
containing two arrays with the coordinates listed in each array.
flux: `~numpy.ndarray`
Fluxes corresponding to each pixel coordinate in region.
wavelength: `~numpy.ndarray`
Wavelengths corresponding to each pixel coordinate in region.
order_type: str
Type of order for the Order of the HRS observations
flux_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the flux.
wavelength_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the wavelength
"""
def __init__(self, order, region=None, flux=None, wavelength=None,
flux_unit=None, wavelength_unit=None, order_type=None):
self.order = order
self.region = region
self.flux = flux
self.wavelength = wavelength
self.flux_unit = flux_unit
self.wavelength_unit = wavelength_unit
self.order_type = order_type
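    # Minimal usage sketch (hypothetical 2D frames `order_frame` and `flux_frame`
    # of identical shape), illustrative only:
    #
    #   >>> hrs = HRSOrder(order=83)
    #   >>> hrs.set_order_from_array(order_frame)                 # pixels where order_frame == 83
    #   >>> hrs.set_flux_from_array(flux_frame, flux_unit='adu')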
@property
def order(self):
return self._order
@order.setter
def order(self, value):
if not isinstance(value, int):
raise TypeError('order is not an integer')
self._order = value
@property
def order_type(self):
return self._order_type
@order_type.setter
def order_type(self, value):
if value not in ['sky', 'object', None]:
raise TypeError("order_type is not None, 'sky', or 'object'")
self._order_type = value
@property
def region(self):
return self._region
@region.setter
def region(self, value):
if value is None:
self._region = None
return
if len(value) != 2:
raise TypeError("region is not of length 2")
if len(value[0]) != len(value[1]):
raise TypeError(
"coordinate lists in region are not of equal length")
self.npixels = len(value[0])
self._region = value
@property
def flux(self):
return self._flux
@flux.setter
def flux(self, value):
if value is None:
self._flux = None
return
if self.region is None:
raise ValueError('No region is set yet')
if len(value) != self.npixels:
raise TypeError("flux is not the same length as region")
self._flux = value
@property
def wavelength(self):
return self._wavelength
@wavelength.setter
def wavelength(self, value):
if value is None:
self._wavelength = None
return
if self.region is None:
raise ValueError('No region is set yet')
if len(value) != self.npixels:
raise TypeError("wavelength is not the same length as region")
self._wavelength = value
@property
def flux_unit(self):
return self._flux_unit
@flux_unit.setter
def flux_unit(self, value):
if value is None:
self._flux_unit = None
else:
self._flux_unit = u.Unit(value)
@property
def wavelength_unit(self):
return self._wavelength_unit
@wavelength_unit.setter
def wavelength_unit(self, value):
if value is None:
self._wavelength_unit = None
else:
self._wavelength_unit = u.Unit(value)
def set_order_from_array(self, data):
"""Given an array of data which has an order specified at each pixel,
set the region at the given order for HRSOrder
Parameters
----------
data: `~numpy.ndarray`
            data is a 2D array with an order value specified at each pixel. If
no order is available for a given pixel, the pixel should have a
value of zero.
"""
if not isinstance(data, np.ndarray):
            raise TypeError('data is not a numpy.ndarray')
if data.ndim != 2:
raise TypeError('data is not a 2D numpy.ndarray')
self.region = np.where(data == self.order)
def set_flux_from_array(self, data, flux_unit=None):
"""Given an array of data of fluxes, set the fluxes for
the region at the given order for HRSOrder
Parameters
----------
data: `~numpy.ndarray`
            data is a 2D array with a flux value specified at each pixel.
flux_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the flux.
"""
if not isinstance(data, np.ndarray):
            raise TypeError('data is not a numpy.ndarray')
if data.ndim != 2:
raise TypeError('data is not a 2D numpy.ndarray')
self.flux = data[self.region]
self.flux_unit = flux_unit
def set_wavelength_from_array(self, data, wavelength_unit):
"""Given an array of wavelengths, set the wavelength for
each pixel coordinate in `~HRSOrder.region`.
Parameters
----------
data: `~numpy.ndarray`
            data is a 2D array with a wavelength value specified at each pixel
wavelength_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the wavelength
"""
if not isinstance(data, np.ndarray):
            raise TypeError('data is not a numpy.ndarray')
if data.ndim != 2:
raise TypeError('data is not a 2D numpy.ndarray')
self.wavelength = data[self.region]
self.wavelength_unit = wavelength_unit
def set_wavelength_from_model(
self, model, params, wavelength_unit, **kwargs):
"""Given an array of wavelengths, set the wavelength for
each pixel coordinate in `~HRSOrder.region`.
Parameters
----------
model: function
model is a callable function that will create a corresponding
wavelength for each pixel in `~HRSOrder.region`. The function
can either be 1D or 2D. If it is 2D, the x-coordinate should
be the first argument.
params: `~numpy.ndarray`
            Either a 1D or 2D list of parameters with the number of elements
            corresponding to the number of pixels. Typically, if model
            is a 1D function, this would be the x-coordinates from
            `~HRSOrder.region`. Otherwise, this would be expected to be
            `~HRSOrder.region`.
wavelength_unit: `~astropy.units.UnitBase` instance or str, optional
The units of the wavelength
**kwargs:
All additional keywords to be passed to model
"""
if not hasattr(model, '__call__'):
raise TypeError('model is not a function')
self.wavelength_unit = wavelength_unit
if len(params) == self.npixels:
self.wavelength = model(params, **kwargs)
elif len(params) == 2:
self.wavelength = model(params[1], params[0], **kwargs)
else:
raise TypeError('params is not the correct size or shape')
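    # Sketch of the 1D calling convention (made-up dispersion polynomial):
    #
    #   >>> import numpy as np
    #   >>> model = lambda x: np.polyval([1.0e-5, 0.05, 400.0], x)   # wavelength vs. x pixel
    #   >>> hrs.set_wavelength_from_model(model, hrs.region[1], 'nm')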
    def create_box(self, flux, interp=False):
        """Convert the order into a rectangular representation with integer shifts
        between each pixel
        Parameters
        ----------
        flux: ~numpy.ndarray
            Array of values to convert into a rectangular representation
        interp: bool, optional
            If True, interpolate the flux onto a regular grid in each column
        Returns
        -------
        box: ~numpy.ndarray
            Rectangular representation of flux
        coef: ~numpy.ndarray
            Coefficients of the polynomial fit to the order trace
        """
xmax = self.region[1].max()
xmin = 0
ymax = self.region[0].max()
ymin = self.region[0].min()
xs = xmax-xmin
coef = np.polyfit(self.region[1], self.region[0], 3)
xarr = np.arange(xs+1)
yarr = np.polyval(coef, xarr)-ymin
x = self.region[1]-xmin
y = self.region[0]-ymin - (np.polyval(coef, x) - ymin - yarr.min()).astype(int)
ys = y.max()
data = np.zeros((ys+1,xs+1))
if interp:
yarr = np.arange(ys+1)
for i in range(xs):
mask = (self.region[1]==i)
ym = ymin-np.polyval(coef, xarr).min()
if mask.sum() > 0:
y = self.region[0][mask]- np.polyval(coef, i) - ym
data[:,i] = np.interp(yarr, y, flux[mask])
else:
data[y,x] = flux
return data, coef
    def unravel_box(self, box):
        """Convert a rectangular representation of the spectra back to a single
        array
        Parameters
        ----------
        box: ~numpy.ndarray
            Rectangular representation of the flux
        Returns
        -------
        data: ~numpy.ndarray
            1D array of flux values, one per pixel in `~HRSOrder.region`
        """
xmax = self.region[1].max()
xmin = 0
ymax = self.region[0].max()
ymin = self.region[0].min()
ys = ymax-ymin
xs = xmax-xmin
data = np.zeros((ys+1,xs+1))
coef = np.polyfit(self.region[1], self.region[0], 3)
xarr = np.arange(xs+1)
yarr = np.polyval(coef, xarr)-ymin
x = self.region[1]-xmin
y = self.region[0]-ymin - (np.polyval(coef, x) - ymin - yarr.min()).astype(int)
        data = np.zeros(self.npixels)
from optparse import OptionParser
import os
import re
import sys
import random
import math
import numpy as np
import cv2
# This function is used for numerical sorting of file names (strings)
numbers = re.compile(r'(\d+)')
def numerical_sort(value):
parts = numbers.split(value)
parts[1::2] = map(int, parts[1::2])
return parts
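# Example of the natural-sort key above: digits embedded in the names are
# compared as integers, not as strings.
#   >>> sorted(['frame2.png', 'frame10.png', 'frame1.png'], key=numerical_sort)
#   ['frame1.png', 'frame2.png', 'frame10.png']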
def update_progress(message, progress):
sys.stdout.write('\r')
    sys.stdout.write(message + "[%-50s] %d%%" % ('=' * int(np.floor(0.5 * progress)), progress))
teams = ["air-force",
"akron",
"alabama",
"alabama-birmingham",
"appalachian-state",
"arizona",
"arizona-state",
"arkansas",
"arkansas-state",
"army",
"auburn",
"ball-state",
"baylor",
"boise-state",
"boston-college",
"bowling-green-state",
"brigham-young",
"buffalo",
"california",
"central-florida",
"central-michigan",
"charlotte",
"cincinnati",
"clemson",
"coastal-carolina",
"colorado",
"colorado-state",
"connecticut",
"duke",
"east-carolina",
"eastern-michigan",
"florida",
"florida-atlantic",
"florida-international",
"florida-state",
"fresno-state",
"georgia",
"georgia-southern",
"georgia-state",
"georgia-tech",
"hawaii",
"houston",
"illinois",
"indiana",
"iowa",
"iowa-state",
"kansas",
"kansas-state",
"kent-state",
"kentucky",
"liberty",
"louisiana-lafayette",
"louisiana-state",
"louisiana-tech",
"louisiana-monroe",
"louisville",
"marshall",
"maryland",
"massachusetts",
"memphis",
"miami-fl",
"miami-oh",
"michigan",
"michigan-state",
"middle-tennessee-state",
"minnesota",
"mississippi",
"mississippi-state",
"missouri",
"navy",
"nebraska",
"nevada",
"nevada-las-vegas",
"new-mexico",
"new-mexico-state",
"north-carolina",
"north-carolina-state",
"north-texas",
"northern-illinois",
"northwestern",
"notre-dame",
"ohio",
"ohio-state",
"oklahoma",
"oklahoma-state",
"old-dominion",
"oregon",
"oregon-state",
"penn-state",
"pittsburgh",
"purdue",
"rice",
"rutgers",
"san-diego-state",
"san-jose-state",
"south-alabama",
"south-carolina",
"south-florida",
"southern-california",
"southern-methodist",
"southern-mississippi",
"stanford",
"syracuse",
"temple",
"tennessee",
"texas",
"texas-am",
"texas-christian",
"texas-state",
"texas-tech",
"texas-el-paso",
"texas-san-antonio",
"toledo",
"troy",
"tulane",
"tulsa",
"ucla",
"utah",
"utah-state",
"vanderbilt",
"virginia",
"virginia-tech",
"wake-forest",
"washington",
"washington-state",
"west-virginia",
"western-kentucky",
"western-michigan",
"wisconsin",
"wyoming"]
import argparse
import os
import sys
import shutil
import subprocess
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import pickle
import itertools
import io
tables = {}
teams = sorted(teams)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--year', default=2018,type=int, help='season to save')
parser.add_argument('--folder', default='cfb_team',type=str, help='folder to save year stats')
parser.add_argument('--rfolder', default='cfb_roster',type=str, help='rosters folder')
parser.add_argument('--ow', action='store_true',help='overwrite existing')
parser.add_argument('--process', action='store_true',help='only process files, no fetching')
args = parser.parse_args()
for folder in [args.folder,args.rfolder]:
try:
os.mkdir(folder)
print("Directory {} created".format(folder))
except FileExistsError:
pass
for team in teams:
target = os.path.join(args.folder,team + str(args.year) + '.html')
rtarget = os.path.join(args.rfolder,team + str(args.year) + '.html')
if args.process:
if not os.path.exists(target):
continue
# get the files
else:
if args.ow or not os.path.exists(target):
subprocess.call(['wget','-O',target,
'https://www.sports-reference.com/cfb/schools/{}/{}.html'.format(team,args.year)])
fs = os.path.getsize(target)
if fs < 10:
os.remove(target)
continue
if args.ow or not os.path.exists(rtarget):
subprocess.call(['wget','-O',rtarget,
'https://www.sports-reference.com/cfb/schools/{}/{}-roster.html'.format(team,args.year)])
fs = os.path.getsize(rtarget)
if fs < 10:
os.remove(rtarget)
continue
# load the data
try:
with open(target,'rt') as fp:
data = fp.read()
with open(rtarget,'rt') as fp:
rdata = fp.read()
except:
with open(target,'rt',encoding='latin-1') as fp:
data = fp.read()
with open(rtarget,'rt',encoding='latin-1') as fp:
rdata = fp.read()
# collect all the tables
try:
m = re.findall(r'<!--[ \n]*(<div[\s\S\r]+?</div>)[ \n]*-->',data)
m2 = re.findall(r'(<div class="table_outer_container">[ \n]*<div class="overthrow table_container" id="div_roster">[\s\S\r]+?</table>[ \n]*</div>[ \n]*</div>)',rdata)
m3 = re.findall(r'<!--[ \n]*(<div[\s\S\r]+?</div>)[ \n]*-->',rdata)
m = m2 + m + m3
print(target,len(m),len(m3))
tables[team] = {}
bs = BeautifulSoup(data,features="lxml")
tables[team]['logo'] = re.findall('(http.*png)',str(bs.find_all('img',{"class": "teamlogo"})[0]))[0]
tables[team]['name'] = re.findall('<title>{} (.*) Stats'.format(args.year),data)[0]
tables[team]['conf'] = re.findall('<a href="/cfb/conferences/(.*)/{}.html">(.*)</a>'.format(args.year),data)[0]
except:
continue
for test_table in m:
try:
soup = BeautifulSoup(test_table,features="lxml")
table_id = str(soup.find('table').get('id'))
if table_id in ['team_and_opponent','team_td_log','opp_td_log']:
continue
soup.findAll('tr')
table_size = {'defense_and_fumbles':1,'passing':1, 'rushing_and_receiving' :1,'returns' :1,'kicking' :1,'defense' :1,'kicking_and_punting':1,'scoring':1}
            # use getText() to extract the text we need into a list
            headers = [th.getText() for th in soup.findAll('tr')[table_size.get(table_id,0)].findAll('th')]
            # exclude the first column as we will not need the ranking order from Sports Reference for the analysis
start_col = 1
if table_id in ['contracts','injury','on_off','on_off_p','roster']:
start_col = 0
headers = headers[start_col:]
rows = soup.findAll('tr')[start_col:]
player_stats = [[td.getText() for td in rows[i].findAll('td')]
for i in range(len(rows))]
if table_id in ['contracts','roster']:
player_status = [[td.get('class') for td in rows[i].findAll('td')]
for i in range(len(rows))]
status_array = []
for status in player_status:
if len(status) > 0:
s2 = [False] + [s[-1] in ['salary-pl','salary-et','salary-tm'] for s in status[1:]]
else:
                        s2 = np.array([])
import operator
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core import formatting
from xarray.core.npcompat import IS_NEP18_ACTIVE
pint = pytest.importorskip("pint")
DimensionalityError = pint.errors.DimensionalityError
unit_registry = pint.UnitRegistry()
Quantity = unit_registry.Quantity
pytestmark = [
pytest.mark.skipif(
not IS_NEP18_ACTIVE, reason="NUMPY_EXPERIMENTAL_ARRAY_FUNCTION is not enabled"
),
# TODO: remove this once pint has a released version with __array_function__
pytest.mark.skipif(
not hasattr(unit_registry.Quantity, "__array_function__"),
reason="pint does not implement __array_function__ yet",
),
# pytest.mark.filterwarnings("ignore:::pint[.*]"),
]
def array_extract_units(obj):
raw = obj.data if hasattr(obj, "data") else obj
try:
return raw.units
except AttributeError:
return None
def array_strip_units(array):
try:
return array.magnitude
except AttributeError:
return array
def array_attach_units(data, unit, convert_from=None):
try:
unit, convert_from = unit
except TypeError:
pass
if isinstance(data, Quantity):
if not convert_from:
raise ValueError(
"cannot attach unit {unit} to quantity ({data.units})".format(
unit=unit, data=data
)
)
elif isinstance(convert_from, unit_registry.Unit):
data = data.magnitude
elif convert_from is True: # intentionally accept exactly true
if data.check(unit):
convert_from = data.units
data = data.magnitude
else:
raise ValueError(
"cannot convert quantity ({data.units}) to {unit}".format(
unit=unit, data=data
)
)
else:
raise ValueError(
"cannot convert from invalid unit {convert_from}".format(
convert_from=convert_from
)
)
# to make sure we also encounter the case of "equal if converted"
if convert_from is not None:
quantity = (data * convert_from).to(
unit
if isinstance(unit, unit_registry.Unit)
else unit_registry.dimensionless
)
else:
try:
quantity = data * unit
except np.core._exceptions.UFuncTypeError:
if unit != 1:
raise
quantity = data
return quantity
def extract_units(obj):
if isinstance(obj, xr.Dataset):
vars_units = {
name: array_extract_units(value) for name, value in obj.data_vars.items()
}
coords_units = {
name: array_extract_units(value) for name, value in obj.coords.items()
}
units = {**vars_units, **coords_units}
elif isinstance(obj, xr.DataArray):
vars_units = {obj.name: array_extract_units(obj)}
coords_units = {
name: array_extract_units(value) for name, value in obj.coords.items()
}
units = {**vars_units, **coords_units}
elif isinstance(obj, Quantity):
vars_units = {"<array>": array_extract_units(obj)}
units = {**vars_units}
else:
units = {}
return units
def strip_units(obj):
if isinstance(obj, xr.Dataset):
data_vars = {name: strip_units(value) for name, value in obj.data_vars.items()}
coords = {name: strip_units(value) for name, value in obj.coords.items()}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords)
elif isinstance(obj, xr.DataArray):
data = array_strip_units(obj.data)
coords = {
name: (
(value.dims, array_strip_units(value.data))
if isinstance(value.data, Quantity)
else value # to preserve multiindexes
)
for name, value in obj.coords.items()
}
new_obj = xr.DataArray(name=obj.name, data=data, coords=coords, dims=obj.dims)
elif hasattr(obj, "magnitude"):
new_obj = obj.magnitude
else:
new_obj = obj
return new_obj
def attach_units(obj, units):
if not isinstance(obj, (xr.DataArray, xr.Dataset)):
return array_attach_units(obj, units.get("data", 1))
if isinstance(obj, xr.Dataset):
data_vars = {
name: attach_units(value, units) for name, value in obj.data_vars.items()
}
coords = {
name: attach_units(value, units) for name, value in obj.coords.items()
}
new_obj = xr.Dataset(data_vars=data_vars, coords=coords, attrs=obj.attrs)
else:
# try the array name, "data" and None, then fall back to dimensionless
data_units = (
units.get(obj.name, None)
or units.get("data", None)
or units.get(None, None)
or 1
)
data = array_attach_units(obj.data, data_units)
coords = {
name: (
(value.dims, array_attach_units(value.data, units.get(name) or 1))
if name in units
# to preserve multiindexes
else value
)
for name, value in obj.coords.items()
}
dims = obj.dims
attrs = obj.attrs
new_obj = xr.DataArray(
name=obj.name, data=data, coords=coords, attrs=attrs, dims=dims
)
return new_obj
def assert_equal_with_units(a, b):
# works like xr.testing.assert_equal, but also explicitly checks units
# so, it is more like assert_identical
__tracebackhide__ = True
if isinstance(a, xr.Dataset) or isinstance(b, xr.Dataset):
a_units = extract_units(a)
b_units = extract_units(b)
a_without_units = strip_units(a)
b_without_units = strip_units(b)
assert a_without_units.equals(b_without_units), formatting.diff_dataset_repr(
a, b, "equals"
)
assert a_units == b_units
else:
a = a if not isinstance(a, (xr.DataArray, xr.Variable)) else a.data
b = b if not isinstance(b, (xr.DataArray, xr.Variable)) else b.data
assert type(a) == type(b) or (
isinstance(a, Quantity) and isinstance(b, Quantity)
)
# workaround until pint implements allclose in __array_function__
if isinstance(a, Quantity) or isinstance(b, Quantity):
assert (
hasattr(a, "magnitude") and hasattr(b, "magnitude")
) and np.allclose(a.magnitude, b.magnitude, equal_nan=True)
assert (hasattr(a, "units") and hasattr(b, "units")) and a.units == b.units
else:
assert np.allclose(a, b, equal_nan=True)
@pytest.fixture(params=[float, int])
def dtype(request):
return request.param
class method:
def __init__(self, name, *args, **kwargs):
self.name = name
self.args = args
self.kwargs = kwargs
def __call__(self, obj, *args, **kwargs):
from collections.abc import Callable
from functools import partial
all_args = list(self.args) + list(args)
all_kwargs = {**self.kwargs, **kwargs}
func = getattr(obj, self.name, None)
if func is None or not isinstance(func, Callable):
# fall back to module level numpy functions if not a xarray object
if not isinstance(obj, (xr.Variable, xr.DataArray, xr.Dataset)):
numpy_func = getattr(np, self.name)
func = partial(numpy_func, obj)
# remove typical xr args like "dim"
exclude_kwargs = ("dim", "dims")
all_kwargs = {
key: value
for key, value in all_kwargs.items()
if key not in exclude_kwargs
}
else:
raise AttributeError(f"{obj} has no method named '{self.name}'")
return func(*all_args, **all_kwargs)
def __repr__(self):
return f"method_{self.name}"
class function:
def __init__(self, name):
self.name = name
self.func = getattr(np, name)
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __repr__(self):
return f"function_{self.name}"
@pytest.mark.parametrize("func", (xr.zeros_like, xr.ones_like))
def test_replication(func, dtype):
array = np.linspace(0, 10, 20).astype(dtype) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x")
numpy_func = getattr(np, func.__name__)
expected = xr.DataArray(data=numpy_func(array), dims="x")
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(
reason="np.full_like on Variable strips the unit and pint does not allow mixed args"
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.m, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.ms, None, id="compatible_unit"),
pytest.param(unit_registry.s, None, id="identical_unit"),
),
)
def test_replication_full_like(unit, error, dtype):
array = np.linspace(0, 5, 10) * unit_registry.s
data_array = xr.DataArray(data=array, dims="x")
fill_value = -1 * unit
if error is not None:
with pytest.raises(error):
xr.full_like(data_array, fill_value=fill_value)
else:
result = xr.full_like(data_array, fill_value=fill_value)
expected = np.full_like(array, fill_value=fill_value)
assert_equal_with_units(expected, result)
class TestDataArray:
@pytest.mark.filterwarnings("error:::pint[.*]")
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
pytest.param("with_coords"),
pytest.param("without_coords"),
),
)
def test_init(self, variant, dtype):
array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
data_array = xr.DataArray(**kwargs)
assert isinstance(data_array.data, Quantity)
assert all(
{
name: isinstance(coord.data, Quantity)
for name, coord in data_array.coords.items()
}.values()
)
@pytest.mark.filterwarnings("error:::pint[.*]")
@pytest.mark.parametrize(
"func", (pytest.param(str, id="str"), pytest.param(repr, id="repr"))
)
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"with_dims",
marks=pytest.mark.xfail(reason="units in indexes are not supported"),
),
pytest.param("with_coords"),
pytest.param("without_coords"),
),
)
def test_repr(self, func, variant, dtype):
array = np.linspace(1, 2, 10, dtype=dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
y = x.to(unit_registry.ms)
variants = {
"with_dims": {"x": x},
"with_coords": {"y": ("x", y)},
"without_coords": {},
}
kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
data_array = xr.DataArray(**kwargs)
# FIXME: this just checks that the repr does not raise
# warnings or errors, but does not check the result
func(data_array)
@pytest.mark.parametrize(
"func",
(
pytest.param(
function("all"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
function("any"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
function("argmax"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
pytest.param(
function("argmin"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
function("max"),
function("mean"),
pytest.param(
function("median"),
marks=pytest.mark.xfail(
reason="np.median on DataArray strips the units"
),
),
function("min"),
pytest.param(
function("prod"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
function("sum"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
function("std"),
function("var"),
function("cumsum"),
pytest.param(
function("cumprod"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
method("all"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
method("any"),
marks=pytest.mark.xfail(reason="not implemented by pint yet"),
),
pytest.param(
method("argmax"),
marks=pytest.mark.xfail(
reason="comparison of quantities with ndarrays in nanops not implemented"
),
),
pytest.param(
method("argmin"),
marks=pytest.mark.xfail(
reason="comparison of quantities with ndarrays in nanops not implemented"
),
),
method("max"),
method("mean"),
method("median"),
method("min"),
pytest.param(
method("prod"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
pytest.param(
method("sum"),
marks=pytest.mark.xfail(
reason="comparison of quantity with ndarrays in nanops not implemented"
),
),
method("std"),
method("var"),
method("cumsum"),
pytest.param(
method("cumprod"),
marks=pytest.mark.xfail(reason="pint does not implement cumprod yet"),
),
),
ids=repr,
)
def test_aggregation(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
expected = xr.DataArray(data=func(array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
pytest.param(operator.neg, id="negate"),
pytest.param(abs, id="absolute"),
pytest.param(
np.round,
id="round",
marks=pytest.mark.xfail(reason="pint does not implement round"),
),
),
)
def test_unary_operations(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
expected = xr.DataArray(data=func(array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
pytest.param(lambda x: 2 * x, id="multiply"),
pytest.param(lambda x: x + x, id="add"),
pytest.param(lambda x: x[0] + x, id="add scalar"),
pytest.param(
lambda x: x.T @ x,
id="matrix multiply",
marks=pytest.mark.xfail(
reason="pint does not support matrix multiplication yet"
),
),
),
)
def test_binary_operations(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
expected = xr.DataArray(data=func(array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"comparison",
(
pytest.param(operator.lt, id="less_than"),
pytest.param(operator.ge, id="greater_equal"),
pytest.param(operator.eq, id="equal"),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, ValueError, id="without_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incorrect_unit"),
pytest.param(unit_registry.m, None, id="correct_unit"),
),
)
def test_comparison_operations(self, comparison, unit, error, dtype):
array = (
np.array([10.1, 5.2, 6.5, 8.0, 21.3, 7.1, 1.3]).astype(dtype)
* unit_registry.m
)
data_array = xr.DataArray(data=array)
value = 8
to_compare_with = value * unit
# incompatible units are all not equal
if error is not None and comparison is not operator.eq:
with pytest.raises(error):
comparison(array, to_compare_with)
with pytest.raises(error):
comparison(data_array, to_compare_with)
else:
result = comparison(data_array, to_compare_with)
            # pint compares incompatible arrays as False, so we broadcast the result;
            # the multiplication below works for both scalar and array comparison results
expected = xr.DataArray(
data=comparison(array, to_compare_with)
* np.ones_like(array, dtype=bool)
)
assert_equal_with_units(expected, result)
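    # Hedged sketch (not an original test, not collected by pytest) of the
    # broadcasting trick referenced by the comment in test_comparison_operations:
    # with incompatible units pint may return a bare ``False`` instead of a
    # boolean array, and multiplying by a boolean ones-array broadcasts either
    # form to an elementwise result. It only mirrors expressions used above.
    def _sketch_incompatible_comparison_broadcast(self):
        array = np.arange(3) * unit_registry.m
        # may be a bare bool or an all-False array, depending on the pint version
        maybe_scalar = array == (1 * unit_registry.s)
        return maybe_scalar * np.ones_like(array, dtype=bool)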
@pytest.mark.parametrize(
"units,error",
(
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.m, DimensionalityError, id="incorrect unit"),
pytest.param(unit_registry.degree, None, id="correct unit"),
),
)
def test_univariate_ufunc(self, units, error, dtype):
array = np.arange(10).astype(dtype) * units
data_array = xr.DataArray(data=array)
if error is not None:
with pytest.raises(error):
np.sin(data_array)
else:
expected = xr.DataArray(data=np.sin(array))
result = np.sin(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="pint's implementation of `np.maximum` strips units")
def test_bivariate_ufunc(self, dtype):
unit = unit_registry.m
array = np.arange(10).astype(dtype) * unit
data_array = xr.DataArray(data=array)
expected = xr.DataArray(np.maximum(array, 0 * unit))
assert_equal_with_units(expected, np.maximum(data_array, 0 * unit))
assert_equal_with_units(expected, np.maximum(0 * unit, data_array))
@pytest.mark.parametrize("property", ("T", "imag", "real"))
def test_numpy_properties(self, property, dtype):
array = (
np.arange(5 * 10).astype(dtype)
+ 1j * np.linspace(-1, 0, 5 * 10).astype(dtype)
).reshape(5, 10) * unit_registry.s
data_array = xr.DataArray(data=array, dims=("x", "y"))
expected = xr.DataArray(
data=getattr(array, property),
dims=("x", "y")[:: 1 if property != "T" else -1],
)
result = getattr(data_array, property)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
method("conj"),
method("argsort"),
method("conjugate"),
method("round"),
pytest.param(
method("rank", dim="x"),
marks=pytest.mark.xfail(reason="pint does not implement rank yet"),
),
),
ids=repr,
)
def test_numpy_methods(self, func, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array, dims="x")
expected = xr.DataArray(func(array), dims="x")
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func", (method("clip", min=3, max=8), method("searchsorted", v=5)), ids=repr
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_numpy_methods_with_args(self, func, unit, error, dtype):
array = np.arange(10).astype(dtype) * unit_registry.m
data_array = xr.DataArray(data=array)
scalar_types = (int, float)
kwargs = {
key: (value * unit if isinstance(value, scalar_types) else value)
for key, value in func.kwargs.items()
}
if error is not None:
with pytest.raises(error):
func(data_array, **kwargs)
else:
expected = func(array, **kwargs)
if func.name not in ["searchsorted"]:
expected = xr.DataArray(data=expected)
result = func(data_array, **kwargs)
if func.name in ["searchsorted"]:
assert np.allclose(expected, result)
else:
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func", (method("isnull"), method("notnull"), method("count")), ids=repr
)
def test_missing_value_detection(self, func, dtype):
array = (
np.array(
[
[1.4, 2.3, np.nan, 7.2],
[np.nan, 9.7, np.nan, np.nan],
[2.1, np.nan, np.nan, 4.6],
[9.9, np.nan, 7.2, 9.1],
]
)
* unit_registry.degK
)
x = np.arange(array.shape[0]) * unit_registry.m
y = np.arange(array.shape[1]) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x, "y": y}, dims=("x", "y"))
expected = func(strip_units(data_array))
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="ffill and bfill lose units in data")
@pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr)
def test_missing_value_filling(self, func, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.degK
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
result_without_units = func(strip_units(data_array), dim="x")
result = xr.DataArray(
data=result_without_units.data * unit_registry.degK,
coords={"x": x},
dims=["x"],
)
expected = attach_units(
func(strip_units(data_array), dim="x"), {"data": unit_registry.degK}
)
result = func(data_array, dim="x")
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="fillna drops the unit")
@pytest.mark.parametrize(
"fill_value",
(
pytest.param(
-1,
id="python scalar",
marks=pytest.mark.xfail(
reason="python scalar cannot be converted using astype()"
),
),
pytest.param(np.array(-1), id="numpy scalar"),
pytest.param(np.array([-1]), id="numpy array"),
),
)
def test_fillna(self, fill_value, dtype):
unit = unit_registry.m
array = np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype) * unit
data_array = xr.DataArray(data=array)
expected = attach_units(
strip_units(data_array).fillna(value=fill_value), {"data": unit}
)
result = data_array.fillna(value=fill_value * unit)
assert_equal_with_units(expected, result)
def test_dropna(self, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
expected = attach_units(
strip_units(data_array).dropna(dim="x"), {"data": unit_registry.m}
)
result = data_array.dropna(dim="x")
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="pint does not implement `numpy.isin`")
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="same_unit"),
),
)
def test_isin(self, unit, dtype):
array = (
np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
* unit_registry.m
)
data_array = xr.DataArray(data=array, dims="x")
raw_values = np.array([1.4, np.nan, 2.3]).astype(dtype)
values = raw_values * unit
result_without_units = strip_units(data_array).isin(raw_values)
if unit != unit_registry.m:
result_without_units[:] = False
result_with_units = data_array.isin(values)
assert_equal_with_units(result_without_units, result_with_units)
@pytest.mark.parametrize(
"variant",
(
pytest.param(
"masking",
marks=pytest.mark.xfail(reason="nan not compatible with quantity"),
),
pytest.param(
"replacing_scalar",
marks=pytest.mark.xfail(reason="scalar not convertible using astype"),
),
pytest.param(
"replacing_array",
marks=pytest.mark.xfail(
reason="replacing using an array drops the units"
),
),
pytest.param(
"dropping",
marks=pytest.mark.xfail(reason="nan not compatible with quantity"),
),
),
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="same_unit"),
),
)
def test_where(self, variant, unit, error, dtype):
def _strip_units(mapping):
return {key: array_strip_units(value) for key, value in mapping.items()}
original_unit = unit_registry.m
array = np.linspace(0, 1, 10).astype(dtype) * original_unit
data_array = xr.DataArray(data=array)
condition = data_array < 0.5 * original_unit
other = np.linspace(-2, -1, 10).astype(dtype) * unit
variant_kwargs = {
"masking": {"cond": condition},
"replacing_scalar": {"cond": condition, "other": -1 * unit},
"replacing_array": {"cond": condition, "other": other},
"dropping": {"cond": condition, "drop": True},
}
kwargs = variant_kwargs.get(variant)
kwargs_without_units = _strip_units(kwargs)
if variant not in ("masking", "dropping") and error is not None:
with pytest.raises(error):
data_array.where(**kwargs)
else:
expected = attach_units(
                strip_units(data_array).where(**kwargs_without_units),
{"data": original_unit},
)
result = data_array.where(**kwargs)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="interpolate strips units")
def test_interpolate_na(self, dtype):
array = (
np.array([-1.03, 0.1, 1.4, np.nan, 2.3, np.nan, np.nan, 9.1])
* unit_registry.m
)
x = np.arange(len(array))
data_array = xr.DataArray(data=array, coords={"x": x}, dims="x").astype(dtype)
expected = attach_units(
strip_units(data_array).interpolate_na(dim="x"), {"data": unit_registry.m}
)
result = data_array.interpolate_na(dim="x")
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="uses DataArray.where, which currently fails")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_combine_first(self, unit, error, dtype):
array = np.zeros(shape=(2, 2), dtype=dtype) * unit_registry.m
other_array = np.ones_like(array) * unit
data_array = xr.DataArray(
data=array, coords={"x": ["a", "b"], "y": [-1, 0]}, dims=["x", "y"]
)
other = xr.DataArray(
data=other_array, coords={"x": ["b", "c"], "y": [0, 1]}, dims=["x", "y"]
)
if error is not None:
with pytest.raises(error):
data_array.combine_first(other)
else:
expected = attach_units(
strip_units(data_array).combine_first(strip_units(other)),
{"data": unit_registry.m},
)
result = data_array.combine_first(other)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(
unit_registry.cm,
id="compatible_unit",
marks=pytest.mark.xfail(reason="identical does not check units yet"),
),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
@pytest.mark.parametrize(
"variation",
(
"data",
pytest.param(
"dims", marks=pytest.mark.xfail(reason="units in indexes not supported")
),
"coords",
),
)
@pytest.mark.parametrize("func", (method("equals"), method("identical")), ids=repr)
def test_comparisons(self, func, variation, unit, dtype):
data = np.linspace(0, 5, 10).astype(dtype)
coord = np.arange(len(data)).astype(dtype)
base_unit = unit_registry.m
quantity = data * base_unit
x = coord * base_unit
y = coord * base_unit
units = {
"data": (unit, base_unit, base_unit),
"dims": (base_unit, unit, base_unit),
"coords": (base_unit, base_unit, unit),
}
data_unit, dim_unit, coord_unit = units.get(variation)
data_array = xr.DataArray(
data=quantity, coords={"x": x, "y": ("x", y)}, dims="x"
)
other = attach_units(
strip_units(data_array),
{
None: (data_unit, base_unit if quantity.check(data_unit) else None),
"x": (dim_unit, base_unit if x.check(dim_unit) else None),
"y": (coord_unit, base_unit if y.check(coord_unit) else None),
},
)
# TODO: test dim coord once indexes leave units intact
# also, express this in terms of calls on the raw data array
# and then check the units
equal_arrays = (
np.all(quantity == other.data)
and (np.all(x == other.x.data) or True) # dims can't be checked yet
and np.all(y == other.y.data)
)
equal_units = (
data_unit == unit_registry.m
and coord_unit == unit_registry.m
and dim_unit == unit_registry.m
)
expected = equal_arrays and (func.name != "identical" or equal_units)
result = func(data_array, other)
assert expected == result
@pytest.mark.parametrize(
"unit",
(
pytest.param(1, id="no_unit"),
pytest.param(unit_registry.dimensionless, id="dimensionless"),
pytest.param(unit_registry.s, id="incompatible_unit"),
pytest.param(unit_registry.cm, id="compatible_unit"),
pytest.param(unit_registry.m, id="identical_unit"),
),
)
def test_broadcast_equals(self, unit, dtype):
left_array = np.ones(shape=(2, 2), dtype=dtype) * unit_registry.m
right_array = array_attach_units(
np.ones(shape=(2,), dtype=dtype),
unit,
convert_from=unit_registry.m if left_array.check(unit) else None,
)
left = xr.DataArray(data=left_array, dims=("x", "y"))
right = xr.DataArray(data=right_array, dims="x")
expected = np.all(left_array == right_array[:, None])
result = left.broadcast_equals(right)
assert expected == result
@pytest.mark.parametrize(
"func",
(
method("pipe", lambda da: da * 10),
method("assign_coords", y2=("y", np.arange(10) * unit_registry.mm)),
method("assign_attrs", attr1="value"),
method("rename", x2="x_mm"),
method("swap_dims", {"x": "x2"}),
method(
"expand_dims",
dim={"z": np.linspace(10, 20, 12) * unit_registry.s},
axis=1,
),
method("drop", labels="x"),
method("reset_coords", names="x2"),
method("copy"),
pytest.param(
method("astype", np.float32),
marks=pytest.mark.xfail(reason="units get stripped"),
),
pytest.param(
method("item", 1), marks=pytest.mark.xfail(reason="units get stripped")
),
),
ids=repr,
)
def test_content_manipulation(self, func, dtype):
quantity = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype)
* unit_registry.pascal
)
x = np.arange(quantity.shape[0]) * unit_registry.m
y = np.arange(quantity.shape[1]) * unit_registry.m
x2 = x.to(unit_registry.mm)
data_array = xr.DataArray(
name="data",
data=quantity,
coords={"x": x, "x2": ("x", x2), "y": y},
dims=("x", "y"),
)
stripped_kwargs = {
key: array_strip_units(value) for key, value in func.kwargs.items()
}
expected = attach_units(
func(strip_units(data_array), **stripped_kwargs),
{
"data": quantity.units,
"x": x.units,
"x_mm": x2.units,
"x2": x2.units,
"y": y.units,
},
)
result = func(data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(
pytest.param(
method("drop", labels=np.array([1, 5]), dim="x"),
marks=pytest.mark.xfail(
reason="selecting using incompatible units does not raise"
),
),
pytest.param(method("copy", data=np.arange(20))),
),
ids=repr,
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, DimensionalityError, id="no_unit"),
pytest.param(
unit_registry.dimensionless, DimensionalityError, id="dimensionless"
),
pytest.param(unit_registry.s, DimensionalityError, id="incompatible_unit"),
pytest.param(unit_registry.cm, KeyError, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_content_manipulation_with_units(self, func, unit, error, dtype):
quantity = np.linspace(0, 10, 20, dtype=dtype) * unit_registry.pascal
x = np.arange(len(quantity)) * unit_registry.m
data_array = xr.DataArray(name="data", data=quantity, coords={"x": x}, dims="x")
kwargs = {
key: (value * unit if isinstance(value, np.ndarray) else value)
for key, value in func.kwargs.items()
}
stripped_kwargs = func.kwargs
expected = attach_units(
func(strip_units(data_array), **stripped_kwargs),
{"data": quantity.units if func.name == "drop" else unit, "x": x.units},
)
if error is not None and func.name == "drop":
with pytest.raises(error):
func(data_array, **kwargs)
else:
result = func(data_array, **kwargs)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"indices",
(
pytest.param(4, id="single index"),
pytest.param([5, 2, 9, 1], id="multiple indices"),
),
)
def test_isel(self, indices, dtype):
array = np.arange(10).astype(dtype) * unit_registry.s
x = np.arange(len(array)) * unit_registry.m
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
expected = attach_units(
strip_units(data_array).isel(x=indices),
{"data": unit_registry.s, "x": unit_registry.m},
)
result = data_array.isel(x=indices)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(
reason="xarray does not support duck arrays in dimension coordinates"
)
@pytest.mark.parametrize(
"values",
(
pytest.param(12, id="single value"),
pytest.param([10, 5, 13], id="list of multiple values"),
pytest.param(np.array([9, 3, 7, 12]), id="array of multiple values"),
),
)
@pytest.mark.parametrize(
"units,error",
(
pytest.param(1, KeyError, id="no units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incorrect unit"),
pytest.param(unit_registry.s, None, id="correct unit"),
),
)
def test_sel(self, values, units, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
values_with_units = values * units
if error is not None:
with pytest.raises(error):
data_array.sel(x=values_with_units)
else:
result_array = array[values]
result_data_array = data_array.sel(x=values_with_units)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(
reason="xarray does not support duck arrays in dimension coordinates"
)
@pytest.mark.parametrize(
"values",
(
pytest.param(12, id="single value"),
pytest.param([10, 5, 13], id="list of multiple values"),
pytest.param(np.array([9, 3, 7, 12]), id="array of multiple values"),
),
)
@pytest.mark.parametrize(
"units,error",
(
pytest.param(1, KeyError, id="no units"),
pytest.param(unit_registry.dimensionless, KeyError, id="dimensionless"),
pytest.param(unit_registry.degree, KeyError, id="incorrect unit"),
pytest.param(unit_registry.s, None, id="correct unit"),
),
)
def test_loc(self, values, units, error, dtype):
array = np.linspace(5, 10, 20).astype(dtype) * unit_registry.m
x = np.arange(len(array)) * unit_registry.s
data_array = xr.DataArray(data=array, coords={"x": x}, dims=["x"])
values_with_units = values * units
if error is not None:
with pytest.raises(error):
data_array.loc[values_with_units]
else:
result_array = array[values]
result_data_array = data_array.loc[values_with_units]
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(reason="tries to coerce using asarray")
@pytest.mark.parametrize(
"shape",
(
pytest.param((10, 20), id="nothing squeezable"),
pytest.param((10, 20, 1), id="last dimension squeezable"),
pytest.param((10, 1, 20), id="middle dimension squeezable"),
pytest.param((1, 10, 20), id="first dimension squeezable"),
pytest.param((1, 10, 1, 20), id="first and last dimension squeezable"),
),
)
def test_squeeze(self, shape, dtype):
names = "xyzt"
coords = {
name: np.arange(length).astype(dtype)
* (unit_registry.m if name != "t" else unit_registry.s)
for name, length in zip(names, shape)
}
array = np.arange(10 * 20).astype(dtype).reshape(shape) * unit_registry.J
data_array = xr.DataArray(
data=array, coords=coords, dims=tuple(names[: len(shape)])
)
result_array = array.squeeze()
result_data_array = data_array.squeeze()
assert_equal_with_units(result_array, result_data_array)
# try squeezing the dimensions separately
names = tuple(dim for dim, coord in coords.items() if len(coord) == 1)
for index, name in enumerate(names):
assert_equal_with_units(
np.squeeze(array, axis=index), data_array.squeeze(dim=name)
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_interp(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
new_coords = (np.arange(10) + 0.5) * unit
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
if error is not None:
with pytest.raises(error):
data_array.interp(x=new_coords)
else:
new_coords_ = (
new_coords.magnitude if hasattr(new_coords, "magnitude") else new_coords
)
result_array = strip_units(data_array).interp(
x=new_coords_ * unit_registry.degK
)
result_data_array = data_array.interp(x=new_coords)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(reason="tries to coerce using asarray")
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_interp_like(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
coords = {
"x": (np.arange(10) + 0.3) * unit_registry.m,
"y": (np.arange(5) + 0.3) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
new_data_array = xr.DataArray(
data=np.empty((20, 10)),
coords={"x": np.arange(20) * unit, "y": np.arange(10) * unit},
dims=("x", "y"),
)
if error is not None:
with pytest.raises(error):
data_array.interp_like(new_data_array)
else:
result_array = (
xr.DataArray(
data=array.magnitude,
coords={name: value.magnitude for name, value in coords.items()},
dims=("x", "y"),
).interp_like(strip_units(new_data_array))
* unit_registry.degK
)
result_data_array = data_array.interp_like(new_data_array)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(
reason="pint does not implement np.result_type in __array_function__ yet"
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_reindex(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
new_coords = (np.arange(10) + 0.5) * unit
coords = {
"x": np.arange(10) * unit_registry.m,
"y": np.arange(5) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
if error is not None:
with pytest.raises(error):
                data_array.reindex(x=new_coords)
else:
result_array = strip_units(data_array).reindex(
x=(
new_coords.magnitude
if hasattr(new_coords, "magnitude")
else new_coords
)
* unit_registry.degK
)
result_data_array = data_array.reindex(x=new_coords)
assert_equal_with_units(result_array, result_data_array)
@pytest.mark.xfail(
reason="pint does not implement np.result_type in __array_function__ yet"
)
@pytest.mark.parametrize(
"unit,error",
(
pytest.param(1, None, id="no_unit"),
pytest.param(unit_registry.dimensionless, None, id="dimensionless"),
pytest.param(unit_registry.s, None, id="incompatible_unit"),
pytest.param(unit_registry.cm, None, id="compatible_unit"),
pytest.param(unit_registry.m, None, id="identical_unit"),
),
)
def test_reindex_like(self, unit, error):
array = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_registry.degK
coords = {
"x": (np.arange(10) + 0.3) * unit_registry.m,
"y": (np.arange(5) + 0.3) * unit_registry.m,
}
data_array = xr.DataArray(array, coords=coords, dims=("x", "y"))
new_data_array = xr.DataArray(
data=np.empty((20, 10)),
coords={"x": np.arange(20) * unit, "y": np.arange(10) * unit},
dims=("x", "y"),
)
if error is not None:
with pytest.raises(error):
data_array.reindex_like(new_data_array)
else:
expected = attach_units(
strip_units(data_array).reindex_like(strip_units(new_data_array)),
{
"data": unit_registry.degK,
"x": unit_registry.m,
"y": unit_registry.m,
},
)
result = data_array.reindex_like(new_data_array)
assert_equal_with_units(expected, result)
@pytest.mark.parametrize(
"func",
(method("unstack"), method("reset_index", "z"), method("reorder_levels")),
ids=repr,
)
def test_stacking_stacked(self, func, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * unit_registry.m
)
x = np.arange(array.shape[0])
y = np.arange(array.shape[1])
data_array = xr.DataArray(
name="data", data=array, coords={"x": x, "y": y}, dims=("x", "y")
)
stacked = data_array.stack(z=("x", "y"))
expected = attach_units(func(strip_units(stacked)), {"data": unit_registry.m})
result = func(stacked)
assert_equal_with_units(expected, result)
@pytest.mark.xfail(reason="indexes strip the label units")
def test_to_unstacked_dataset(self, dtype):
array = (
np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype)
* unit_registry.pascal
)
x = np.arange(array.shape[0]) * unit_registry.m
y = np.arange(array.shape[1]) * unit_registry.s
data_array = xr.DataArray(
data=array, coords={"x": x, "y": y}, dims=("x", "y")
).stack(z=("x", "y"))
func = method("to_unstacked_dataset", dim="z")
expected = attach_units(
func(strip_units(data_array)),
{"y": y.units, **dict(zip(x.magnitude, [array.units] * len(y)))},
).rename({elem.magnitude: elem for elem in x})
result = func(data_array)
print(data_array, expected, result, sep="\n")
assert_equal_with_units(expected, result)
assert False
@pytest.mark.parametrize(
"func",
(
method("transpose", "y", "x", "z"),
method("stack", a=("x", "y")),
method("set_index", x="x2"),
pytest.param(
method("shift", x=2), marks=pytest.mark.xfail(reason="strips units")
),
pytest.param(
method("roll", x=2, roll_coords=False),
marks=pytest.mark.xfail(reason="strips units"),
),
method("sortby", "x2"),
),
ids=repr,
)
def test_stacking_reordering(self, func, dtype):
array = (
np.linspace(0, 10, 2 * 5 * 10).reshape(2, 5, 10).astype(dtype)
* unit_registry.m
)
        x = np.arange(array.shape[0])
import numpy as np
import nengo
from nengo.networks.product import Product
from nengo.utils.compat import range
from nengo.utils.magic import memoize
def circconv(a, b, invert_a=False, invert_b=False, axis=-1):
"""A reference Numpy implementation of circular convolution"""
A = np.fft.fft(a, axis=axis)
B = np.fft.fft(b, axis=axis)
if invert_a:
A = A.conj()
if invert_b:
B = B.conj()
return np.fft.ifft(A * B, axis=axis).real
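# Hedged verification sketch (not part of the original module): circular
# convolution can also be written directly as c[k] = sum_i a[i] * b[(k - i) % d],
# which the FFT-based circconv above should reproduce.
def _circconv_direct(a, b):
    d = len(a)
    return np.array([sum(a[i] * b[(k - i) % d] for i in range(d)) for k in range(d)])
# Example check (commented out so importing this module stays side-effect free):
# rng = np.random.RandomState(0)
# a, b = rng.randn(16), rng.randn(16)
# assert np.allclose(circconv(a, b), _circconv_direct(a, b))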
@memoize
def transform_in(dims, align, invert):
"""Create a transform to map the input into the Fourier domain.
See CircularConvolution docstring for more details.
Parameters
----------
dims : int
Input dimensions.
align : 'A' or 'B'
How to align the real and imaginary components; the alignment
depends on whether we're doing transformA or transformB.
invert : bool
Whether to reverse the order of elements.
"""
if align not in ('A', 'B'):
raise ValueError("'align' must be either 'A' or 'B'")
dims2 = 4 * (dims // 2 + 1)
tr = np.zeros((dims2, dims))
dft = dft_half(dims)
for i in range(dims2):
row = dft[i // 4] if not invert else dft[i // 4].conj()
if align == 'A':
tr[i] = row.real if i % 2 == 0 else row.imag
else: # align == 'B'
tr[i] = row.real if i % 4 == 0 or i % 4 == 3 else row.imag
remove_imag_rows(tr)
return tr.reshape((-1, dims))
def transform_out(dims):
dims2 = (dims // 2 + 1)
tr = np.zeros((dims2, 4, dims))
idft = dft_half(dims).conj()
for i in range(dims2):
row = idft[i] if i == 0 or 2*i == dims else 2*idft[i]
tr[i, 0] = row.real
tr[i, 1] = -row.real
tr[i, 2] = -row.imag
tr[i, 3] = -row.imag
tr = tr.reshape(4*dims2, dims)
remove_imag_rows(tr)
# IDFT has a 1/D scaling factor
tr /= dims
return tr.T
def remove_imag_rows(tr):
"""Throw away imaginary row we don't need (since they're zero)"""
    i = np.arange(tr.shape[0])
import numpy as np
import logging
import time
import math
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from config import config
from hyperparameters import all_models
import os
# Return boolean arrays with length corresponding to n_samples;
# the split is done based on the number of unique IDs.
def split(ids, train, val, test):
assert (train+val+test == 1)
IDs = np.unique(ids)
num_ids = len(IDs)
# priority given to the test/val sets
test_split = math.ceil(test * num_ids)
val_split = math.ceil(val * num_ids)
train_split = num_ids - val_split - test_split
train = np.isin(ids, IDs[:train_split])
    val = np.isin(ids, IDs[train_split:train_split+val_split])
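# Minimal self-contained sketch (not part of the original) of the ID-based split
# idea used by ``split`` above: a chosen subset of unique IDs is mapped back to
# per-sample boolean masks with np.isin, so no ID ends up in more than one set.
# The helper name is illustrative only.
def _split_by_id_demo():
    ids = np.array([1, 1, 2, 2, 2, 3, 4, 4])
    IDs = np.unique(ids)                    # [1, 2, 3, 4]
    train_mask = np.isin(ids, IDs[:2])      # samples whose ID is 1 or 2
    val_mask = np.isin(ids, IDs[2:3])       # samples of ID 3
    test_mask = np.isin(ids, IDs[3:])       # samples of ID 4
    return train_mask, val_mask, test_mask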
from __future__ import print_function
import time
import gc
import numpy as np
import tensorflow as tf
from src.VGG16 import VGG16mo
from src.utils.utils import *
import cv2
from lap import lapjv
from src.utils.shape_context import ShapeContext
import matplotlib.pyplot as plt
class CNN(object):
def __init__(self):
self.height = 224
self.width = 224
self.shape = np.array([224.0, 224.0])
self.sift_weight = 2.0
self.cnn_weight = 1.0
self.max_itr = 200
self.tolerance = 1e-2
self.freq = 5 # k in the paper
self.epsilon = 0.5
self.omega = 0.5
self.beta = 2.0
self.lambd = 0.5
self.cnnph = tf.placeholder("float", [2, 224, 224, 3])
self.vgg = VGG16mo()
self.vgg.build(self.cnnph)
self.SC = ShapeContext()
def register(self, IX, IY):
# set parameters
tolerance = self.tolerance
freq = self.freq
epsilon = self.epsilon
omega = self.omega
beta = self.beta
lambd = self.lambd
# resize image
Xscale = 1.0 * np.array(IX.shape[:2]) / self.shape
Yscale = 1.0 * np.array(IY.shape[:2]) / self.shape
IX = cv2.resize(IX, (self.height, self.width))
IY = cv2.resize(IY, (self.height, self.width))
# CNN feature
# propagate the images through VGG16
IX = np.expand_dims(IX, axis=0)
IY = np.expand_dims(IY, axis=0)
cnn_input = np.concatenate((IX, IY), axis=0)
with tf.Session() as sess:
feed_dict = {self.cnnph: cnn_input}
D1, D2, D3 = sess.run([
self.vgg.pool3, self.vgg.pool4, self.vgg.pool5_1
], feed_dict=feed_dict)
# flatten
DX1, DY1 = np.reshape(D1[0], [-1, 256]), np.reshape(D1[1], [-1, 256])
DX2, DY2 = np.reshape(D2[0], [-1, 512]), np.reshape(D2[1], [-1, 512])
DX3, DY3 = np.reshape(D3[0], [-1, 512]), np.reshape(D3[1], [-1, 512])
# normalization
DX1, DY1 = DX1 / np.std(DX1), DY1 / np.std(DY1)
DX2, DY2 = DX2 / np.std(DX2), DY2 / np.std(DY2)
DX3, DY3 = DX3 / np.std(DX3), DY3 / np.std(DY3)
del D1, D2, D3
# compute feature space distance
PD1 = pairwise_distance(DX1, DY1)
PD2 = pd_expand(pairwise_distance(DX2, DY2), 2)
PD3 = pd_expand(pairwise_distance(DX3, DY3), 4)
PD = 1.414 * PD1 + PD2 + PD3
del DX1, DY1, DX2, DY2, DX3, DY3, PD1, PD2, PD3
seq = np.array([[i, j] for i in range(28) for j in range(28)], dtype='int32')
X = np.array(seq, dtype='float32') * 8.0 + 4.0
Y = np.array(seq, dtype='float32') * 8.0 + 4.0
# normalize
X = (X - 112.0) / 224.0
Y = (Y - 112.0) / 224.0
# prematch and select points
C_all, quality = match(PD)
tau_max = np.max(quality)
while np.where(quality >= tau_max)[0].shape[0] <= 128: tau_max -= 0.01
C = C_all[np.where(quality >= tau_max)]
cnt = C.shape[0]
# select prematched feature points
X, Y = X[C[:, 1]], Y[C[:, 0]]
PD = PD[np.repeat(np.reshape(C[:, 1], [cnt, 1]), cnt, axis=1),
np.repeat(np.reshape(C[:, 0], [1, cnt]), cnt, axis=0)]
N = X.shape[0]
M = X.shape[0]
assert M == N
# precalculation of feature match
C_all, quality = match(PD)
# compute \hat{\theta} and \delta
tau_min = np.min(quality)
tau_max = np.max(quality)
while np.where(quality >= tau_max)[0].shape[0] <= 0.5 * cnt: tau_max -= 0.01
tau = tau_max
delta = (tau_max - tau_min) / 10.0
SCX = self.SC.compute(X)
# initialization
Z = Y.copy()
GRB = gaussian_radial_basis(Y, beta)
A = np.zeros([M, 2])
sigma2 = init_sigma2(X, Y)
Pr = None
Q = 0
dQ = float('Inf')
itr = 1
# registration process
while itr < self.max_itr and abs(dQ) > tolerance and sigma2 > 1e-4:
Z_old = Z.copy()
Q_old = Q
# for every k iterations
if (itr - 1) % freq == 0:
# compute C^{conv}_{\theta}
C = C_all[np.where(quality >= tau)]
Lt = PD[C[:, 0], C[:, 1]]
maxLt = np.max(Lt)
if maxLt > 0: Lt = Lt / maxLt
L = np.ones([M, N])
L[C[:, 0], C[:, 1]] = Lt
# compute C^{geo}_{\theta}
SCZ = self.SC.compute(Z)
SC_cost = self.SC.cost(SCZ, SCX)
# compute C
L = L * SC_cost
# linear assignment
C = lapjv(L)[1]
# prior probability matrix
Pr = np.ones_like(PD) * (1.0 - epsilon) / N
Pr[np.arange(C.shape[0]), C] = 1.0
Pr = Pr / np.sum(Pr, axis=0)
tau = tau - delta
if tau < tau_min: tau = tau_min
# compute minimization
Po, P1, Np, tmp, Q = compute(X, Y, Z_old, Pr, sigma2, omega)
Q = Q + lambd / 2 * np.trace(np.dot(np.dot(A.transpose(), GRB), A))
# update variables
dP = np.diag(P1)
t1 = np.dot(dP, GRB) + lambd * sigma2 * np.eye(M)
t2 = np.dot(Po, X) - np.dot(dP, Y)
            A = np.dot(np.linalg.inv(t1), t2)  # second argument assumed to be t2 (the update solves t1 @ A = t2)
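# Hedged sketch (not from the original source) of the coefficient update computed
# in the registration loop above: A solves (dP @ GRB + lambd * sigma2 * I) @ A =
# Po @ X - dP @ Y; np.linalg.solve is used here instead of an explicit inverse.
def _grb_coefficient_update(dP, GRB, Po, X, Y, lambd, sigma2):
    M = GRB.shape[0]
    t1 = np.dot(dP, GRB) + lambd * sigma2 * np.eye(M)
    t2 = np.dot(Po, X) - np.dot(dP, Y)
    return np.linalg.solve(t1, t2)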
from algo.mrcnn import utils
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import cv2
############################################################
# Dataset
############################################################
class PDL1NetDataset(utils.Dataset):
def __init__(self, class_map=None, active_inflammation_tag=False):
super().__init__(class_map)
self.active_inflammation_tag = active_inflammation_tag
def load_pdl1net_dataset(self, dataset_dir, subset, synthetic=False, real=False):
"""Load a subset of the PDL1 dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: train or val
"""
# Add classes. We have only one class to add.
self.add_class("PDL1", 1, "inflammation")
self.add_class("PDL1", 2, "negative")
self.add_class("PDL1", 3, "positive")
        # if we decide to delete the next line, reduce the number of classes in the config
self.add_class("PDL1", 4, "other")
# self.add_class("PDL1", 5, "air")
ids = [c["id"] for c in self.class_info]
names = [c["name"] for c in self.class_info]
self.class_name2id = dict(zip(names, ids))
# Train or validation dataset?
# TODO: change the path to the right one
# assert subset in ["train", "val"]
dataset_dir = os.path.join(dataset_dir, subset)
# Load annotations
# VGG Image Annotator saves each image in the form:
# { 'filename': '28503151_5b5b7ec140_b.jpg',
# 'regions': {
# '0': {
# 'region_attributes': {},
# 'shape_attributes': {
# 'all_points_x': [...],
# 'all_points_y': [...],
# 'name': 'polygon'}},
# ... more regions ...
# },
# 'size': 100202
# }
# We mostly care about the x and y coordinates of each region
# TODO: make sure the json has the right name
# ATTENTION! the parser will work only for via POLYGON segmented regions
# annotations = json.load(open(os.path.join(dataset_dir, "train_synth_via_json.json")))
if synthetic:
# full types (we use only positive, negative and other):
# type2class = {0: "other", 1: "inflammation", 2: "negative", 3: "positive", 4: "black-pad", 5: "air", 6: "cell", 7: "noise"}
type2class = {0: "other", 1: "other", 2: "negative", 3: "positive", 4: "other", 5: "other",
6: "other", 7: "other"}
images_path = os.path.join(dataset_dir, "images")
images_names = os.listdir(images_path)
labels_path = os.path.join(dataset_dir, "labels")
for image_name in images_names:
if "input_label" not in image_name:
label_name = image_name.split("_cells")[0] + ".png"
label_path = os.path.join(labels_path, label_name)
classes = list(type2class.values())
image_path = os.path.join(images_path, image_name)
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
# add air mask
gray = (1 / 256) * utils.rgb2gray(image)
air_mask = gray > 0.95
self.add_image(
"PDL1",
image_id=image_name, # use file name as a unique image id
path=image_path,
width=width, height=height,
synthetic=True,
label_path=label_path,
classes=classes,
air_mask=air_mask)
elif real:
dataset_dir = dataset_dir.split(subset)[0]
images_path = os.path.join(dataset_dir, "good")
images_names = os.listdir(images_path)
for image_name in images_names:
image_path = os.path.join(images_path, image_name)
# image = skimage.io.imread(image_path)
# height, width = image.shape[:2]
self.add_image(
"PDL1",
image_id=image_name, # use file name as a unique image id
path=image_path,
width=1024, height=1024, # assuming the patch crop size is 1024
real=True,
classes=[])
else:
json_dir = os.path.join(dataset_dir, "via_export_json.json")
annotations = json.load(open(json_dir))
annotations = list(annotations.values()) # don't need the dict keys
# The VIA tool saves images in the JSON even if they don't have any
# annotations. Skip unannotated images.
# annotations = [a for a in annotations if a['regions']]
type2class = {"1": "inflammation", "2": "negative", "3": "positive", "4": "other"} # yael's data
# type2class = {"1": "inflammation", "2": "negative", "3": "positive", "4": "other", "5": "air"} # yael's data
# type2class = {"1": "other", "2": "positive", "3": "positive", "4": "other"}
# type2class = {"inf": "inflammation", "neg": "negative", "pos": "positive", "other": "other"} #synthetic's data
# Add images
for a in annotations:
                # Get the x, y coordinates of points of the polygons that make up
                # the outline of each object instance. These are stored in the
                # shape_attributes (see json format above)
polygons = [r['shape_attributes'] for r in a['regions']]
# classes = [r['region_attributes']['category'] for r in a['regions']] # validate that a list of classes is obtained
classes = [r['region_attributes']['type'] for r in
a['regions']] # 'category' for synthetic data, 'type' for yael's data
classes = [type2class[c] for c in classes]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
                # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
# add air mask
gray = (1 / 256) * utils.rgb2gray(image)
air_mask = gray > 0.95
self.add_image(
"PDL1",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
classes=classes,
air_mask=air_mask)
def load_mask(self, image_id):
"""
Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not a PDL1 dataset image, delegate to parent class.
info = self.image_info[image_id]
if info["source"] != "PDL1":
return super(self.__class__, self).load_mask(image_id)
if "synthetic" in info.keys() and info["synthetic"]:
label_path = info["label_path"]
labels = cv2.imread(label_path)
mask = np.zeros([info["height"], info["width"], len(info["classes"])], dtype=np.uint8)
for i in range(info["height"]):
for j in range(info["width"]):
class_type = labels[i, j, 0]
mask[i, j, class_type] = 1
mask_classes = [self.class_name2id[name] for name in info["classes"]]
mask_classes = np.array(mask_classes, dtype=np.int32)
# remove other from masks
mask = mask[:, :, mask_classes != self.class_name2id["other"]]
mask_classes = mask_classes[mask_classes != self.class_name2id["other"]]
            # Return mask, and the array of class IDs of each instance mask
return mask, mask_classes
if "real" in info.keys() and info["real"]:
mask = np.zeros([info["height"], info["width"], len(info["classes"])], dtype=np.uint8)
return mask, np.array([])
# Convert polygons to a bitmap mask of shape
# [height, width, instance_count]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
dtype=np.uint8)
# mask = np.zeros([info["height"], info["width"], len(info["polygons"])+1],
# dtype=np.uint8)
# TODO: make sure no intersection are made between polygons
for i, p in enumerate(info["polygons"]):
# Get indexes of pixels inside the polygon and set them to 1
if 'all_points_y' not in p.keys() or 'all_points_x' not in p.keys():
continue
if p['all_points_y'] is None or p['all_points_x'] is None:
continue
# check if an element in the list is also a list
if any(isinstance(elem, list) for elem in p['all_points_y']) or any(
isinstance(elem, list) for elem in p['all_points_x']):
continue
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'], (info["height"], info["width"]))
mask[rr, cc, i] = 1
# filter air from mask
mask[:, :, i] = mask[:, :, i] * np.logical_not(info["air_mask"])
# add air class
# mask[..., -1] = info["air_mask"]
# mask_classes = [self.class_name2id[name] for name in self.class_names]
mask_classes = [self.class_name2id[name] for name in info["classes"]]
# mask_classes.append(5)
mask_classes = np.array(mask_classes, dtype=np.int32)
# clean masks intersections
# create united mask for each class
united_masks = np.zeros([info["height"], info["width"], self.num_classes])
for i in np.arange(self.num_classes):
masks_of_same_class = mask[:, :, mask_classes == (i + 1)]
            for single_mask_index in np.arange(masks_of_same_class.shape[2])
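# Hedged sketch (not the original implementation): one way to build a united
# mask per class id from per-instance masks, OR-combining all instance masks of
# the same class so they collapse into a single boolean mask per class.
def _union_instance_masks(mask, mask_classes, num_classes):
    height, width = mask.shape[:2]
    united_masks = np.zeros([height, width, num_classes], dtype=bool)
    for i in np.arange(num_classes):
        masks_of_same_class = mask[:, :, mask_classes == (i + 1)]
        for single_mask_index in np.arange(masks_of_same_class.shape[2]):
            united_masks[:, :, i] = np.logical_or(
                united_masks[:, :, i], masks_of_same_class[:, :, single_mask_index]
            )
    return united_masks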
import os
import numpy as np
import pytest
import shapely.wkt
from green_spaces.image_loaders import OrdnanceSurveyMapLoader, OrdnanceSurveyMapLoaderWithInfraRed
actual_image_loader_calls = 0
latitude_longitude_coord_system = "urn:ogc:def:crs:OGC:1.3:CRS84"
eastings_northings_coord_system = "urn:ogc:def:crs:EPSG::27700"
def extract_poly_coords(geom):
if geom.type == 'Polygon':
exterior_coords = geom.exterior.coords[:]
interior_coords = []
for interior in geom.interiors:
interior_coords += interior.coords[:]
elif geom.type == 'MultiPolygon':
exterior_coords = []
interior_coords = []
for part in geom:
epc = extract_poly_coords(part) # Recursive call
exterior_coords += epc['exterior_coords']
interior_coords += epc['interior_coords']
else:
raise ValueError('Unhandled geometry type: ' + repr(geom.type))
return {'exterior_coords': exterior_coords,
'interior_coords': interior_coords}
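# Hedged usage sketch (not one of the original tests, not collected by pytest):
# extract_poly_coords on a simple square returns its five exterior ring points
# (the closing point is repeated) and no interior ring points.
def _extract_poly_coords_example():
    square = shapely.wkt.loads('POLYGON ((0 0, 1 0, 1 1, 0 1, 0 0))')
    coords = extract_poly_coords(square)
    assert coords['interior_coords'] == []
    assert len(coords['exterior_coords']) == 5
    return coords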
def test_OrdnanceSurveyMapLoader_raises_error_with_unknown_coordinate_system():
root_folder_path = 'root'
tile_size = 100
primary_cache_size = 1000000
crs_name = "unknown system"
loader_config = {'folder': root_folder_path, 'tile_size': tile_size, 'name': 'test_OS'}
with pytest.raises(ValueError) as e_info:
OrdnanceSurveyMapLoader(loader_config, crs_name, primary_cache_size, 0, None)
assert e_info.value.args[0] == f'crs_name="{crs_name}" is unsupported'
def test_OrdnanceSurveyMapLoader_supports_eastings_northings():
root_folder_path = 'root'
tile_size = 100
primary_cache_size = 1000000
loader_config = {'folder': root_folder_path, 'tile_size': tile_size, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, eastings_northings_coord_system, primary_cache_size, 0, None)
eastings_northings_geometry = shapely.wkt.loads('MULTIPOLYGON (((367220.85 170316.2, 367220.55 170316.3, '
'367205.423 170315.39, 367220.85 170316.2)))')
expected_tile_geometry = shapely.wkt.loads('MULTIPOLYGON (((367.22085 170.3162, 367.22055 170.3163, '
'367.205423 170.31539, 367.22085 170.3162)))')
tile_geometry = loader.calculate_tile_geometry(eastings_northings_geometry)
np.testing.assert_almost_equal(
extract_poly_coords(tile_geometry)['exterior_coords'],
extract_poly_coords(expected_tile_geometry)['exterior_coords'],
decimal=5
)
def test_OrdnanceSurveyMapLoader_supports_latitude_longitude():
root_folder_path = 'root'
tile_size = 100
primary_cache_size = 1000000
loader_config = {'folder': root_folder_path, 'tile_size': tile_size, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, latitude_longitude_coord_system, primary_cache_size, 0, None)
eastings_northings_geometry = shapely.wkt.loads('MULTIPOLYGON (((-2.472899455869044 51.430893105324593, '
'-2.472903780332496 51.430893987034189, '
'-2.473121281324169 51.430884926567657, '
'-2.472899455869044 51.430893105324593)))')
expected_tile_geometry = shapely.wkt.loads('MULTIPOLYGON (((367.22085 170.3162, 367.22055 170.3163, '
'367.205423 170.31539, 367.22085 170.3162)))')
tile_geometry = loader.calculate_tile_geometry(eastings_northings_geometry)
np.testing.assert_almost_equal(
extract_poly_coords(tile_geometry)['exterior_coords'],
extract_poly_coords(expected_tile_geometry)['exterior_coords'],
decimal=5
)
def test_OrdnanceSurveyMapLoader_build_tile_file_name():
root_folder_path = 'root'
tile_size = 100
primary_cache_size = 1000000
loader_config = {'folder': root_folder_path, 'tile_size': tile_size, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, latitude_longitude_coord_system, primary_cache_size, 0, None)
eastings = 702
northings = 345
expected_path = os.path.join('TH', 'TH04', 'TH0245.jpg')
actual_path = loader.build_tile_file_name(eastings, northings)
assert expected_path == actual_path
def test_OrdnanceSurveyMapLoader_retrieve_image_loads_rgb_as_bgr_red():
expected_image = np.array([
[[0, 0, 255], [0, 0, 255]],
[[0, 0, 255], [0, 0, 255]]
], dtype=np.uint8)
loader_config = {'folder': 'tests/data/images_RGB', 'tile_size': 2, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, latitude_longitude_coord_system, 0, 0)
actual_image = loader.retrieve_image('red.png')
np.testing.assert_equal(actual_image, expected_image)
def test_OrdnanceSurveyMapLoader_retrieve_image_loads_rgb_as_bgr_green():
expected_image = np.array([
[[0, 255, 0], [0, 255, 0]],
[[0, 255, 0], [0, 255, 0]]
], dtype=np.uint8)
loader_config = {'folder': 'tests/data/images_RGB', 'tile_size': 2, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, latitude_longitude_coord_system, 0, 0)
actual_image = loader.retrieve_image('green.png')
np.testing.assert_equal(actual_image, expected_image)
def test_OrdnanceSurveyMapLoader_retrieve_image_loads_rgb_as_bgr_blue():
expected_image = np.array([
[[255, 0, 0], [255, 0, 0]],
[[255, 0, 0], [255, 0, 0]]
], dtype=np.uint8)
loader_config = {'folder': 'tests/data/images_RGB', 'tile_size': 2, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, latitude_longitude_coord_system, 0, 0)
actual_image = loader.retrieve_image('blue.png')
np.testing.assert_equal(actual_image, expected_image)
def test_OrdnanceSurveyMapLoader_retrieve_image_resizes_to_config():
expected_image = np.array([
[[255, 0, 0], [255, 0, 0], [255, 0, 0]],
[[255, 0, 0], [255, 0, 0], [255, 0, 0]],
[[255, 0, 0], [255, 0, 0], [255, 0, 0]]
], dtype=np.uint8)
loader_config = {'folder': 'tests/data/images_RGB', 'tile_size': 3, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoader(loader_config, latitude_longitude_coord_system, 0, 0)
actual_image = loader.retrieve_image('blue.png')
np.testing.assert_equal(actual_image, expected_image)
assert loader.warnings == ['Image "tests/data/images_RGB/blue.png" is sized (2, 2, 3) rather than (3, 3, 3))']
def test_OrdnanceSurveyMapLoaderWithInfraRed_retrieve_image_loads_rgb_cir_as_bgrir_blue():
expected_image = np.array([
[[255, 0, 0, 0], [255, 0, 0, 0]],
[[255, 0, 0, 0], [255, 0, 0, 0]]
], dtype=np.uint8)
loader_config = {'folder_RGB': 'tests/data/images_RGB',
'folder_CIR': 'tests/data/images_CIR',
'final_tile_size': 2, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoaderWithInfraRed(loader_config, latitude_longitude_coord_system, 0, 0)
actual_image = loader.retrieve_image('blue.png')
np.testing.assert_equal(actual_image, expected_image)
def test_OrdnanceSurveyMapLoaderWithInfraRed_retrieve_image_loads_rgb_cir_as_bgrir_blue_plus_ir():
expected_image = np.array([
[[255, 0, 0, 255], [255, 0, 0, 255]],
[[255, 0, 0, 255], [255, 0, 0, 255]]
], dtype=np.uint8)
loader_config = {'folder_RGB': 'tests/data/images_RGB',
'folder_CIR': 'tests/data/images_CIR',
'final_tile_size': 2, 'name': 'test_OS'}
loader = OrdnanceSurveyMapLoaderWithInfraRed(loader_config, latitude_longitude_coord_system, 0, 0)
actual_image = loader.retrieve_image('blue+ir.png')
    np.testing.assert_equal(actual_image, expected_image)
# coding: utf-8
# In[2]:
## Process AlpArray Switzerland for SKS-Splitting
## <NAME>
######################################################
######### LOAD IN THE MODULES
######################################################
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:75% !important; }</style>"))
import matplotlib.pyplot as plt
import numpy as np
import os
import obspy
from obspy import read
from obspy.clients.fdsn import Client
from obspy import UTCDateTime
from obspy.taup import TauPyModel
from obspy.geodetics.base import gps2dist_azimuth
from obspy.geodetics import locations2degrees
from obspy.taup import plot_travel_times
from obspy.signal.rotate import rotate_ne_rt
from obspy.signal.polarization import particle_motion_odr
from obspy.signal.util import next_pow_2
import matplotlib.gridspec as gridspec
from scipy.optimize import curve_fit
from matplotlib.mlab import specgram
from scipy import stats
from itertools import izip_longest as zip_longest
#from itertools import zip_longest as zip_longest
from tqdm import tqdm
import time
import multiprocessing
import splitwavepy as sw
######################################################
###
######################################################
# In[123]:
### FUNCTIONS TO LOAD IN THE RESULTS
# def read_SKS_files(path,filename):
# filename = '{0}/{1}'.format(path,filename) #removed first two header lines
# with open(filename) as f:
# content = f.readlines()
# station = ['']*len(content)
# dt = ['']*len(content)
# dtlag = ['']*len(content)
# fast_dir = ['']*len(content)
# dfast_dir = ['']*len(content)
# for i in range(1,len(content)-1):
# data = zip_longest(*(x.split(' ') for x in content[i].splitlines()), fillvalue='\t')
# for row in zip(*data):
# new_data = tuple(np.nan if x == '' else x for x in row)
# line = new_data
# station[i] = line[0][1:-1]
# dt[i] = float(line[1][1:-2])
# dtlag[i] = float(line[2][1:-2])
# fast_dir[i] = float(line[3][1:-2])
# dfast_dir[i] = float(line[4][1:-2])
# station = np.asarray(station[1:-1])
# dt = np.asarray(dt[1:-1])
# dtlag = np.asarray(dtlag[1:-1])
# fast_dir = np.asarray(fast_dir[1:-1])
# ## convert from -90-90 to 0-180
# fast_dir = (fast_dir+180)%180
# dfast_dir = np.asarray(dfast_dir[1:-1])
# return station,dt,dtlag,fast_dir,dfast_dir
def calc_u_v(r,phi):
if (phi>=0 and phi<=90):
phi=90-phi
u = r*np.cos(np.deg2rad(phi))
v = r*np.sin(np.deg2rad(phi))
elif (phi>90 and phi<=180):
phi=180-phi
u = r*np.sin(np.deg2rad(phi))
v = -r*np.cos(np.deg2rad(phi))
elif (phi>180 and phi<=270):
phi=270-phi
u = -r*np.cos(np.deg2rad(phi))
v = -r*np.sin(np.deg2rad(phi))
elif (phi>270 and phi<=360):
phi=360-phi
u = -r*np.sin(np.deg2rad(phi))
v = r*np.cos(np.deg2rad(phi))
elif (phi>=-90 and phi<0):
phi=abs(phi)
u = -r*np.sin(np.deg2rad(phi))
v = r*np.cos(np.deg2rad(phi))
elif (phi>=-180 and phi<-90):
phi=180-abs(phi)
u = -r*np.sin(np.deg2rad(phi))
v = -r*np.cos(np.deg2rad(phi))
return u,v
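# Hedged sanity-check sketch (not part of the original notebook): calc_u_v treats
# phi as a compass azimuth in degrees clockwise from north, so for r = 1 the
# result should match the closed form u = sin(phi), v = cos(phi).
def _calc_u_v_sanity_check():
    for az in (0.0, 45.0, 90.0, 180.0, 270.0, 359.0):
        u, v = calc_u_v(1.0, az)
        assert np.allclose((u, v), (np.sin(np.deg2rad(az)), np.cos(np.deg2rad(az))))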
def read_SKS_files(path,filename):
filename = '{0}/{1}'.format(path,filename) #removed first two header lines
with open(filename) as f:
content = f.readlines()
station = ['']*len(content)
dt = ['']*len(content)
dtlag = ['']*len(content)
fast_dir = ['']*len(content)
dfast_dir = ['']*len(content)
best_twin = ['']*len(content)
chi_phi = ['']*len(content)
chi_dt = ['']*len(content)
p_phi = ['']*len(content)
p_dt = ['']*len(content)
n_events = ['']*len(content)
for i in range(1,len(content)):
data = zip_longest(*(x.split(' ') for x in content[i].splitlines()), fillvalue='\t')
for row in zip(*data):
new_data = tuple(np.nan if x == '' else x for x in row)
line = new_data
station[i] = line[0][1:-1]
fast_dir[i] = float(line[1][1:-2])
dfast_dir[i] = float(line[2][1:-2])
dt[i] = float(line[3][1:-2])
dtlag[i] = float(line[4][1:-2])
best_twin[i] = float(line[5][1:-2])
chi_phi[i] = float(line[6][1:-2])
p_phi[i] = float(line[7][1:-2])
chi_dt[i] = float(line[8][1:-2])
p_dt[i] = float(line[9][1:-2])
n_events[i] = float(line[10][1:-1])
station = np.asarray(station[1:])
dt = np.asarray(dt[1:])
dtlag = np.asarray(dtlag[1:])
    fast_dir = np.asarray(fast_dir[1:])
from copy import deepcopy as _deepcopy
import numpy as _np
import pandas as _pd
from scipy import integrate as _integrate
from atmPy.aerosols.size_distribution import moments as _sizedist_moment_conversion
from atmPy.general import timeseries as _timeseries
from atmPy.general import vertical_profile as _vertical_profile
from atmPy.radiation.mie_scattering import bhmie as _bhmie
# import atmPy.aerosols.size_distribution.sizedistribution as _sizedistribution
from atmPy.aerosols.size_distribution import sizedistribution as _sizedistribution
import warnings as _warnings
# Todo: Docstring is wrong
# todo: This function can be sped up by breaking it apart. Then have OpticalProperties
# have properties that call the subfunction on demand
def size_dist2optical_properties(op, sd, aod=False, noOfAngles=100):
"""
    !!! This docstring needs fixing
    Calculates the extinction cross-section, AOD, phase function, and asymmetry parameter for each layer.
    Plotting the layer- and diameter-dependent extinction coefficient gives you an idea of what dominates the overall AOD.
Parameters
----------
wavelength: float.
wavelength of the scattered light, unit: nm
n: float.
Index of refraction of the scattering particles
noOfAngles: int, optional.
Number of scattering angles to be calculated. This mostly effects calculations which depend on the phase
function.
Returns
-------
OpticalProperty instance
"""
# if not _np.any(sd.index_of_refraction):
# txt = 'Refractive index is not specified. Either set self.index_of_refraction or set optional parameter n.'
# raise ValueError(txt)
# if not sd.sup_optical_properties_wavelength:
# txt = 'Please provied wavelength by setting the attribute sup_optical_properties_wavelength (in nm).'
# raise AttributeError(txt)
sd.parameters4reductions._check_opt_prop_param_exist()
wavelength = sd.parameters4reductions.wavelength.value
n = sd.parameters4reductions.refractive_index.value
mie_result = sd.parameters4reductions.mie_result.value
out = {}
sdls = sd.convert2numberconcentration()
index = sdls.data.index
dist_class = type(sdls).__name__
if dist_class not in ['SizeDist','SizeDist_TS','SizeDist_LS']:
raise TypeError('this distribution class (%s) can not be converted into optical property yet!'%dist_class)
    # determine whether the index of refraction changes or is constant
if isinstance(n, _pd.DataFrame):
n_multi = True
else:
n_multi = False
if not n_multi:
if isinstance(mie_result, type(None)):
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n,
noOfAngles=noOfAngles)
else:
mie = mie_result['mie']
angular_scatt_func = mie_result['angular_scatt_func']
out['mie_result'] = {'mie': mie, 'angular_scatt_func': angular_scatt_func}
if aod:
        # todo: use a function that does the interpolation instead of the sum?!? I guess this can lead to errors when layers are very thick, since centers are used instead of edges.
AOD_layer = _np.zeros((len(sdls.layercenters)))
extCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)), dtype= _np.float32)
scattCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)), dtype= _np.float32)
absCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)), dtype= _np.float32)
angular_scatt_func_effective = _pd.DataFrame()
asymmetry_parameter_LS = _np.zeros((len(sdls.data.index.values)))
#calculate optical properties for each line in the dataFrame
for i, lc in enumerate(sdls.data.index.values):
laydata = sdls.data.iloc[i].values # picking a size distribution (either a layer or a point in time)
if n_multi:
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n.iloc[i].values[0],
noOfAngles=noOfAngles)
extinction_coefficient = _get_coefficients(mie.extinction_crossection, laydata)
scattering_coefficient = _get_coefficients(mie.scattering_crossection, laydata)
absorption_coefficient = _get_coefficients(mie.absorption_crossection, laydata)
out['test.extcross'] = mie.extinction_crossection.copy()
out['test.extcoeff'] = extinction_coefficient.copy()
out['test.laydata'] = laydata
if aod:
layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
extCoeffPerLayer[i] = extinction_coefficient
scattCoeffPerLayer[i] = scattering_coefficient
absCoeffPerLayer[i] = absorption_coefficient
scattering_cross_eff = laydata * mie.scattering_crossection
pfe = (laydata * angular_scatt_func).sum(axis=1) # sum of all angular_scattering_intensities
x_2p = pfe.index.values
y_2p = pfe.values
# limit to [0,pi]
y_1p = y_2p[x_2p < _np.pi]
x_1p = x_2p[x_2p < _np.pi]
y_phase_func = y_1p * 4 * _np.pi / scattering_cross_eff.sum()
asymmetry_parameter_LS[i] = .5 * _integrate.simps(_np.cos(x_1p) * y_phase_func * _np.sin(x_1p), x_1p)
angular_scatt_func_effective[
            lc] = pfe * 1e-12 * 1e6  # equivalent to extCoeffPerLayer # similar to _get_coefficients (converts everything to meter)
if aod:
out['AOD'] = AOD_layer[~ _np.isnan(AOD_layer)].sum()
out['AOD_layer'] = _pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
extCoeff_perrow_perbin = _pd.DataFrame(extCoeffPerLayer, index=index, columns=sdls.data.columns)
scattCoeff_perrow_perbin = _pd.DataFrame(scattCoeffPerLayer, index=index, columns=sdls.data.columns)
absCoeff_perrow_perbin = _pd.DataFrame(absCoeffPerLayer, index=index, columns=sdls.data.columns)
# if dist_class == 'SizeDist_TS':
# out['extCoeff_perrow_perbin'] = timeseries.TimeSeries_2D(extCoeff_perrow_perbin)
# if dist_class == 'SizeDist':
# out['extCoeff_perrow_perbin'] = _timeseries.TimeSeries(extCoeff_perrow_perbin)
# out['scattCoeff_perrow_perbin'] = _timeseries.TimeSeries(scattCoeff_perrow_perbin)
# out['absCoeff_perrow_perbin'] = _timeseries.TimeSeries(absCoeff_perrow_perbin)
# else:
out['extCoeff_perrow_perbin'] = extCoeff_perrow_perbin
out['scattCoeff_perrow_perbin'] = scattCoeff_perrow_perbin
out['absCoeff_perrow_perbin'] = absCoeff_perrow_perbin
out['parent_type'] = dist_class
out['asymmetry_param'] = _pd.DataFrame(asymmetry_parameter_LS, index=index,
columns=['asymmetry_param'])
out['wavelength'] = wavelength
out['index_of_refraction'] = n
out['bin_centers'] = sdls.bincenters
out['bins'] = sdls.bins
out['binwidth'] = sdls.binwidth
out['distType'] = sdls.distributionType
out['angular_scatt_func'] = angular_scatt_func_effective.transpose()
### test values
out['mie_curve_ext'] = mie.extinction_crossection
out['mie_inst'] = mie
return out
def DEPRECATED_size_dist2optical_properties(sd, aod=False, noOfAngles=100):
"""
    !!! This docstring needs fixing.
    Calculates the extinction cross section, AOD, phase function, and asymmetry parameter for each layer.
    Plotting the layer- and diameter-dependent extinction coefficient gives you an idea of what dominates the overall AOD.
Parameters
----------
wavelength: float.
wavelength of the scattered light, unit: nm
n: float.
Index of refraction of the scattering particles
noOfAngles: int, optional.
        Number of scattering angles to be calculated. This mostly affects calculations which depend on the phase
function.
Returns
-------
OpticalProperty instance
"""
# if not _np.any(sd.index_of_refraction):
# txt = 'Refractive index is not specified. Either set self.index_of_refraction or set optional parameter n.'
# raise ValueError(txt)
# if not sd.sup_optical_properties_wavelength:
    # txt = 'Please provide wavelength by setting the attribute sup_optical_properties_wavelength (in nm).'
# raise AttributeError(txt)
sd.optical_properties_settings._check()
wavelength = sd.optical_properties_settings.wavelength.value
n = sd.optical_properties_settings.refractive_index.value
out = {}
sdls = sd.convert2numberconcentration()
index = sdls.data.index
dist_class = type(sdls).__name__
if dist_class not in ['SizeDist','SizeDist_TS','SizeDist_LS']:
raise TypeError('this distribution class (%s) can not be converted into optical property yet!'%dist_class)
    # determine whether the index of refraction changes or is constant
if isinstance(n, _pd.DataFrame):
n_multi = True
else:
n_multi = False
if not n_multi:
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n,
noOfAngles=noOfAngles)
if aod:
        # todo: use a function that does the interpolation instead of the sum? I guess this can lead to errors when layers are very thick, since centers are used instead of edges.
AOD_layer = _np.zeros((len(sdls.layercenters)))
extCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
scattCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
absCoeffPerLayer = _np.zeros((len(sdls.data.index.values), len(sdls.bincenters)))
angular_scatt_func_effective = _pd.DataFrame()
asymmetry_parameter_LS = _np.zeros((len(sdls.data.index.values)))
#calculate optical properties for each line in the dataFrame
for i, lc in enumerate(sdls.data.index.values):
laydata = sdls.data.iloc[i].values # picking a size distribution (either a layer or a point in time)
if n_multi:
mie, angular_scatt_func = _perform_Miecalculations(_np.array(sdls.bincenters / 1000.), wavelength / 1000., n.iloc[i].values[0],
noOfAngles=noOfAngles)
extinction_coefficient = _get_coefficients(mie.extinction_crossection, laydata)
scattering_coefficient = _get_coefficients(mie.scattering_crossection, laydata)
absorption_coefficient = _get_coefficients(mie.absorption_crossection, laydata)
if aod:
layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
extCoeffPerLayer[i] = extinction_coefficient
scattCoeffPerLayer[i] = scattering_coefficient
absCoeffPerLayer[i] = absorption_coefficient
scattering_cross_eff = laydata * mie.scattering_crossection
pfe = (laydata * angular_scatt_func).sum(axis=1) # sum of all angular_scattering_intensities
x_2p = pfe.index.values
y_2p = pfe.values
# limit to [0,pi]
y_1p = y_2p[x_2p < _np.pi]
x_1p = x_2p[x_2p < _np.pi]
y_phase_func = y_1p * 4 * _np.pi / scattering_cross_eff.sum()
asymmetry_parameter_LS[i] = .5 * _integrate.simps(_np.cos(x_1p) * y_phase_func * _np.sin(x_1p), x_1p)
angular_scatt_func_effective[
                lc] = pfe * 1e-12 * 1e6 # equivalent to extCoeffPerLayer # similar to _get_coefficients (converts everything to meters)
if aod:
out['AOD'] = AOD_layer[~ _np.isnan(AOD_layer)].sum()
out['AOD_layer'] = _pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
extCoeff_perrow_perbin = _pd.DataFrame(extCoeffPerLayer, index=index, columns=sdls.data.columns)
scattCoeff_perrow_perbin = _pd.DataFrame(scattCoeffPerLayer, index=index, columns=sdls.data.columns)
absCoeff_perrow_perbin = _pd.DataFrame(absCoeffPerLayer, index=index, columns=sdls.data.columns)
# if dist_class == 'SizeDist_TS':
# out['extCoeff_perrow_perbin'] = timeseries.TimeSeries_2D(extCoeff_perrow_perbin)
if dist_class == 'SizeDist':
out['extCoeff_perrow_perbin'] = _timeseries.TimeSeries(extCoeff_perrow_perbin)
out['scattCoeff_perrow_perbin'] = _timeseries.TimeSeries(scattCoeff_perrow_perbin)
out['absCoeff_perrow_perbin'] = _timeseries.TimeSeries(absCoeff_perrow_perbin)
else:
out['extCoeff_perrow_perbin'] = extCoeff_perrow_perbin
out['scattCoeff_perrow_perbin'] = scattCoeff_perrow_perbin
out['absCoeff_perrow_perbin'] = absCoeff_perrow_perbin
# extCoeff_perrow = pd.DataFrame(extCoeff_perrow_perbin.sum(axis=1), columns=['ext_coeff'])
# if index.dtype == '<M8[ns]':
# out['extCoeff_perrow'] = timeseries.TimeSeries(extCoeff_perrow)
# else:
# out['extCoeff_perrow'] = extCoeff_perrow
out['parent_type'] = dist_class
out['asymmetry_param'] = _pd.DataFrame(asymmetry_parameter_LS, index=index,
columns=['asymmetry_param'])
# out['asymmetry_param_alt'] = pd.DataFrame(asymmetry_parameter_LS_alt, index=sdls.layercenters, columns = ['asymmetry_param_alt'])
# out['OptPropInstance']= OpticalProperties(out, self.bins)
out['wavelength'] = wavelength
out['index_of_refraction'] = n
out['bin_centers'] = sdls.bincenters
out['bins'] = sdls.bins
out['binwidth'] = sdls.binwidth
out['distType'] = sdls.distributionType
out['angular_scatt_func'] = angular_scatt_func_effective
# opt_properties = OpticalProperties(out, self.bins)
# opt_properties.wavelength = wavelength
# opt_properties.index_of_refractio = n
    # opt_properties.angular_scatt_func = angular_scatt_func_effective  # This is the former phase_fct, but since it is the angular scattering intensity, I changed the name
# opt_properties.parent_dist_LS = self
if dist_class == 'SizeDist_TS':
return OpticalProperties_TS(out, parent = sd)
elif dist_class == 'SizeDist_LS':
return OpticalProperties_VP(out, parent= sd)
return out
def hemispheric_backscattering(osf_df):
"""scattering into backwards hemisphere from angulare scattering intensity
Parameters
----------
osf_df: pandas DataFrame
This contains the angulare scattering intensity with column names giving the
angles in radiant
Returns
-------
pandas data frame with the scattering intensities
"""
import pdb
# pdb.set_trace()
def ang_scat_funk2bs(index,ol):
x = index #_np.deg2rad(index)
f = ol
# pdb.set_trace()
        # my phase function goes all the way to two pi
f = f[x < _np.pi]
x = x[x < _np.pi]
f_b = f[x >= _np.pi / 2.]
x_b = x[x >= _np.pi / 2.]
# pdb.set_trace()
res_b = 2 * _np.pi * _integrate.simps(f_b * _np.sin(x_b), x_b)
return res_b
bs = _np.zeros(osf_df.shape[0])
index = osf_df.columns
for i in range(osf_df.shape[0]):
ol = osf_df.iloc[i,:].values
bs[i] = ang_scat_funk2bs(index,ol)
bs = _pd.DataFrame(bs, index = osf_df.index)
return bs
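# Hedged usage sketch (not part of the original module): for a constant (isotropic) angular
# scattering intensity I(theta) = 1 the backward-hemisphere integral is
# 2 * pi * integral_{pi/2}^{pi} sin(theta) dtheta = 2 * pi.
def _example_hemispheric_backscattering():
    """Minimal sketch; columns are the scattering angles in radians, one row per sample."""
    angles = _np.linspace(0, 2 * _np.pi, 721)
    osf = _pd.DataFrame([_np.ones_like(angles)], columns=angles)
    return hemispheric_backscattering(osf)  # 1x1 DataFrame holding roughly 2 * pi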
def hemispheric_forwardscattering(osf_df):
"""scattering into forward hemisphere from angulare scattering intensity
Parameters
----------
osf_df: pandas DataFrame
This contains the angulare scattering intensity with column names giving the
angles in radiant
Returns
-------
pandas data frame with the scattering intensities
"""
def ang_scat_funk2fs(index,ol):
x = index #ol.index.values
f = ol
        # my phase function goes all the way to two pi
f = f[x < _np.pi]
x = x[x < _np.pi]
f_f = f[x < _np.pi / 2.]
x_f = x[x < _np.pi / 2.]
res_f = 2 * _np.pi * _integrate.simps(f_f * _np.sin(x_f), x_f)
return res_f
fs = _np.zeros(osf_df.shape[0])
index = osf_df.columns
for i in range(osf_df.shape[0]):
ol = osf_df.iloc[i,:].values
fs[i] = ang_scat_funk2fs(index,ol)
fs = _pd.DataFrame(fs, index = osf_df.index)
return fs
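# Hedged sanity check (not part of the original module): the forward and backward hemispheric
# integrals should add up to the full-sphere integral 2 * pi * integral_0^pi I(theta) sin(theta) dtheta.
def _example_hemispheric_split():
    """Minimal sketch; for a constant intensity of 1 the two hemispheres sum to roughly 4 * pi."""
    angles = _np.linspace(0, 2 * _np.pi, 721)
    osf = _pd.DataFrame([_np.ones_like(angles)], columns=angles)
    fwd = hemispheric_forwardscattering(osf).iloc[0, 0]
    back = hemispheric_backscattering(osf).iloc[0, 0]
    return fwd + back  # roughly 4 * pi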
# Todo: bins are redundant
# Todo: some functions should be switched off
# Todo: right now this is for both layers and time series, which is not ok
class OpticalProperties(object):
def __init__(self, parent):
self._parent_sizedist = parent
self.parameters = _sizedistribution._Parameters4Reductions_opt_prop(parent)
# self.asymmetry_param = data['asymmetry_param']
self._extinction_coeff = None
self._scattering_coeff = None
self._absorption_coeff = None
self._mie_result = None
self._hemispheric_backscattering = None
# self._hemispheric_backscattering_ratio = None
self._hemispheric_forwardscattering = None
# self._hemispheric_forwardscattering_ratio = None
self._optical_porperties_pv = None
self.mean_effective_diameter = None
self._parent_type = type(parent).__name__
self.bins = parent.bins
self.binwidth = parent.binwidth
self.distributionType = parent.distributionType
# self._data_period = self.parent_sizedist._data_period
@property
def extinction_coeff_per_bin(self):
self._optical_porperties
return self._extinction_coeff_per_bin
@property
def scattering_coeff_per_bin(self):
self._optical_porperties
return self._scattering_coeff_per_bin
@property
def absorption_coeff_per_bin(self):
self._optical_porperties
return self._absorption_coeff_per_bin
@property
def angular_scatt_func(self):
self._optical_porperties
return self._angular_scatt_func
@property
def _optical_porperties(self):
if not self._optical_porperties_pv:
data = size_dist2optical_properties(self, self._parent_sizedist)
self._optical_porperties_pv = data
####
self._extinction_coeff_per_bin = data['extCoeff_perrow_perbin']
self._extinction_coeff = _pd.DataFrame(self._extinction_coeff_per_bin.sum(axis=1), columns=['ext_coeff_m^1'])
####
self._scattering_coeff_per_bin = data['scattCoeff_perrow_perbin']
self._scattering_coeff = _pd.DataFrame(self._scattering_coeff_per_bin.sum(axis=1), columns=['scatt_coeff_m^1'])
#####
self._absorption_coeff_per_bin = data['absCoeff_perrow_perbin']
self._absorption_coeff = _pd.DataFrame(self._absorption_coeff_per_bin.sum(axis=1), columns=['abs_coeff_m^1'])
####
self._angular_scatt_func = data['angular_scatt_func']
####
self.parameters.mie_result = data['mie_result']
return self._optical_porperties_pv
@property
def absorption_coeff(self):
self._optical_porperties
return self._absorption_coeff
@property
def extinction_coeff(self):
self._optical_porperties
return self._extinction_coeff
@property
def scattering_coeff(self):
self._optical_porperties
return self._scattering_coeff
@property
def hemispheric_backscattering(self):
if not _np.any(self._hemispheric_backscattering):
self._hemispheric_backscattering = hemispheric_backscattering(self.angular_scatt_func)
self._hemispheric_backscattering_ratio = _pd.DataFrame(
self._hemispheric_backscattering.iloc[:, 0] / self._scattering_coeff.iloc[:, 0],
columns=['hem_back_scatt_ratio'])
return self._hemispheric_backscattering
@property
def hemispheric_backscattering_ratio(self):
self.hemispheric_backscattering
# if not _np.any(self._hemispheric_backscattering_ratio):
# self._hemispheric_backscattering_ratio = _pd.DataFrame(self.hemispheric_backscattering.iloc[:,0] / self._scattering_coeff.iloc[:,0], columns=['hem_beck_scatt_ratio'])
return self._hemispheric_backscattering_ratio
@property
def hemispheric_forwardscattering(self):
if not _np.any(self._hemispheric_forwardscattering):
self._hemispheric_forwardscattering = hemispheric_forwardscattering(self.angular_scatt_func)
self._hemispheric_forwardscattering_ratio = _pd.DataFrame(self._hemispheric_forwardscattering.iloc[:, 0] / self._scattering_coeff.iloc[:, 0],
columns=['hem_forward_scatt_ratio'])
return self._hemispheric_forwardscattering
@property
def hemispheric_forwardscattering_ratio(self):
self.hemispheric_forwardscattering
# if not _np.any(self._hemispheric_forwardscattering_ratio):
# self._hemispheric_forwardscattering_ratio = self.hemispheric_forwardscattering / self.scattering_coeff
return self._hemispheric_forwardscattering_ratio
def convert_between_moments(self, moment, verbose = False):
return _sizedist_moment_conversion.convert(self,moment, verbose = verbose)
def copy(self):
return _deepcopy(self)
# Todo: bins are redundant
# Todo: some functions should be switched off
# Todo: right now this is for both layers and time series, which is not ok
class DEPRECATEDOpticalProperties(object):
def __init__(self, data, parent = None):
self.parent_sizedist = parent
self.data_orig = data
self.wavelength = data['wavelength']
self.index_of_refraction = data['index_of_refraction']
self.extinction_coeff_per_bin = data['extCoeff_perrow_perbin']
self.scattering_coeff_per_bin = data['scattCoeff_perrow_perbin']
self.absorption_coeff_per_bin = data['absCoeff_perrow_perbin']
self.angular_scatt_func = data['angular_scatt_func']
# self.asymmetry_param = data['asymmetry_param']
self.__extinction_coeff_sum_along_d = None
self.__scattering_coeff_sum_along_d = None
self.__absorption_coeff_sum_along_d = None
self.mean_effective_diameter = None
self._parent_type = data['parent_type']
self.bins = data['bins']
self.binwidth = data['binwidth']
self.distributionType = data['distType']
# self._data_period = self.parent_sizedist._data_period
# @property
# def mean_effective_diameter(self):
# if not self.__mean_effective_diameter:
# # todo: remove
# @property
# def extinction_coeff_sum_along_d(self):
# _warnings.warn('extinction_coeff_sum_along_d is deprecated and will be removed in future versions. Use extingction_coeff instead')
# if not _np.any(self.__extinction_coeff_sum_along_d):
# data = self.extinction_coeff_per_bin.data.sum(axis = 1)
# df = _pd.DataFrame()
# df['ext_coeff_m^1'] = data
# if self._parent_type == 'SizeDist_TS':
# self.__extinction_coeff_sum_along_d = _timeseries.TimeSeries(df)
# elif self._parent_type == 'SizeDist':
# self.__extinction_coeff_sum_along_d = df
# else:
# raise TypeError('not possible for this distribution type')
# self.__extinction_coeff_sum_along_d._data_period = self._data_period
# return self.__extinction_coeff_sum_along_d
#
# # todo: remove
# @extinction_coeff_sum_along_d.setter
# def extinction_coeff_sum_along_d(self, data):
# self.__extinction_coeff_sum_along_d = data
@property
def extinction_coeff(self):
if not _np.any(self.__extinction_coeff_sum_along_d):
data = self.extinction_coeff_per_bin.data.sum(axis=1)
df = _pd.DataFrame()
df['ext_coeff_m^1'] = data
if self._parent_type == 'SizeDist_TS':
self.__extinction_coeff_sum_along_d = _timeseries.TimeSeries(df)
self.__extinction_coeff_sum_along_d._data_period = self._data_period
elif self._parent_type == 'SizeDist_LS':
self.__extinction_coeff_sum_along_d = _vertical_profile.VerticalProfile(df)
elif self._parent_type == 'SizeDist':
self.__extinction_coeff_sum_along_d = df
else:
raise TypeError('not possible for this distribution type')
return self.__extinction_coeff_sum_along_d
@extinction_coeff.setter
def extinction_coeff(self, data):
self.__extinction_coeff_sum_along_d = data
@property
def scattering_coeff(self):
if not _np.any(self.__scattering_coeff_sum_along_d):
data = self.scattering_coeff_per_bin.data.sum(axis=1)
df = _pd.DataFrame()
df['scatt_coeff_m^1'] = data
if self._parent_type == 'SizeDist_TS':
self.__scattering_coeff_sum_along_d = _timeseries.TimeSeries(df)
elif self._parent_type == 'SizeDist':
self.__scattering_coeff_sum_along_d = df
else:
raise TypeError('not possible for this distribution type')
self.__scattering_coeff_sum_along_d._data_period = self._data_period
return self.__scattering_coeff_sum_along_d
@scattering_coeff.setter
def scattering_coeff(self, data):
self.__scattering_coeff_sum_along_d = data
@property
def absorption_coeff(self):
if not _np.any(self.__absorption_coeff_sum_along_d):
data = self.absorption_coeff_per_bin.data.sum(axis=1)
df = _pd.DataFrame()
df['abs_coeff_m^1'] = data
if self._parent_type == 'SizeDist_TS':
self.__absorption_coeff_sum_along_d = _timeseries.TimeSeries(df)
elif self._parent_type == 'SizeDist':
self.__absorption_coeff_sum_along_d = df
else:
raise TypeError('not possible for this distribution type')
self.__absorption_coeff_sum_along_d._data_period = self._data_period
return self.__absorption_coeff_sum_along_d
@absorption_coeff.setter
def absorption_coeff(self, data):
self.__absorption_coeff_sum_along_d = data
@property
def hemispheric_backscattering(self):
if not self.__hemispheric_backscattering:
self.__hemispheric_backscattering = hemispheric_backscattering(self.angular_scatt_func)
return self.__hemispheric_backscattering
@property
def hemispheric_forwardscattering(self):
if not self.__hemispheric_forwardscattering:
self.__hemispheric_forwardscattering = hemispheric_forwardscattering(self.angular_scatt_func)
return self.__hemispheric_forwardscattering
@property
def hemispheric_backscattering_ratio(self):
if not self.__hemispheric_backscattering_ratio:
self.__hemispheric_backscattering_ratio = self.hemispheric_backscattering / self.scattering_coeff
return self.__hemispheric_backscattering_ratio
@property
def hemispheric_forwardscattering_ratio(self):
        if not self.__hemispheric_forwardscattering_ratio:
self.__hemispheric_forwardscattering_ratio = self.hemispheric_forwardscattering / self.scattering_coeff
return self.__hemispheric_forwardscattering_ratio
def convert_between_moments(self, moment, verbose = False):
return _sizedist_moment_conversion.convert(self,moment, verbose = verbose)
def copy(self):
return _deepcopy(self)
class OpticalProperties_TS(OpticalProperties):
@property
def hemispheric_forwardscattering(self):
super().hemispheric_forwardscattering
return _timeseries.TimeSeries(self._hemispheric_forwardscattering, sampling_period = self._parent_sizedist._data_period)
@property
def hemispheric_backscattering(self):
super().hemispheric_backscattering
return _timeseries.TimeSeries(self._hemispheric_backscattering, sampling_period = self._parent_sizedist._data_period)
@property
def hemispheric_backscattering_ratio(self):
self.hemispheric_backscattering
return _timeseries.TimeSeries(self._hemispheric_backscattering_ratio, sampling_period = self._parent_sizedist._data_period)
@property
def hemispheric_forwardscattering_ratio(self):
self.hemispheric_forwardscattering
return _timeseries.TimeSeries(self._hemispheric_forwardscattering_ratio, sampling_period = self._parent_sizedist._data_period)
@property
def absorption_coeff(self):
self._optical_porperties
return _timeseries.TimeSeries(self._absorption_coeff, sampling_period = self._parent_sizedist._data_period)
@property
def extinction_coeff(self):
self._optical_porperties
return _timeseries.TimeSeries(self._extinction_coeff, sampling_period = self._parent_sizedist._data_period)
@property
def scattering_coeff(self):
self._optical_porperties
return _timeseries.TimeSeries(self._scattering_coeff, sampling_period = self._parent_sizedist._data_period)
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.extinction_coeff_per_bin = _timeseries.TimeSeries_2D(self.extinction_coeff_per_bin)
# self.extinction_coeff_per_bin._data_period = self.parent_sizedist._data_period
#
# self.scattering_coeff_per_bin = _timeseries.TimeSeries_2D(self.scattering_coeff_per_bin)
# self.scattering_coeff_per_bin._data_period = self.parent_sizedist._data_period
#
# self.absorption_coeff_per_bin = _timeseries.TimeSeries_2D(self.absorption_coeff_per_bin)
# self.absorption_coeff_per_bin._data_period = self.parent_sizedist._data_period
#
# self.angular_scatt_func = _timeseries.TimeSeries_2D(self.angular_scatt_func.transpose())
# self.angular_scatt_func._data_period = self.parent_sizedist._data_period
#
# self.__hemispheric_forwardscattering = None
# self.__hemispheric_backscattering = None
# self.__hemispheric_backscattering_ratio = None
# self.__hemispheric_forwardscattering_ratio = None
# self._data_period = self.parent_sizedist._data_period
#
#
#
# @property
# def hemispheric_backscattering(self):
# if not self.__hemispheric_backscattering:
# out = hemispheric_backscattering(self.angular_scatt_func.data)
# out = _timeseries.TimeSeries(out)
# out._data_period = self.angular_scatt_func._data_period
# self.__hemispheric_backscattering = out
# return self.__hemispheric_backscattering
#
# @hemispheric_backscattering.setter
# def hemispheric_backscattering(self,value):
# self.__hemispheric_backscattering = value
#
# @property
# def hemispheric_forwardscattering(self):
# if not self.__hemispheric_forwardscattering:
# out = hemispheric_forwardscattering(self.angular_scatt_func.data)
# out = _timeseries.TimeSeries(out)
# out._data_period = self.angular_scatt_func._data_period
# self.__hemispheric_forwardscattering = out
# return self.__hemispheric_forwardscattering
#
#
# @hemispheric_forwardscattering.setter
# def hemispheric_forwardscattering(self, value):
# self.__hemispheric_forwardscattering = value
#
# @property
# def hemispheric_backscattering_ratio(self):
# """ratio between backscattering and overall scattering"""
# if not self.__hemispheric_backscattering_ratio:
# # self.__hemispheric_backscattering_ratio = self.hemispheric_backscattering / self.extinction_coeff
# self.__hemispheric_backscattering_ratio = self.hemispheric_backscattering / self.scattering_coeff
# return self.__hemispheric_backscattering_ratio
#
# @property
# def hemispheric_forwardscattering_ratio(self):
# """ratio between forwardscattering and over scattering"""
# if not self.__hemispheric_forwardscattering_ratio:
# self.__hemispheric_forwardscattering_ratio = self.hemispheric_forwardscattering / self.scattering_coeff
# return self.__hemispheric_forwardscattering_ratio
class OpticalProperties_VP(OpticalProperties):
@property
def hemispheric_forwardscattering(self):
super().hemispheric_forwardscattering
return _vertical_profile.VerticalProfile(self._hemispheric_forwardscattering)
@property
def hemispheric_backscattering(self):
super().hemispheric_backscattering
return _vertical_profile.VerticalProfile(self._hemispheric_backscattering)
@property
def hemispheric_backscattering_ratio(self):
self.hemispheric_backscattering
return _vertical_profile.VerticalProfile(self._hemispheric_backscattering_ratio)
@property
def hemispheric_forwardscattering_ratio(self):
self.hemispheric_forwardscattering
return _vertical_profile.VerticalProfile(self._hemispheric_forwardscattering_ratio)
@property
def absorption_coeff(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._absorption_coeff)
@property
def extinction_coeff(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._extinction_coeff)
@property
def scattering_coeff(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._scattering_coeff)
@property
def _optical_porperties(self):
if not self._optical_porperties_pv:
super()._optical_porperties
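            # AOD per layer = extinction coefficient (1/m) times layer thickness (m); the total AOD is the
            # sum over all layers, and the cumulative AOD below is accumulated from the top of the profile
            # downwards (hence the [::-1] reversal).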
layerthickness = self._parent_sizedist.layerbounderies[:, 1] - self._parent_sizedist.layerbounderies[:, 0]
aod_per_bin_per_layer = self._parent_sizedist.optical_properties.extinction_coeff_per_bin.multiply(layerthickness, axis=0)
aod_per_layer = _pd.DataFrame(aod_per_bin_per_layer.sum(axis=1), columns=['aod_per_layer'])
self._aod = aod_per_layer.values.sum()
aod_cumulative = aod_per_layer.iloc[::-1].cumsum()
aod_cumulative.rename(columns={'aod_per_layer': 'aod'}, inplace=True)
self._aod_cumulative = aod_cumulative
return self._optical_porperties_pv
@property
def aod(self):
self._optical_porperties
return self._aod
@property
def aod_cumulative(self):
self._optical_porperties
return _vertical_profile.VerticalProfile(self._aod_cumulative)
class DEPRECATED_OpticalProperties_VP(OpticalProperties):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.extinction_coeff_per_bin = _vertical_profile.VerticalProfile_2D(self.extinction_coeff_per_bin)
self.aerosol_optical_depth_cumulative_VP = _vertical_profile.VerticalProfile(self._data_dict['AOD_cum'])
self.asymmetry_param_VP = _vertical_profile.VerticalProfile(self._data_dict['asymmetry_param'])
self.aerosol_optical_depth_cumulative = self._data_dict['AOD']
class ExtinctionCoeffVerticlProfile(_vertical_profile.VerticalProfile):
def __init__(self, ext, parent, wavelength, index_of_refraction):
super(ExtinctionCoeffVerticlProfile, self).__init__(ext)
self.parent = parent
self.wavelength = wavelength
self.index_of_refraction = index_of_refraction
def plot(self, *args, **kwargs):
a = super(ExtinctionCoeffVerticlProfile, self).plot(*args, **kwargs)
a.set_xlabel('Extinction coefficient (m$^{-1}$)')
return a
def _perform_Miecalculations(diam, wavelength, n, noOfAngles=100.):
"""
Performs Mie calculations
Parameters
----------
diam: NumPy array of floats
Array of diameters over which to perform Mie calculations; units are um
wavelength: float
Wavelength of light in um for which to perform calculations
n: complex
Ensemble complex index of refraction
    Returns
    -------
    pandas DataFrame with the diameters as the index and the Mie scattering results in the different columns
        total_extinction_coefficient: the sum of the cross sections of all particles of a particular diameter in a cubic
                                      meter. This is in principle the AOD of a layer.
"""
diam = _np.asarray(diam)
extinction_efficiency = _np.zeros(diam.shape)
scattering_efficiency = _np.zeros(diam.shape)
absorption_efficiency = _np.zeros(diam.shape)
extinction_crossection = _np.zeros(diam.shape)
scattering_crossection = _np.zeros(diam.shape)
absorption_crossection = _np.zeros(diam.shape)
# phase_function_natural = pd.DataFrame()
angular_scattering_natural = _pd.DataFrame()
# extinction_coefficient = np.zeros(diam.shape)
# scattering_coefficient = np.zeros(diam.shape)
# absorption_coefficient = np.zeros(diam.shape)
# Function for calculating the size parameter for wavelength l and radius r
sp = lambda r, l: 2. * _np.pi * r / l
for e, d in enumerate(diam):
radius = d / 2.
# print('sp(radius, wavelength)', sp(radius, wavelength))
# print('n', n)
# print('d', d)
mie = _bhmie.bhmie_hagen(sp(radius, wavelength), n, noOfAngles, diameter=d)
values = mie.return_Values_as_dict()
extinction_efficiency[e] = values['extinction_efficiency']
# print("values['extinction_crosssection']",values['extinction_crosssection'])
scattering_efficiency[e] = values['scattering_efficiency']
absorption_efficiency[e] = values['extinction_efficiency'] - values['scattering_efficiency']
extinction_crossection[e] = values['extinction_crosssection']
scattering_crossection[e] = values['scattering_crosssection']
absorption_crossection[e] = values['extinction_crosssection'] - values['scattering_crosssection']
# phase_function_natural[d] = values['phaseFct_natural']['Phase_function_natural'].values
angular_scattering_natural[d] = mie.get_angular_scatt_func().natural.values
# print('\n')
# phase_function_natural.index = values['phaseFct_natural'].index
angular_scattering_natural.index = mie.get_angular_scatt_func().index
out = _pd.DataFrame(index=diam)
out['extinction_efficiency'] = _pd.Series(extinction_efficiency, index=diam)
out['scattering_efficiency'] = _pd.Series(scattering_efficiency, index=diam)
out['absorption_efficiency'] = _pd.Series(absorption_efficiency, index=diam)
out['extinction_crossection'] = _pd.Series(extinction_crossection, index=diam)
out['scattering_crossection'] = _pd.Series(scattering_crossection, index=diam)
out['absorption_crossection'] = _pd.Series(absorption_crossection, index=diam)
return out, angular_scattering_natural
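# Hedged usage sketch (illustrative values only; relies on the bundled _bhmie module used above).
# Note that diameters and wavelength are both expected in micrometers here -- the callers above
# divide their nm values by 1000 before passing them in.
def _example_perform_Miecalculations():
    diams_um = _np.array([0.1, 0.5, 1.0])   # particle diameters in um
    wavelength_um = 0.55                    # 550 nm
    n = 1.5 + 0.001j                        # assumed complex refractive index
    mie, ang_scatt = _perform_Miecalculations(diams_um, wavelength_um, n, noOfAngles=100)
    return mie['extinction_crossection'], ang_scatt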
def _get_coefficients(crossection, cn):
"""
    Calculates the extinction, scattering, or absorption coefficient
    Parameters
    ----------
    crossection: float
        Units are um^2
    cn: float
        Particle concentration in cm^-3
Returns
--------
coefficient in m^-1. This is the differential AOD.
"""
crossection = crossection.copy()
cn = cn.copy()
crossection *= 1e-12 # conversion from um^2 to m^2
cn *= 1e6 # conversion from cm^-3 to m^-3
coefficient = cn * crossection
# print('cn',cn)
# print('crossection', crossection)
# print('coeff',coefficient)
# print('\n')
return coefficient
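# Hedged worked example (illustrative numbers): a cross section of 1 um^2 (= 1e-12 m^2) at a number
# concentration of 100 cm^-3 (= 1e8 m^-3) gives a coefficient of 1e-12 * 1e8 = 1e-4 m^-1.
def _example_get_coefficients():
    return _get_coefficients(_np.array([1.0]), _np.array([100.0]))  # array([1.e-04])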
def vertical_profile2accumulative_AOD(timeseries):
data = timeseries.data.copy()
data.dropna(inplace = True)
accu_aod =
|
_np.zeros(data.shape)
|
numpy.zeros
|
import numpy as np
from holoviews.element.util import compute_edges
from holoviews.element.comparison import ComparisonTestCase
class TestComputeEdges(ComparisonTestCase):
"""
Tests for compute_edges function.
"""
def setUp(self):
self.array1 = [.5, 1.5, 2.5]
self.array2 = [.5, 1.0000001, 1.5]
self.array3 = [1, 2, 4]
def test_simple_edges(self):
self.assertEqual(compute_edges(self.array1),
np.array([0, 1, 2, 3]))
def test_close_edges(self):
self.assertEqual(compute_edges(self.array2),
|
np.array([0.25, 0.75, 1.25, 1.75])
|
numpy.array
|
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING
from qtpy import QtWidgets as QtW
from qtpy import uic
from qtpy.QtCore import QSize, Qt
from qtpy.QtGui import QIcon
from typing_extensions import Literal
from useq import MDASequence
if TYPE_CHECKING:
from pymmcore_plus import RemoteMMCore
# daq
import mcsim.expt_ctrl.daq
from mcsim.expt_ctrl.program_sim_odt import get_sim_odt_sequence
# dmd
from mcsim.expt_ctrl import dlp6500
import numpy as np
import time
import datetime
import zarr
ICONS = Path(__file__).parent / "icons"
OBJECTIVE_DEVICE = "Objective"
# Once the PR #43 is merged, we pass the objective device to this variable
@dataclass
class SequenceMeta:
mode: Literal["mda"] | Literal["explorer"] = ""
split_channels: bool = False
should_save: bool = False
file_name: str = ""
save_dir: str = ""
save_pos: bool = False
class _MultiDUI:
UI_FILE = str(Path(__file__).parent / "_ui" / "sim_odt_gui.ui")
# The UI_FILE above contains these objects:
save_groupBox: QtW.QGroupBox
fname_lineEdit: QtW.QLineEdit
dir_lineEdit: QtW.QLineEdit
browse_save_Button: QtW.QPushButton
channel_groupBox: QtW.QGroupBox
channel_tableWidget: QtW.QTableWidget # TODO: extract
add_ch_Button: QtW.QPushButton
clear_ch_Button: QtW.QPushButton
remove_ch_Button: QtW.QPushButton
time_groupBox: QtW.QGroupBox
timepoints_spinBox: QtW.QSpinBox
interval_spinBox: QtW.QSpinBox
time_comboBox: QtW.QComboBox
cx_spinBox: QtW.QSpinBox
sx_spinBox: QtW.QSpinBox
cy_spinBox: QtW.QSpinBox
sy_spinBox: QtW.QSpinBox
stack_groupBox: QtW.QGroupBox
z_tabWidget: QtW.QTabWidget
step_size_doubleSpinBox: QtW.QDoubleSpinBox
n_images_label: QtW.QLabel
# TopBottom
set_top_Button: QtW.QPushButton
set_bottom_Button: QtW.QPushButton
z_top_doubleSpinBox: QtW.QDoubleSpinBox
z_bottom_doubleSpinBox: QtW.QDoubleSpinBox
z_range_topbottom_doubleSpinBox: QtW.QDoubleSpinBox
# RangeAround
zrange_spinBox: QtW.QSpinBox
range_around_label: QtW.QLabel
# AboveBelow
above_doubleSpinBox: QtW.QDoubleSpinBox
below_doubleSpinBox: QtW.QDoubleSpinBox
z_range_abovebelow_doubleSpinBox: QtW.QDoubleSpinBox
show_dataset_checkBox: QtW.QCheckBox
run_Button: QtW.QPushButton
pause_Button: QtW.QPushButton
cancel_Button: QtW.QPushButton
sim_exposure_SpinBox: QtW.QDoubleSpinBox
odt_exposure_SpinBox: QtW.QDoubleSpinBox
odt_frametime_SpinBox: QtW.QDoubleSpinBox
odt_circbuff_SpinBox: QtW.QDoubleSpinBox
daq_dt_doubleSpinBox: QtW.QDoubleSpinBox
#
stage_groupBox: QtW.QGroupBox
stage_tableWidget: QtW.QTableWidget
add_pos_Button: QtW.QPushButton
remove_pos_Button: QtW.QPushButton
clear_pos_Button: QtW.QPushButton
def setup_ui(self):
uic.loadUi(self.UI_FILE, self) # load QtDesigner .ui file
self.pause_Button.hide()
self.cancel_Button.hide()
# button icon
self.run_Button.setIcon(QIcon(str(ICONS / "play-button_1.svg")))
self.run_Button.setIconSize(QSize(20, 0))
class SimOdtWidget(QtW.QWidget, _MultiDUI):
# metadata associated with a given experiment
SEQUENCE_META: dict[MDASequence, SequenceMeta] = {}
def __init__(self, mmcores: list[RemoteMMCore], daq: mcsim.expt_ctrl.daq.daq, dmd: dlp6500,
viewer, parent=None, otf_data=None, affine_data=None):
mmcore = mmcores[0]
self._mmcores = mmcores
self._mmc = self._mmcores[0]
# todo: would it be better to pass through the main frame instead of these various attributes?
# todo: or maybe create a python microscope object which contains mmc, daq, DMD?
self.daq = daq
self.dmd = dmd
self.affine_data = affine_data # todo: this is not a good way of passing this data around
self.otf_data = otf_data
self.viewer = viewer
super().__init__(parent)
self.setup_ui()
self.pause_Button.released.connect(self._mmc.toggle_pause)
self.cancel_Button.released.connect(self._mmc.cancel)
# todo: maybe all of this stuff should go in a configuration file?
# initial value for ROI
self.sx_spinBox.setValue(801)
self.cx_spinBox.setValue(1024)
self.sy_spinBox.setValue(511)
self.cy_spinBox.setValue(1024)
# default value for exposure times
self.odt_exposure_SpinBox.setValue(3.)
self.sim_exposure_SpinBox.setValue(100.)
self.odt_frametime_SpinBox.setValue(8.7)
self.odt_circbuff_SpinBox.setValue(3.)
self.daq_dt_doubleSpinBox.setValue(26)
# connect buttons
self.add_pos_Button.clicked.connect(self.add_position)
self.remove_pos_Button.clicked.connect(self.remove_position)
self.clear_pos_Button.clicked.connect(self.clear_positions)
self.add_ch_Button.clicked.connect(self.add_channel)
self.remove_ch_Button.clicked.connect(self.remove_channel)
self.clear_ch_Button.clicked.connect(self.clear_channel)
self.browse_save_Button.clicked.connect(self.set_multi_d_acq_dir)
self.run_Button.clicked.connect(self._on_run_clicked)
# connect for z stack
self.set_top_Button.clicked.connect(self._set_top)
self.set_bottom_Button.clicked.connect(self._set_bottom)
self.z_top_doubleSpinBox.valueChanged.connect(self._update_topbottom_range)
self.z_bottom_doubleSpinBox.valueChanged.connect(self._update_topbottom_range)
self.zrange_spinBox.valueChanged.connect(self._update_rangearound_label)
self.above_doubleSpinBox.valueChanged.connect(self._update_abovebelow_range)
self.below_doubleSpinBox.valueChanged.connect(self._update_abovebelow_range)
self.z_range_abovebelow_doubleSpinBox.valueChanged.connect(
self._update_n_images
)
self.zrange_spinBox.valueChanged.connect(self._update_n_images)
self.z_range_topbottom_doubleSpinBox.valueChanged.connect(self._update_n_images)
self.step_size_doubleSpinBox.valueChanged.connect(self._update_n_images)
self.z_tabWidget.currentChanged.connect(self._update_n_images)
self.stack_groupBox.toggled.connect(self._update_n_images)
# events
mmcore.events.sequenceStarted.connect(self._on_mda_started)
mmcore.events.sequenceFinished.connect(self._on_mda_finished)
mmcore.events.sequencePauseToggled.connect(self._on_mda_paused)
def _set_enabled(self, enabled: bool):
self.save_groupBox.setEnabled(enabled)
self.channel_groupBox.setEnabled(enabled)
self.time_groupBox.setEnabled(enabled)
self.stack_groupBox.setEnabled(enabled)
def _set_top(self):
self.z_top_doubleSpinBox.setValue(self._mmc.getZPosition())
def _set_bottom(self):
self.z_bottom_doubleSpinBox.setValue(self._mmc.getZPosition())
def _update_topbottom_range(self):
self.z_range_topbottom_doubleSpinBox.setValue(
abs(self.z_top_doubleSpinBox.value() - self.z_bottom_doubleSpinBox.value())
)
def _update_rangearound_label(self, value):
self.range_around_label.setText(f"-{value/2} µm <- z -> +{value/2} µm")
def _update_abovebelow_range(self):
self.z_range_abovebelow_doubleSpinBox.setValue(
self.above_doubleSpinBox.value() + self.below_doubleSpinBox.value()
)
def _update_n_images(self):
step = self.step_size_doubleSpinBox.value()
# set what is the range to consider depending on the z_stack mode
if self.z_tabWidget.currentIndex() == 0:
range = self.z_range_topbottom_doubleSpinBox.value()
if self.z_tabWidget.currentIndex() == 1:
range = self.zrange_spinBox.value()
if self.z_tabWidget.currentIndex() == 2:
range = self.z_range_abovebelow_doubleSpinBox.value()
self.n_images_label.setText(f"{round((range / step) + 1)}")
def _on_mda_started(self, sequence):
self._set_enabled(False)
self.pause_Button.show()
self.cancel_Button.show()
self.run_Button.hide()
def _on_mda_finished(self, sequence):
self._set_enabled(True)
self.pause_Button.hide()
self.cancel_Button.hide()
self.run_Button.show()
def _on_mda_paused(self, paused):
self.pause_Button.setText("GO" if paused else "PAUSE")
# add, remove, clear channel table
def add_channel(self):
presets = self.daq.presets
if len(presets) > 0:
idx = self.channel_tableWidget.rowCount()
self.channel_tableWidget.insertRow(idx)
# create a combo_box for channels in the table
self.channel_comboBox = QtW.QComboBox(self)
self.mode_comboBox = QtW.QComboBox(self)
pks = list(presets.keys())
self.channel_comboBox.addItems(pks)
self.channel_tableWidget.setCellWidget(idx, 0, self.channel_comboBox)
self.channel_tableWidget.setCellWidget(idx, 1, self.mode_comboBox)
self.channel_comboBox.currentTextChanged.connect(self._on_channel_changed)
def _on_channel_changed(self):
dmd_cmap = self.dmd.presets
for ii in range(self.channel_tableWidget.rowCount()):
ch = self.channel_tableWidget.cellWidget(ii, 0).currentText()
# clear old modes
self.channel_tableWidget.cellWidget(ii, 1).clear()
# add new modes
modes = list(dmd_cmap[ch].keys())
self.channel_tableWidget.cellWidget(ii, 1).addItems(modes)
def remove_channel(self):
# remove selected position
rows = {r.row() for r in self.channel_tableWidget.selectedIndexes()}
for idx in sorted(rows, reverse=True):
self.channel_tableWidget.removeRow(idx)
def clear_channel(self):
# clear all positions
self.channel_tableWidget.clearContents()
self.channel_tableWidget.setRowCount(0)
def set_multi_d_acq_dir(self):
# set the directory
self.dir = QtW.QFileDialog(self)
self.dir.setFileMode(QtW.QFileDialog.DirectoryOnly)
self.save_dir = QtW.QFileDialog.getExistingDirectory(self.dir)
self.dir_lineEdit.setText(self.save_dir)
self.parent_path = Path(self.save_dir)
def _get_zstack_params(self):
znow = self._mmc.getZPosition()
if self.stack_groupBox.isChecked():
step = self.step_size_doubleSpinBox.value()
if self.z_tabWidget.currentIndex() == 0:
top = self.z_top_doubleSpinBox.value()
bottom = self.z_bottom_doubleSpinBox.value()
elif self.z_tabWidget.currentIndex() == 1:
range = self.zrange_spinBox.value()
top = znow + range / 2
bottom = znow - range / 2
elif self.z_tabWidget.currentIndex() == 2:
above = self.above_doubleSpinBox.value()
below = self.below_doubleSpinBox.value()
top = znow + above
bottom = znow - below
nz = (top - bottom) // step
zpos = bottom + np.arange(nz) * step
return zpos
# add, remove, clear, move_to positions table
def add_position(self):
dev_loaded = list(self._mmc.getLoadedDevices())
if len(dev_loaded) > 1:
x = self._mmc.getXPosition()
y = self._mmc.getYPosition()
x_txt = QtW.QTableWidgetItem(str(x))
y_txt = QtW.QTableWidgetItem(str(y))
x_txt.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
y_txt.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
idx = self.stage_tableWidget.rowCount()
self.stage_tableWidget.insertRow(idx)
self.stage_tableWidget.setItem(idx, 0, QtW.QTableWidgetItem(x_txt))
self.stage_tableWidget.setItem(idx, 1, QtW.QTableWidgetItem(y_txt))
def remove_position(self):
# remove selected position
rows = {r.row() for r in self.stage_tableWidget.selectedIndexes()}
for idx in sorted(rows, reverse=True):
self.stage_tableWidget.removeRow(idx)
def clear_positions(self):
# clear all positions
self.stage_tableWidget.clearContents()
self.stage_tableWidget.setRowCount(0)
def _on_run_clicked(self):
# saving
if self.save_groupBox.isChecked() and not (
self.fname_lineEdit.text() and Path(self.dir_lineEdit.text()).is_dir()):
raise ValueError("Select a filename and a valid directory.")
if self.save_groupBox.isChecked():
subdir = self.fname_lineEdit.text()
save_path = Path(self.dir_lineEdit.text()) / subdir / "sim_odt.zarr"
# make sure save path is unique
if save_path.exists():
ii = 1
while save_path.exists():
save_path = Path(self.dir_lineEdit.text()) / Path(f"{subdir:s}_{ii:d}") / "sim_odt.zarr"
ii += 1
else:
save_path = None
subdir = None
mmc1 = self._mmcores[0]
mmc2 = self._mmcores[1]
if len(self._mmc.getLoadedDevices()) < 2:
raise ValueError("Load a cfg file first.")
# ##############################
# grab sequence information from GUI
# ##############################
channels = [self.channel_tableWidget.cellWidget(c, 0).currentText() for c in range(self.channel_tableWidget.rowCount())]
channels_modes = [self.channel_tableWidget.cellWidget(c, 1).currentText() for c in range(self.channel_tableWidget.rowCount())]
sim_channels = [ch for ch in channels if ch != "odt"]
nsim_channels = len(sim_channels)
modes = [chm if ch == "odt" else "default" for ch, chm in zip(channels, channels_modes)]
exposure_tms_sim = self.sim_exposure_SpinBox.value()
exposure_tms_odt = self.odt_exposure_SpinBox.value()
min_odt_frame_time_ms = self.odt_frametime_SpinBox.value()
odt_circ_buffer_mb = int(np.round(self.odt_circbuff_SpinBox.value() * 1e3))
dt = int(np.round(self.daq_dt_doubleSpinBox.value())) * 1e-6
# ##############################
# time lapse
# ##############################
do_time_lapse = self.time_groupBox.isChecked()
if do_time_lapse:
ntimes = self.timepoints_spinBox.value()
interval_ms = self.interval_spinBox.value()
else:
ntimes = 1
interval_ms = 0.
# ##############################
# parameter scan
# ##############################
nparams = 1
# ##############################
# xy-positions
# ##############################
do_position_scan = self.stage_groupBox.isChecked() and self.stage_tableWidget.rowCount() > 0
positions = []
if do_position_scan:
for r in range(self.stage_tableWidget.rowCount()):
positions.append([float(self.stage_tableWidget.item(r, 0).text()),
float(self.stage_tableWidget.item(r, 1).text())])
else:
positions.append([float(mmc1.getXPosition()),
float(mmc1.getYPosition())])
npositions = len(positions)
# ##############################
# zstack
# ##############################
# get current z-position info
z_now = mmc1.getZPosition()
z_volts_start = self.daq.last_known_analog_val[self.daq.analog_line_names["z_stage"]]
do_zstack = self.stack_groupBox.isChecked()
if do_zstack:
zpositions = self._get_zstack_params()
nz = len(zpositions)
# get focus device info
focus_dev = mmc1.getFocusDevice()
focus_dev_props = mmc1.getDeviceProperties(focus_dev)
guess_calibration_um_per_v = (float(focus_dev_props["Upper Limit"]) - float(focus_dev_props["Lower Limit"])) / 10
# guess voltages to reach desired positions
dzs = zpositions - z_now
z_volts_guesses = z_volts_start + dzs / guess_calibration_um_per_v
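            # Hedged worked example: a focus stage reporting "Lower Limit" = 0 and "Upper Limit" = 100 (um)
            # gives a calibration guess of 10 um/V (the division by 10 apparently assumes a 0-10 V analog
            # output range), so a desired offset of dz = +5 um adds 0.5 V to z_volts_start.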
print("z-start position was %0.3fV" % z_volts_start)
print("z guess calibration = %0.3fum/V" % guess_calibration_um_per_v)
print("z volts guesses= ", end="")
print(z_volts_guesses)
if np.any(z_volts_guesses < -5) or
|
np.any(z_volts_guesses > 5)
|
numpy.any
|
"""General utility functions"""
import os
import json
import logging
import random
import numpy as np
import pandas as pd
import datetime
from sklearn import metrics
import time
from PIL import Image
import cv2
from collections import defaultdict, deque, OrderedDict
import seaborn as sns
import matplotlib.pyplot as plt
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torchvision.transforms import autoaugment, transforms
from torchvision.transforms.functional import InterpolationMode
class Params:
"""Class that loads hyperparameters from a json file.
Example:
```
params = Params(json_path)
print(params.learning_rate)
params.learning_rate = 0.5 # change the value of learning_rate in params
```
"""
def __init__(self, json_path):
self.update(json_path)
def save(self, json_path):
"""Saves parameters to json file"""
with open(json_path, "w") as f:
json.dump(self.__dict__, f, indent=4)
def update(self, json_path):
"""Loads parameters from json file"""
with open(json_path) as f:
params = json.load(f)
self.__dict__.update(params)
def __str__(self) -> str:
return str(self.__dict__)
@property
def dict(self):
"""Gives dict-like access to Params instance by `params.dict['learning_rate']`"""
return self.__dict__
def datasets_to_df(ds_path: str):
"""
Convert dataset folder to pandas dataframe format
Args:
ds_path (string): Path to dataset
Returns:
pd.DataFrame : A pandas dataframe containing paths to dataset and labels.
"""
if not os.path.exists(ds_path):
raise FileNotFoundError(f"Dataset directory not found: {ds_path}")
raise NotImplementedError("Implement this method")
# return pd.DataFrame(data, columns=["file", "label"]) --> return same dataframe to be consistent
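# Hedged sketch of one possible implementation (assumption: the dataset directory contains one
# sub-folder per class label with image files inside). This is illustrative only, does not replace
# the stub above, and keeps the ["file", "label"] column layout suggested by the comment there.
def _datasets_to_df_sketch(ds_path: str):
    if not os.path.exists(ds_path):
        raise FileNotFoundError(f"Dataset directory not found: {ds_path}")
    data = []
    for label in sorted(os.listdir(ds_path)):
        label_dir = os.path.join(ds_path, label)
        if not os.path.isdir(label_dir):
            continue
        for fname in sorted(os.listdir(label_dir)):
            data.append([os.path.join(label_dir, fname), label])
    return pd.DataFrame(data, columns=["file", "label"])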
def plot_hist(history: dict):
"""
Plot training and validation accuracy and losses
Args:
history: Dict containing training loss, acc and val loss, acc
"""
# summarize history for accuracy
plt.plot(history["train_acc"])
plt.plot(history["val_acc"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "val"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history["train_loss"])
plt.plot(history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "val"], loc="upper left")
plt.show()
def img_display(img, means, stds):
"""
    Convert a normalized image back to an unnormalized image for display
"""
img = img.numpy()
img = np.transpose(img, (1, 2, 0))
# unnormalize
img = img *
|
np.array(stds)
|
numpy.array
|
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_curve
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import os
import yaml
import numpy as np
import torch
import torch.nn as nn
from torch.utils import data
from model_RawNet import RawNet
def keras_lr_decay(step, decay = 0.00005):
return 1./(1.+decay*step)
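# Hedged worked example: with the default decay of 5e-5, step = 20000 gives
# 1 / (1 + 5e-5 * 20000) = 1 / 2 = 0.5, i.e. the learning rate is halved after 20k steps.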
def init_weights(m):
print(m)
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0.0001)
elif isinstance(m, nn.BatchNorm1d):
pass
else:
if hasattr(m, 'weight'):
torch.nn.init.kaiming_normal_(m.weight, a=0.01)
else:
print('no weight',m)
def train_model(model, device, db_gen, optimizer, epoch):
model.train()
with tqdm(total = len(db_gen), ncols = 70) as pbar:
for idx_ct, (m_batch, m_label) in enumerate(db_gen):
if bool(parser['do_lr_decay']):
if parser['lr_decay'] == 'keras': lr_scheduler.step()
m_batch = m_batch.to(device)
m_label= m_label.to(device)
output = model(m_batch, m_label) #output
'''
#for future updates including bc_loss and h_loss
if bool(parser['mg']):
norm = torch.norm(model.module.fc2_gru.weight, dim=1, keepdim = True) / (5. ** 0.5)
normed_weight = torch.div(model.module.fc2_gru.weight, norm)
else:
norm = torch.norm(model.fc2_gru.weight, dim=1, keepdim = True) / (5. ** 0.5)
normed_weight = torch.div(model.fc2_gru.weight, norm)
'''
cce_loss = criterion(output, m_label)
# bc_loss, h_loss currently removed.
loss = cce_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx_ct % 100 == 0:
for p in optimizer.param_groups:
lr_cur = p['lr']
#print('lr_cur', lr_cur)
break
pbar.set_description('epoch%d,cce:%.3f, cur_lr:%.6f'%(epoch, cce_loss,float(lr_cur)))
pbar.update(1)
def evaluate_model(mode, model, db_gen, device, l_utt, save_dir, epoch, l_trial):
if mode not in ['val', 'eval']: raise ValueError('mode should be either "val" or "eval"')
model.eval()
with torch.set_grad_enabled(False):
#1st, extract speaker embeddings.
l_embeddings = []
with tqdm(total = len(db_gen), ncols = 70) as pbar:
for m_batch in db_gen:
code = model(x = m_batch, is_test=True)
l_embeddings.extend(code.cpu().numpy()) #>>> (batchsize, codeDim)
pbar.update(1)
d_embeddings = {}
if not len(l_utt) == len(l_embeddings):
print(len(l_utt), len(l_embeddings))
exit()
for k, v in zip(l_utt, l_embeddings):
d_embeddings[k] = v
#2nd, calculate EER
y_score = [] # score for each sample
y = [] # label for each sample
if mode == 'val':
f_res = open(save_dir + 'results/epoch%s.txt'%(epoch), 'w')
else:
f_res = open(save_dir + 'results/eval.txt', 'w')
for line in l_trial:
trg, utt_a, utt_b = line.strip().split(' ')
y.append(int(trg))
y_score.append(cos_sim(d_embeddings[utt_a], d_embeddings[utt_b]))
f_res.write('{score} {target}\n'.format(score=y_score[-1],target=y[-1]))
f_res.close()
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
print('eer', eer)
return eer
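# Hedged sketch (not part of the original script): the equal error rate (EER) is the point on the
# ROC curve where the false-positive rate equals the false-negative rate (1 - TPR); the root search
# below mirrors the brentq/interp1d computation used in evaluate_model above.
def _example_eer(y, y_score):
    fpr, tpr, _ = roc_curve(y, y_score, pos_label=1)
    return brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)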
def cos_sim(a,b):
return np.dot(a,b) / (np.linalg.norm(a) * np.linalg.norm(b))
def get_val_utts(l_val_trial):
l_utt = []
for line in l_val_trial:
_, utt_a, utt_b = line.strip().split(' ')
if utt_a not in l_utt: l_utt.append(utt_a)
if utt_b not in l_utt: l_utt.append(utt_b)
return l_utt
def get_utt_list(src_dir):
'''
Designed for VoxCeleb
'''
l_utt = []
for r, ds, fs in os.walk(src_dir):
base = '/'.join(r.split('/')[-2:])+'/'
for f in fs:
if f[-3:] != 'npy':
continue
l_utt.append(base+f[:-4])
return l_utt
def get_label_dic_Voxceleb(l_utt):
d_label = {}
idx_counter = 0
for utt in l_utt:
spk = utt.split('/')[0]
if spk not in d_label:
d_label[spk] = idx_counter
idx_counter += 1
return d_label
class Dataset_VoxCeleb2(data.Dataset):
def __init__(self, list_IDs, base_dir, nb_time = 0, labels = {}, cut = True, return_label = True, pre_emp = True):
'''
self.list_IDs : list of strings (each string: utt key)
self.labels : dictionary (key: utt key, value: label integer)
self.nb_time : integer, the number of timesteps for each mini-batch
cut : (boolean) adjust utterance duration for mini-batch construction
return_label : (boolean)
pre_emp : (boolean) do pre-emphasis with coefficient = 0.97
'''
self.list_IDs = list_IDs
self.nb_time = nb_time
self.base_dir = base_dir
self.labels = labels
self.cut = cut
self.return_label = return_label
self.pre_emp = pre_emp
if self.cut and self.nb_time == 0: raise ValueError('when adjusting utterance length, "nb_time" should be input')
def __len__(self):
return len(self.list_IDs)
def __getitem__(self, index):
ID = self.list_IDs[index]
try:
X = np.load(self.base_dir+ID+'.npy')
except:
raise ValueError('%s'%ID)
if self.pre_emp: X = self._pre_emphasis(X)
if self.cut:
nb_time = X.shape[1]
if nb_time > self.nb_time:
start_idx = np.random.randint(low = 0,
high = nb_time - self.nb_time)
X = X[:, start_idx:start_idx+self.nb_time]
elif nb_time < self.nb_time:
nb_dup = int(self.nb_time / nb_time) + 1
X = np.tile(X, (1, nb_dup))[:, :self.nb_time]
else:
X = X
if not self.return_label:
return X
y = self.labels[ID.split('/')[0]]
return X, y
def _pre_emphasis(self, x):
'''
Pre-emphasis for single channel input
'''
return np.asarray(x[:,1:] - 0.97 * x[:, :-1], dtype=np.float32)
def make_validation_trial(l_utt, nb_trial, dir_val_trial):
f_val_trial = open(dir_val_trial, 'w')
#trg trial: 1, non-trg: 0
nb_trg_trl = int(nb_trial / 2)
d_spk_utt = {}
#make a dictionary that has keys as speakers
for utt in l_utt:
spk = utt.split('/')[0]
if spk not in d_spk_utt: d_spk_utt[spk] = []
d_spk_utt[spk].append(utt)
l_spk = list(d_spk_utt.keys())
#print('nb_spk: %d'%len(l_spk))
#compose trg trials
selected_spks = np.random.choice(l_spk, size=nb_trg_trl, replace=True)
for spk in selected_spks:
l_cur = d_spk_utt[spk]
utt_a, utt_b = np.random.choice(l_cur, size=2, replace=False)
f_val_trial.write('1 %s %s\n'%(utt_a, utt_b))
#compose non-trg trials
for i in range(nb_trg_trl):
spks_cur = np.random.choice(l_spk, size=2, replace = False)
utt_a =
|
np.random.choice(d_spk_utt[spks_cur[0]], size=1)
|
numpy.random.choice
|
#import pandas as pd
import numpy as np
import random
import permutation_test.ap as ap
#import warnings
from operator import mul
from fractions import Fraction
import functools
from operator import itemgetter
import collections.abc
from numpy.random import normal
def permutationtest(data, ref_data, detailed=False, n_combinations_max=20000, verbose=True, n_bins=None):
'''
<NAME>, Image and Data Analysis Facility, DZNE Bonn, Germany
christoph.moehl(at)dzne.de
Implementation of Fisher's permutation test.
If detailed is False, only (two-sided) p_value is returned,
i.e. the probability that data is not different from ref_data
If detailed is True, one-sided p values and histogram data of
    mean differences are returned in a dict:
hist_data: distribution of mean differences for all permutations
p_value: two sided p_value (the probability that data is not
different from ref_data )
p_value_lower_than: the probability that mean of data is not
lower than mean of ref_data
p_value_greater_than: the probability that mean of data is
    not greater than mean of ref_data
    If the number of possible combinations is greater than n_combinations_max,
a random subsample of size n_combinations_max is taken for histogram calculation.
According to following publications:
<NAME>. (1935). The design of experiments. 1935.
Oliver and Boyd, Edinburgh.
<NAME>. (2004). Permutation methods: a basis for exact inference.
Statistical Science, 19(4), 676-685
'''
mean_diffs = getMeanDiffListForAllPermutations(data, ref_data\
, n_combinations_max=n_combinations_max)
print('nr of mean diffs: ' + str(len(mean_diffs)))
freq, vals, edges = getHistogramFreqAndCenters(mean_diffs, n_bins=n_bins)
if (not is_list_of_tuples(data)) and (not is_list_of_tuples(ref_data)):
#if data without error
mean_diff = getDiffOfMean(data, ref_data)
p_value_lower_than, p_value_greater_than = calc_pval(mean_diff, freq, vals)
else:
#if data with error
freq_s, _, _ = getHistForDatWithErr(data, ref_data, edges)
p_value_lower_than, p_value_greater_than = calc_pval_with_err(freq_s, freq, vals)
mean_diff = np.average(vals, weights=freq_s) #ca. mean diff for report
# cum_freq = np.cumsum(freq) * bin_width #cumulative histogram values
# greater_than_index = mean_diff > vals
# lower_than_index = mean_diff < vals
# if lower_than_index.all() and not greater_than_index.all():
# p_value_lower_than = 0
# p_value_greater_than = 1
# elif not lower_than_index.all() and greater_than_index.all():
# p_value_lower_than = 1
# p_value_greater_than = 0
# else:
# p_value_lower_than = cum_freq[lower_than_index][0]
# # print 'cum_freq[lower_than_index] :' + str(cum_freq[lower_than_index])
# p_value_greater_than = 1-cum_freq[greater_than_index][-1]
p_value = min((p_value_lower_than, p_value_greater_than))
if verbose:
p = ap.AFigure()
print('\n\n Distribution of mean differences')
print(p.plot(vals, freq, marker = '*'))
print('mean difference of tested dataset: ' + str(mean_diff))
print('p_value: ' + str(p_value))
print('p_lower_than (probability that mean of test data is not lower than mean of ref data): ' + str(p_value_lower_than))
print('p_value_greater_than (probability that mean of test data is not greater than mean of ref data): ' + str(p_value_greater_than))
if detailed:
result = {'hist_data' : (freq, vals)\
, 'mean_difference' : mean_diff\
, 'p_value' : p_value\
, 'p_value_lower_than' : p_value_lower_than\
, 'p_value_greater_than' : p_value_greater_than\
}
return result
return p_value
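# Hedged usage sketch (illustrative numbers only, commented out so nothing runs on import):
# p = permutationtest([1.2, 1.5, 1.1, 1.4], [0.9, 1.0, 0.8, 1.1], verbose=False)
# 'p' is the two-sided p-value, i.e. the probability that the test data is not different
# from the reference data (see the docstring above).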
def calc_pval_with_err(freq_s, freq, vals):
bin_width = getBinWidth(vals)
cum_freq = np.cumsum(freq) * bin_width #cumulative histogram values
weights = freq_s * freq
p_value_lower_than = np.average(cum_freq, weights=weights)
p_value_greater_than = 1-p_value_lower_than
return p_value_lower_than, p_value_greater_than
def calc_pval(mean_diff, freq, vals):
bin_width = getBinWidth(vals)
#lower_edges = vals-bin_width*0.5
cum_freq = np.cumsum(freq) * bin_width #cumulative histogram values
#meandiff_index =
greater_than_index = mean_diff > vals
lower_than_index = mean_diff < vals
if lower_than_index.all() and not greater_than_index.all():
p_value_lower_than = 0
p_value_greater_than = 1
elif not lower_than_index.all() and greater_than_index.all():
p_value_lower_than = 1
p_value_greater_than = 0
else:
p_value_lower_than = cum_freq[lower_than_index][0]
# print 'cum_freq[lower_than_index] :' + str(cum_freq[lower_than_index])
p_value_greater_than = 1-cum_freq[greater_than_index][-1]
return p_value_lower_than, p_value_greater_than
def getBinWidth(vals):
if len(vals)<2:
return 1
return vals[1]-vals[0]
# def getPermutations(lst, num=None):
# if num is None:
# return list(itertools.permutations(lst, len(lst)))
# return list(itertools.permutations(lst, num))
def getHistForDatWithErr(lst_1, lst_2, bin_edges, n_samples=10000):
mdiffs = [getDiffOfMeanRandomized(lst_1, lst_2) for i in range(n_samples)]
freq, bin_centers, edge = getHistogramFreqAndCenters(mdiffs, bin_edges=bin_edges)
return freq, bin_centers, edge
def getDiffOfMeanRandomized(lst_1, lst_2):
'''
    Randomized difference of means for lists of (value, error) tuples.
'''
    if not is_list_of_tuples(lst_1) or not is_list_of_tuples(lst_2):
        raise ValueError('input must be lists of tuples but is %s and %s'
                         % (lst_1, lst_2))
lst_1r, lst_2r = randomize_permutation_data((lst_1, lst_2))
return getDiffOfMean(lst_1r, lst_2r)
def getDiffOfMean(lst_1, lst_2):
'''
result = mean(lst_1) - mean(lst_2)
'''
if (len(lst_1)==0) or (len(lst_2)==0):
raise ValueError('empty list')
out = np.nanmean(lst_1) - np.nanmean(lst_2)
if np.isnan(out):
raise ValueError('one or more of the average values are NaN')
return out
def is_list_of_tuples(data, n=2):
try:
nr_el = list(map(len, data))
is_list_of_tuples = (np.array(nr_el)==n).all()
    except TypeError:
        is_list_of_tuples = False
return is_list_of_tuples
def check_data_format(permutations):
'''
returns 1 if data points are single values
returns 2 if the data points are tuples with (value,error)
returns 0 if data structure is not valid
'''
has_tuples = False
has_single_vals = False
for perm in permutations:
if len(perm)!=2:
#only 2 groups allowed
return 0
for group in perm:
for d in group:
if isinstance(d, collections.Iterable):
if len(d)!=2:
return 0
has_tuples = True
else:
has_single_vals = True
if has_tuples and not has_single_vals:
return 2
if not has_tuples and has_single_vals:
return 1
if has_tuples and has_single_vals:
return 0
def randomize_data_point(datapoint):
val = datapoint[0]
err = datapoint[1]
return normal(val, err)
def randomize_permutation_data(perm):
a = [randomize_data_point(dat) for dat in perm[0]]
b = [randomize_data_point(dat) for dat in perm[1]]
return (a, b)
def getMeanDiffListForAllPermutations(lst_1, lst_2, n_combinations_max = 20000):
'''
    lst_1 and lst_2 are fused and, for all possible permutations,
    mean differences are calculated.
    If the elements of lst_1 and lst_2 are (value, error) tuples
    (second value = statistical error), the data is randomized, i.e. each
    (val, err) tuple is replaced by a normally distributed random variable
    with mu=val and sigma=err.
'''
perms = getPerms(lst_1 + lst_2, len(lst_1), n_combinations_max=n_combinations_max)
format = check_data_format(perms)
if format == 1:
#if single values
mean_diffs = [getDiffOfMean(perm[0], perm[1]) for perm in perms]
return mean_diffs
    if format == 2:
        #if (value,error) tuples
        perms_rand = [randomize_permutation_data(perm) for perm in perms]
        mean_diffs = [getDiffOfMean(perm[0], perm[1]) for perm in perms_rand]
        return mean_diffs
    raise ValueError('data points must be either single values or (value, error) tuples')
def permutations(n, g):
'''
returns a generator of permutations
n : number of elements (integer)
g : list of values to permute
example:
In [27]: import permutation_test as p
In [28]: perm_generator = p.permutations(3,[10,11,12,13])
In [29]: perms = list(perm_generator)
In [30]: perms
Out[30]: [[10, 11, 12], [10, 11, 13], [10, 12, 13], [11, 12, 13]]
'''
if n == 0:
yield []
for j, x in enumerate(g):
for v in permutations(n-1, g[j+1:]):
yield [x] + v
def getPerms(dat, n_of_group_a, n_combinations_max = 20000):
'''
combined permutation for 2 lists
dat: list to be permuted
n_of_group_a: index for splitting the list
If nr of possible combinations exceeds n_combinations_max, a random subsample
of size n_combinations_max is chosen.
example:
In [33]: p.getPerms([1,2,3,4,5],3)
Out[33]:
[([1, 2, 3], [4, 5]),
([1, 2, 4], [3, 5]),
([1, 2, 5], [3, 4]),
([1, 3, 4], [2, 5]),
([1, 3, 5], [2, 4]),
([1, 4, 5], [2, 3]),
([2, 3, 4], [1, 5]),
([2, 3, 5], [1, 4]),
([2, 4, 5], [1, 3]),
([3, 4, 5], [1, 2])]
'''
dat_index = range(len(dat))
#nr of possible combinations
n_combinations = nCk(len(dat_index), n_of_group_a)
if n_combinations < n_combinations_max:
perm_list = list(permutations(n_of_group_a, dat_index))
else:
perms_iter = permutations(n_of_group_a, dat_index)
print('taking random subsample of size %s from %s possible permutations' % (n_combinations_max, n_combinations))
perm_list = iter_sample_fast(perms_iter, n_combinations_max)
combi = []
for perm in perm_list:
group_b = [dat[i] for i in dat_index if not i in perm]
        if len(group_b) == 0:
            raise ValueError('group_b must not be empty')
group_a = list(itemgetter(*perm)(dat))
combi.append((group_a, group_b))
return combi
def calc_bin_number(data):
'''
optimal bin number according to Freedman-Diaconis rule
'''
if len(np.unique(data))==1:
#if all values in the dataset are identical
return 1
#inter quartile range
    iqr = np.percentile(data, 75) - np.percentile(data, 25)
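    # NOTE: the remainder of this function is missing here; a minimal completion
    # of the Freedman-Diaconis rule (bin width = 2 * IQR * n**(-1/3)) could be:
    bin_width = 2.0 * iqr / (len(data) ** (1.0 / 3.0))
    if bin_width == 0:
        return 1
    return int(np.ceil((np.max(data) - np.min(data)) / bin_width))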
# Adapted from score written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
from medpy.metric.binary import dc
import pandas as pd
from IPython.display import display, HTML
from medseg.common_utils.measure import hd, hd_2D_stack, asd, volumesimilarity, VolumeSimIndex
class runningScore(object):
def __init__(self, n_classes):
self.n_classes = n_classes
self.confusion_matrix = np.zeros((n_classes, n_classes))
def _fast_hist(self, label_true, label_pred, n_class):
mask = (label_true >= 0) & (label_true < n_class)
hist = np.bincount(
n_class * label_true[mask].astype(int) +
label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
return hist
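    # NOTE: tiny illustrative example (hypothetical values, n_class=2) of the
    # bincount trick above: label_true = [0, 1, 1], label_pred = [0, 1, 0]
    # -> indices 2*true + pred = [0, 3, 2], bincount(..., minlength=4) = [1, 0, 1, 1]
    # -> reshape(2, 2) gives the confusion matrix [[1, 0], [1, 1]].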
def update(self, label_trues, label_preds):
for lt, lp in zip(label_trues, label_preds):
self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten(), self.n_classes)
def get_scores(self):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = self.confusion_matrix
acc = np.diag(hist).sum() / hist.sum()
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)
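        # NOTE: the rest of get_scores is missing here; a minimal completion in the
        # spirit of the referenced pytorch-fcn utils (frequency-weighted accuracy
        # and per-class IoU) could be:
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return {'Overall Acc': acc,
                'Mean Acc': acc_cls,
                'FreqW Acc': fwavacc,
                'Mean IoU': mean_iu}, cls_iu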
# -*- coding: utf-8 -*-
"""
Utilities for working with related individuals (crosses, families, etc.).
See also the examples at:
- http://nbviewer.ipython.org/github/alimanfoo/anhima/blob/master/examples/ped.ipynb
""" # noqa
from __future__ import division, print_function, absolute_import
# third party dependencies
import numpy as np
import numexpr as ne
import pandas
# internal dependencies
import anhima.gt
# constants to represent inheritance states
INHERIT_UNDETERMINED = 0
INHERIT_PARENT1 = 1
INHERIT_PARENT2 = 2
INHERIT_NONSEG_REF = 3
INHERIT_NONSEG_ALT = 4
INHERIT_NONPARENTAL = 5
INHERIT_PARENT_MISSING = 6
INHERIT_MISSING = 7
INHERITANCE_STATES = range(8)
INHERITANCE_LABELS = ('undetermined', 'parent1', 'parent2', 'non-seg ref',
'non-seg alt', 'non-parental', 'parent missing',
'missing')
def diploid_inheritance(parent_diplotype, gamete_haplotypes):
"""
Determine the transmission of parental alleles to a set of gametes.
Parameters
----------
parent_diplotype : array_like, shape (n_variants, 2)
An array of phased genotypes for a single diploid individual, where
each element of the array is an integer corresponding to an allele
index (-1 = missing, 0 = reference allele, 1 = first alternate allele,
2 = second alternate allele, etc.).
gamete_haplotypes : array_like, shape (n_variants, n_gametes)
An array of haplotypes for a set of gametes derived from the given
parent, where each element of the array is an integer corresponding
to an allele index (-1 = missing, 0 = reference allele, 1 = first
alternate allele, 2 = second alternate allele, etc.).
Returns
-------
inheritance : ndarray, uint8, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance, where 1 =
inheritance from first parental haplotype, 2 = inheritance from second
parental haplotype, 3 = inheritance of reference allele from parent
that is homozygous for the reference allele, 4 = inheritance of
alternate allele from parent that is homozygous for the alternate
allele, 5 = non-parental allele, 6 = parental genotype is missing,
7 = gamete allele is missing.
"""
# normalise inputs
parent_diplotype = np.asarray(parent_diplotype)
assert parent_diplotype.ndim == 2
assert parent_diplotype.shape[1] == 2
gamete_haplotypes = np.asarray(gamete_haplotypes)
assert gamete_haplotypes.ndim == 2
# convenience variables
parent1 = parent_diplotype[:, 0, np.newaxis] # noqa
parent2 = parent_diplotype[:, 1, np.newaxis] # noqa
gamete_is_missing = gamete_haplotypes < 0
parent_is_missing = np.any(parent_diplotype < 0, axis=1)
parent_is_hom_ref = anhima.gt.is_hom_ref(parent_diplotype)[:, np.newaxis] # noqa
parent_is_het = anhima.gt.is_het(parent_diplotype)[:, np.newaxis] # noqa
parent_is_hom_alt = anhima.gt.is_hom_alt(parent_diplotype)[:, np.newaxis] # noqa
# need this for broadcasting, but also need to retain original for later
parent_is_missing_bc = parent_is_missing[:, np.newaxis] # noqa
# N.B., use numexpr below where possible to avoid temporary arrays
# utility variable, identify allele calls where inheritance can be
# determined
callable = ne.evaluate('~gamete_is_missing & ~parent_is_missing_bc') # noqa
callable_seg = ne.evaluate('callable & parent_is_het') # noqa
# main inheritance states
inherit_parent1 = ne.evaluate(
'callable_seg & (gamete_haplotypes == parent1)'
)
inherit_parent2 = ne.evaluate(
'callable_seg & (gamete_haplotypes == parent2)'
)
nonseg_ref = ne.evaluate(
'callable & parent_is_hom_ref & (gamete_haplotypes == parent1)'
)
nonseg_alt = ne.evaluate(
'callable & parent_is_hom_alt & (gamete_haplotypes == parent1)'
)
nonparental = ne.evaluate(
'callable & (gamete_haplotypes != parent1)'
' & (gamete_haplotypes != parent2)'
)
# record inheritance states
# N.B., order in which these are set matters
inheritance = np.zeros_like(gamete_haplotypes, dtype='u1')
inheritance[inherit_parent1] = INHERIT_PARENT1
inheritance[inherit_parent2] = INHERIT_PARENT2
inheritance[nonseg_ref] = INHERIT_NONSEG_REF
inheritance[nonseg_alt] = INHERIT_NONSEG_ALT
inheritance[nonparental] = INHERIT_NONPARENTAL
inheritance[parent_is_missing] = INHERIT_PARENT_MISSING
inheritance[gamete_is_missing] = INHERIT_MISSING
return inheritance
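# NOTE: a minimal, hypothetical usage sketch of diploid_inheritance, assuming a
# parent that is heterozygous (0/1) at two variants and three gametes:
#
#     parent = np.array([[0, 1],
#                        [0, 1]])
#     gametes = np.array([[0, 1, -1],
#                         [1, 0,  0]])
#     diploid_inheritance(parent, gametes)
#     # -> [[1, 2, 7],
#     #     [2, 1, 1]]   (1 = parent1 allele, 2 = parent2 allele, 7 = missing)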
def diploid_mendelian_error_biallelic(parental_genotypes, progeny_genotypes):
"""Implementation of function to find Mendelian errors optimised for
biallelic variants.
"""
# recode genotypes for convenience
parental_genotypes_012 = anhima.gt.as_012(parental_genotypes)
progeny_genotypes_012 = anhima.gt.as_012(progeny_genotypes)
# convenience variables
p1 = parental_genotypes_012[:, 0, np.newaxis] # noqa
p2 = parental_genotypes_012[:, 1, np.newaxis] # noqa
o = progeny_genotypes_012 # noqa
# enumerate all possible combinations of Mendel error genotypes
ex = '((p1 == 0) & (p2 == 0) & (o == 1))' \
' + ((p1 == 0) & (p2 == 0) & (o == 2)) * 2' \
' + ((p1 == 2) & (p2 == 2) & (o == 1))' \
' + ((p1 == 2) & (p2 == 2) & (o == 0)) * 2' \
' + ((p1 == 0) & (p2 == 2) & (o == 0))' \
' + ((p1 == 0) & (p2 == 2) & (o == 2))' \
' + ((p1 == 2) & (p2 == 0) & (o == 0))' \
' + ((p1 == 2) & (p2 == 0) & (o == 2))' \
' + ((p1 == 0) & (p2 == 1) & (o == 2))' \
' + ((p1 == 1) & (p2 == 0) & (o == 2))' \
' + ((p1 == 2) & (p2 == 1) & (o == 0))' \
' + ((p1 == 1) & (p2 == 2) & (o == 0))'
errors = ne.evaluate(ex).astype('u1')
return errors
def diploid_mendelian_error_multiallelic(parental_genotypes, progeny_genotypes,
max_allele):
"""Implementation of function to find Mendelian errors generalised for
multiallelic variants.
"""
# transform genotypes into per-call allele counts
alleles = range(max_allele + 1)
p = anhima.gt.as_allele_counts(parental_genotypes, alleles=alleles)
o = anhima.gt.as_allele_counts(progeny_genotypes, alleles=alleles)
# detect nonparental and hemiparental inheritance by comparing allele
# counts between parents and progeny
ps = p.sum(axis=1)[:, np.newaxis] # add allele counts for both parents
ac_diff = (o - ps).astype('i1')
ac_diff[ac_diff < 0] = 0
# sum over all alleles
errors = np.sum(ac_diff, axis=2).astype('u1')
# detect uniparental inheritance by finding cases where no alleles are
# shared between parents, then comparing progeny allele counts to each
# parent
pc1 = p[:, 0, np.newaxis, :]
pc2 = p[:, 1, np.newaxis, :]
# find variants where parents don't share any alleles
is_shared_allele = (pc1 > 0) & (pc2 > 0)
no_shared_alleles = ~np.any(is_shared_allele, axis=2)
# find calls where progeny genotype is identical to one or the other parent
errors[
no_shared_alleles
& (np.all(o == pc1, axis=2)
| np.all(o == pc2, axis=2))
] = 1
# retrofit where either or both parent has a missing call
is_parent_missing = anhima.gt.is_missing(parental_genotypes)
errors[np.any(is_parent_missing, axis=1)] = 0
return errors
def diploid_mendelian_error(parental_genotypes, progeny_genotypes):
"""Find impossible genotypes according to Mendelian inheritance laws.
Parameters
----------
parental_genotypes : array_like, int
An array of shape (n_variants, 2, 2) where each element of the array
is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
progeny_genotypes : array_like, int
An array of shape (n_variants, n_progeny, 2) where each element of the
array is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
Returns
-------
errors : ndarray, uint8
An array of shape (n_variants, n_progeny) where each element counts
the number of non-Mendelian alleles in a progeny genotype call.
See Also
--------
count_diploid_mendelian_error
Notes
-----
Not applicable to polyploid genotype calls.
Applicable to multiallelic variants.
Assumes that genotypes are unphased.
"""
# check inputs
parental_genotypes = np.asarray(parental_genotypes)
progeny_genotypes = np.asarray(progeny_genotypes)
assert parental_genotypes.ndim == 3
assert progeny_genotypes.ndim == 3
# check the number of variants is equal in parents and progeny
assert parental_genotypes.shape[0] == progeny_genotypes.shape[0]
# check the number of parents
assert parental_genotypes.shape[1] == 2
# check the ploidy
assert parental_genotypes.shape[2] == progeny_genotypes.shape[2] == 2
# determine which implementation to use
max_allele = max(np.amax(parental_genotypes), np.amax(progeny_genotypes))
if max_allele < 2:
errors = diploid_mendelian_error_biallelic(parental_genotypes,
progeny_genotypes)
else:
errors = diploid_mendelian_error_multiallelic(parental_genotypes,
progeny_genotypes,
max_allele)
return errors
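# NOTE: a minimal, hypothetical example of diploid_mendelian_error for a single
# biallelic variant with two progeny; both parents are homozygous reference, so
# a het progeny carries one non-Mendelian allele and a hom-alt progeny two:
#
#     parents = np.array([[[0, 0], [0, 0]]])   # shape (1, 2, 2)
#     progeny = np.array([[[0, 1], [1, 1]]])   # shape (1, 2, 2)
#     diploid_mendelian_error(parents, progeny)
#     # -> [[1, 2]]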
def count_diploid_mendelian_error(parental_genotypes,
progeny_genotypes,
axis=None):
"""Count impossible genotypes according to Mendelian inheritance laws,
summed over all progeny genotypes, or summed along variants or samples.
Parameters
----------
parental_genotypes : array_like, int
An array of shape (n_variants, 2, 2) where each element of the array
is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
progeny_genotypes : array_like, int
An array of shape (n_variants, n_progeny, 2) where each element of the
array is an integer corresponding to an allele index (-1 = missing,
0 = reference allele, 1 = first alternate allele, 2 = second
alternate allele, etc.).
axis : int, optional
The axis along which to count (0 = variants, 1 = samples).
Returns
-------
n : int or array
If `axis` is None, returns the total number of Mendelian errors. If
`axis` is specified, returns the sum along the given `axis`.
See Also
--------
diploid_mendelian_error
"""
# sum errors
n = np.sum(diploid_mendelian_error(parental_genotypes,
progeny_genotypes),
axis=axis)
return n
def impute_inheritance_nearest(inheritance, pos, pos_impute):
"""Impute inheritance at unknown positions, by copying from
nearest neighbouring position where inheritance is known.
Parameters
----------
inheritance : array_like, int, shape (n_variants, n_gametes)
An array of integers coding the allelic inheritance state at the
known positions.
pos : array_like, int, shape (n_variants,)
Array of genomic positions at which `inheritance` was determined.
pos_impute : array_like, int
Array of positions at which to impute inheritance.
Returns
-------
imputed_inheritance : ndarray, int
An array of integers coding the imputed allelic inheritance.
"""
# check inputs
inheritance = np.asarray(inheritance)
assert inheritance.ndim == 2
pos = np.asarray(pos)
assert pos.ndim == 1
pos_impute = np.asarray(pos_impute)
assert pos_impute.ndim == 1
n_variants = pos.size
assert inheritance.shape[0] == n_variants
# find indices of neighbouring variants
indices_left = np.clip(np.searchsorted(pos, pos_impute), 0, n_variants - 1)
indices_right = np.clip(indices_left + 1, 0, n_variants - 1)
inh_left = np.take(inheritance, indices_left, axis=0)
inh_right = np.take(inheritance, indices_right, axis=0)
# find positions of neighbouring variants
pos_left = np.take(pos, indices_left)
pos_right = np.take(pos, indices_right)
# compute distance to neighbours
dist_left = np.abs(pos_impute - pos_left)
dist_right = np.abs(pos_right - pos_impute)
# build output
    out = np.zeros_like(inh_left)
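    # NOTE: the remainder of this function is missing here; a minimal sketch that
    # copies inheritance from whichever neighbouring variant is closer (ties
    # broken towards the left neighbour) could be:
    use_left = (dist_left <= dist_right)[:, np.newaxis]
    out[:] = np.where(use_left, inh_left, inh_right)
    return out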
import os, sys
src_path = os.path.abspath("../")
sys.path.append(src_path)
import numpy as np
import matplotlib.pyplot as plt
import crocoddyl
from models import lin_quad_action as lin_quad
from solvers import full
horizon = 100
plan_dt = 1e-2
x0 = np.zeros(4)
Q = 1e-2 * np.eye(4)
mu = 0.01
LINE_WIDTH = 100
lq_diff_running = lin_quad.DifferentialActionModelLQ()
lq_diff_terminal = lin_quad.DifferentialActionModelLQ(isTerminal=True)
lq_running = crocoddyl.IntegratedActionModelEuler(lq_diff_running, plan_dt)
lq_terminal = crocoddyl.IntegratedActionModelEuler(lq_diff_terminal, plan_dt)
models = [lq_running] * (horizon) + [lq_terminal]
print(" Constructing integrated models completed ".center(LINE_WIDTH, "-"))
problem = crocoddyl.ShootingProblem(x0, models[:-1], models[-1])
print(" Constructing shooting problem completed ".center(LINE_WIDTH, "-"))
xs = [x0] * (horizon + 1)
us = [np.zeros(2)] * horizon
dg_solver = full.SaddlePointSolver(problem, mu, Q)
print(" Constructing saddle point solver completed ".center(LINE_WIDTH, "-"))
dg_solver.solve(xs, us)
x_traj = np.array(dg_solver.xs)
xnext = [d.xnext.copy() for d in dg_solver.problem.runningDatas]
plt.figure("trajectory plot")
for t in range(len(np.array(dg_solver.xs[:-1]))):
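    # NOTE: the loop body (and the rest of this script) is missing here; a
    # plausible sketch that overlays the solver states x_traj with the model's
    # one-step predictions xnext could be:
    plt.plot(t, x_traj[t][0], 'b.', label='xs' if t == 0 else None)
    plt.plot(t, xnext[t][0], 'r+', label='xnext' if t == 0 else None)
plt.legend()
plt.show()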
# (setq python-shell-interpreter "./venv/bin/python")
import numpy as np
# import progressbar
# import imageio
import matplotlib.pyplot as plt
from numpy import linalg as LA
import tensorly as tl
from tensorly.tenalg import khatri_rao
from tensorly import unfold as tl_unfold
from tensorly.decomposition import parafac
from sklearn.decomposition import SparseCoder
import time
import os
from tqdm import trange
DEBUG = False
class Online_CPDL():
def __init__(self,
X, n_components=100,
iterations=500,
sub_iterations=10,
batch_size=20,
ini_CPdict=None,
ini_loading=None,
history=0,
ini_A=None,
ini_B=None,
alpha=None,
beta=None,
subsample=True):
'''
Online CP Dictionary Learning algorithm
X: data tensor (n+1 -dimensional) with shape I_1 * I_2 * ... * I_n * I_n+1
Last node considered as the "batch mode"
Seeks to find CP dictionary D = [A1, A2, ... , A_R], R=n_components, Ai = rank 1 tensor
Such that each slice X[:,:,..,:, j] \approx <D, c> for some c
n_components (int) = r = number of rank-1 CP factors
        iterations (int): number of iterations, where each iteration is a call to step(...)
        batch_size (int): number of randomly sampled columns of X used in each iteration
'''
self.X = X
self.n_modes = X.ndim - 1 ### -1 for discounting the last "batch_size" mode
self.n_components = n_components
self.batch_size = batch_size
self.iterations = iterations
self.sub_iterations = sub_iterations
self.At = None # code_covariance matrix to be learned
if ini_CPdict is not None:
self.CPdict = ini_CPdict
else:
self.CPdict = self.initialize_CPdict()
if ini_loading is None:
self.loading = self.initialize_loading()
else:
self.loading = ini_loading
if ini_A is not None:
self.ini_A = ini_A
else:
self.ini_A = np.zeros(shape=(n_components, n_components))
# print('A', self.ini_A)
        if ini_B is not None:
self.ini_B = ini_B
else:
Y = X.take(indices=0, axis=-1)
self.ini_B = np.zeros(shape=(len(Y.reshape(-1, 1)), n_components))
self.history = history
self.alpha = alpha
# print('???????alpha', alpha)
self.beta = beta
self.code = np.zeros(shape=(n_components, X.shape[-1])) ### X.shape[-1] = batch_size
self.subsample = subsample
def initialize_loading(self):
### loading = python dict of [U1, U2, \cdots, Un], each Ui is I_i x R array
loading = {}
for i in np.arange(self.n_modes): # n_modes = X.ndim -1 where -1 for the last `batch mode'
loading.update({'U' + str(i): np.random.rand(self.X.shape[i], self.n_components)})
return loading
def initialize_CPdict(self):
### CPdict = python dict of [A1, A2, \cdots, AR], R=n_components, each Ai is a rank-1 tensor
CPdict = {}
for i in np.arange(self.n_components):
            CPdict.update({'A' + str(i): np.zeros(shape=self.X.shape[:-1])})
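        # NOTE: the method body is truncated here; presumably it returns the
        # initialized dictionary, mirroring initialize_loading above:
        return CPdict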
#encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
#import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
from tqdm import tqdm
import datetime as dt
print("Starting job at time:",time.time())
debug = False
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates = ["activation_date"])
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates = ["activation_date"])
train_df = shuffle(train_df, random_state=1234); train_df = train_df.iloc[:200000]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=200000, parse_dates = ["activation_date"])
train_active = pd.read_csv("../input/train_active.csv", nrows=200000, usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", nrows=200000, usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", nrows=200000, parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", nrows=200000, parse_dates=["date_from", "date_to"])
print("loading data done!")
# =============================================================================
# Add image quality: by steeve
# =============================================================================
import pickle
with open('../input/inception_v3_include_head_max_train.p','rb') as f:
x = pickle.load(f)
train_features = x['features']
train_ids = x['ids']
with open('../input/inception_v3_include_head_max_test.p','rb') as f:
x = pickle.load(f)
test_features = x['features']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_features, columns = ['image_quality'])
incep_test_image_df = pd.DataFrame(test_features, columns = [f'image_quality'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del incep_train_image_df, incep_test_image_df
gc.collect()
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_blurinesses = x['blurinesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_blurinesses = x['blurinesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_blurinesses, columns = ['blurinesses'])
incep_test_image_df = pd.DataFrame(test_blurinesses, columns = [f'blurinesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding whitenesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_whitenesses = x['whitenesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_whitenesses = x['whitenesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_whitenesses, columns = ['whitenesses'])
incep_test_image_df = pd.DataFrame(test_whitenesses, columns = [f'whitenesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding dullnesses ...')
with open('../input/train_image_features.p','rb') as f:
x = pickle.load(f)
train_dullnesses = x['dullnesses']
train_ids = x['ids']
with open('../input/test_image_features.p','rb') as f:
x = pickle.load(f)
test_dullnesses = x['dullnesses']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_dullnesses, columns = ['dullnesses'])
incep_test_image_df = pd.DataFrame(test_dullnesses, columns = [f'dullnesses'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
# =============================================================================
# new image data
# =============================================================================
print('adding average_pixel_width ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_pixel_width = x['average_pixel_width']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_pixel_width = x['average_pixel_width']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_pixel_width, columns = ['average_pixel_width'])
incep_test_image_df = pd.DataFrame(test_average_pixel_width, columns = [f'average_pixel_width'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_reds ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_reds = x['average_reds']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_reds = x['average_reds']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_reds, columns = ['average_reds'])
incep_test_image_df = pd.DataFrame(test_average_reds, columns = [f'average_reds'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_blues ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_blues = x['average_blues']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_blues = x['average_blues']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_blues, columns = ['average_blues'])
incep_test_image_df = pd.DataFrame(test_average_blues, columns = [f'average_blues'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding average_greens ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_average_greens = x['average_greens']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_average_greens = x['average_greens']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_average_greens, columns = ['average_greens'])
incep_test_image_df = pd.DataFrame(test_average_greens, columns = [f'average_greens'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding widths ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_widths = x['widths']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_widths = x['widths']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_widths, columns = ['widths'])
incep_test_image_df = pd.DataFrame(test_widths, columns = [f'widths'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
print('adding heights ...')
with open('../input/train_image_features_1.p','rb') as f:
x = pickle.load(f)
train_heights = x['heights']
train_ids = x['ids']
with open('../input/test_image_features_1.p','rb') as f:
x = pickle.load(f)
test_heights = x['heights']
test_ids = x['ids']
del x; gc.collect()
incep_train_image_df = pd.DataFrame(train_heights, columns = ['heights'])
incep_test_image_df = pd.DataFrame(test_heights, columns = [f'heights'])
incep_train_image_df['image'] = (train_ids)
incep_test_image_df['image'] = (test_ids)
train_df = train_df.join(incep_train_image_df.set_index('image'), on='image')
test_df = test_df.join(incep_test_image_df.set_index('image'), on='image')
del test_average_blues, test_average_greens, test_average_reds, incep_test_image_df
del train_average_blues, train_average_greens, train_average_reds, incep_train_image_df
gc.collect()
#==============================================================================
# image features by Qifeng
#==============================================================================
print('adding image features @ qifeng ...')
with open('../input/train_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_train = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_train.rename(columns = {'$ids':'image'}, inplace = True)
with open('../input/test_image_features_cspace.p','rb') as f:
x = pickle.load(f)
x_test = pd.DataFrame(x, columns = ['average_HSV_Ss',\
'average_HSV_Vs',\
'average_LUV_Ls',\
'average_LUV_Us',\
'average_LUV_Vs',\
'average_HLS_Hs',\
'average_HLS_Ls',\
'average_HLS_Ss',\
'average_YUV_Ys',\
'average_YUV_Us',\
'average_YUV_Vs',\
'ids'
])
#x_test.rename(columns = {'$ids':'image'}, inplace = True)
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x, x_train, x_test; gc.collect()
#==============================================================================
# image features v2 by Qifeng
#==============================================================================
print('adding image features v2 ...')
with open('../input/train_image_features_cspace_v2.p','rb') as f:
x = pickle.load(f)
x_train = pd.DataFrame(x, columns = ['average_LAB_Ls',\
'average_LAB_As',\
'average_LAB_Bs',\
'average_YCrCb_Ys',\
'average_YCrCb_Crs',\
'average_YCrCb_Cbs',\
'ids'
])
#x_train.rename(columns = {'$ids':'image'}, inplace = True)
with open('../input/test_image_features_cspace_v2.p','rb') as f:
x = pickle.load(f)
x_test = pd.DataFrame(x, columns = ['average_LAB_Ls',\
'average_LAB_As',\
'average_LAB_Bs',\
'average_YCrCb_Ys',\
'average_YCrCb_Crs',\
'average_YCrCb_Cbs',\
'ids'
])
#x_test.rename(columns = {'$ids':'image'}, inplace = True)
train_df = train_df.join(x_train.set_index('ids'), on='image')
test_df = test_df.join(x_test.set_index('ids'), on='image')
del x, x_train, x_test; gc.collect()
# =============================================================================
# add geo info: https://www.kaggle.com/frankherfert/avito-russian-region-cities/data
# =============================================================================
#tmp = pd.read_csv("../input/avito_region_city_features.csv", usecols=["region", "city", "latitude","longitude"])
#train_df = train_df.merge(tmp, on=["city","region"], how="left")
#train_df["lat_long"] = train_df["latitude"]+train_df["longitude"]
#test_df = test_df.merge(tmp, on=["city","region"], how="left")
#test_df["lat_long"] = test_df["latitude"]+test_df["longitude"]
#del tmp; gc.collect()
# =============================================================================
# Add region-income
# =============================================================================
tmp = pd.read_csv("../input/region_income.csv", sep=";", names=["region", "income"])
train_df = train_df.merge(tmp, on="region", how="left")
test_df = test_df.merge(tmp, on="region", how="left")
del tmp; gc.collect()
# =============================================================================
# Add city population
# =============================================================================
tmp = pd.read_csv("../input/city_population_wiki_v3.csv")
train_df = train_df.merge(tmp, on="city", how="left")
test_df = test_df.merge(tmp, on="city", how="left")
del tmp; gc.collect()
# =============================================================================
# Here Based on https://www.kaggle.com/bminixhofer/aggregated-features-lightgbm/code
# =============================================================================
all_samples = pd.concat([train_df,train_active,test_df,test_active]).reset_index(drop=True)
all_samples.drop_duplicates(["item_id"], inplace=True)
del train_active, test_active; gc.collect()
all_periods = pd.concat([train_periods,test_periods])
all_periods["days_up"] = (all_periods["date_to"] - all_periods["date_from"]).dt.days
gp = all_periods.groupby(["item_id"])[["days_up"]]
gp_df = pd.DataFrame()
gp_df["days_up_sum"] = gp.sum()["days_up"]
gp_df["times_put_up"] = gp.count()["days_up"]
gp_df.reset_index(inplace=True)
gp_df.rename(index=str, columns={"index": "item_id"})
all_periods.drop_duplicates(["item_id"], inplace=True)
all_periods = all_periods.merge(gp_df, on="item_id", how="left")
all_periods = all_periods.merge(all_samples, on="item_id", how="left")
gp = all_periods.groupby(["user_id"])[["days_up_sum", "times_put_up"]].mean().reset_index()\
.rename(index=str, columns={"days_up_sum": "avg_days_up_user",
"times_put_up": "avg_times_up_user"})
n_user_items = all_samples.groupby(["user_id"])[["item_id"]].count().reset_index() \
.rename(index=str, columns={"item_id": "n_user_items"})
gp = gp.merge(n_user_items, on="user_id", how="outer") #left
del all_samples, all_periods, n_user_items, gp_df
gc.collect()
train_df = train_df.merge(gp, on="user_id", how="left")
test_df = test_df.merge(gp, on="user_id", how="left")
#==============================================================================
# ranked price by Qifeng
#==============================================================================
#TODO
print('adding ranked price..')
train_df["image_top_1"].fillna(-1,inplace=True)
train_df["price"].fillna(-1,inplace=True)
train_df["param_2"].fillna(-1,inplace=True)
train_df["city"].fillna("nicapotato",inplace=True)
test_df["image_top_1"].fillna(-1,inplace=True)
test_df["price"].fillna(-1,inplace=True)
test_df["param_2"].fillna(-1,inplace=True)
test_df["city"].fillna("nicapotato",inplace=True)
train_df['price_rank_img'] = train_df.groupby('image_top_1')['price'].rank(ascending=False)
train_df['price_rank_p2'] = train_df.groupby('param_2')['price'].rank(ascending=False)
train_df['price_rank_city'] = train_df.groupby('city')['price'].rank(ascending=False)
test_df['price_rank_img'] = test_df.groupby('image_top_1')['price'].rank( ascending=False)
test_df['price_rank_p2'] = test_df.groupby('param_2')['price'].rank(ascending=False)
test_df['price_rank_city'] = test_df.groupby('city')['price'].rank(ascending=False)
#===============================================================================
agg_cols = list(gp.columns)[1:]
del train_periods, test_periods; gc.collect()
del gp; gc.collect()
for col in agg_cols:
train_df[col].fillna(-1, inplace=True)
test_df[col].fillna(-1, inplace=True)
print("merging supplimentary data done!")
#
#
## =============================================================================
## done! go to the normal steps
## =============================================================================
def rmse(predictions, targets):
print("calculating RMSE ...")
return np.sqrt(((predictions - targets) ** 2).mean())
def text_preprocessing(text):
text = str(text)
text = text.lower()
text = re.sub(r"(\\u[0-9A-Fa-f]+)",r"", text)
text = re.sub(r"===",r" ", text)
# https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
text = " ".join(map(str.strip, re.split('(\d+)',text)))
regex = re.compile(u'[^[:alpha:]]')
text = regex.sub(" ", text)
text = " ".join(text.split())
return text
@contextmanager
def feature_engineering(df):
# All the feature engineering here
def Do_Text_Hash(df):
print("feature engineering -> hash text ...")
df["text_feature"] = df.apply(lambda row: " ".join([str(row["param_1"]),
str(row["param_2"]), str(row["param_3"])]),axis=1)
df["text_feature_2"] = df.apply(lambda row: " ".join([str(row["param_2"]), str(row["param_3"])]),axis=1)
df["title_description"] = df.apply(lambda row: " ".join([str(row["title"]), str(row["description"])]),axis=1)
print("feature engineering -> preprocess text ...")
df["text_feature"] = df["text_feature"].apply(lambda x: text_preprocessing(x))
df["text_feature_2"] = df["text_feature_2"].apply(lambda x: text_preprocessing(x))
df["description"] = df["description"].apply(lambda x: text_preprocessing(x))
df["title"] = df["title"].apply(lambda x: text_preprocessing(x))
df["title_description"] = df["title_description"].apply(lambda x: text_preprocessing(x))
def Do_Datetime(df):
print("feature engineering -> date time ...")
df["wday"] = df["activation_date"].dt.weekday
df["wday"].fillna(-1,inplace=True)
df["wday"] =df["wday"].astype(np.uint8)
def Do_Label_Enc(df):
print("feature engineering -> label encoding ...")
lbl = LabelEncoder()
cat_col = ["user_id", "region", "city", "parent_category_name",
"category_name", "user_type", "image_top_1",
"param_1", "param_2", "param_3","image",
]
for col in cat_col:
df[col] = lbl.fit_transform(df[col].astype(str))
gc.collect()
import string
count = lambda l1,l2: sum([1 for x in l1 if x in l2])
def Do_NA(df):
print("feature engineering -> fill na ...")
df["image_top_1"].fillna(-1,inplace=True)
df["image"].fillna("noinformation",inplace=True)
df["param_1"].fillna("nicapotato",inplace=True)
df["param_2"].fillna("nicapotato",inplace=True)
df["param_3"].fillna("nicapotato",inplace=True)
df["title"].fillna("nicapotato",inplace=True)
df["description"].fillna("nicapotato",inplace=True)
# price vs income
# df["price_vs_city_income"] = df["price"] / df["income"]
# df["price_vs_city_income"].fillna(-1, inplace=True)
df["income"].fillna(-1,inplace=True)
df["item_seq_number"].fillna(-1,inplace=True)
def Do_Count(df):
print("feature engineering -> do count ...")
# some count
df["num_desc_punct"] = df["description"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
df["num_desc_capE"] = df["description"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
df["num_desc_capP"] = df["description"].apply(lambda x: count(x, "[А-Я]")).astype(np.int16)
df["num_title_punct"] = df["title"].apply(lambda x: count(x, set(string.punctuation))).astype(np.int16)
df["num_title_capE"] = df["title"].apply(lambda x: count(x, "[A-Z]")).astype(np.int16)
df["num_title_capP"] = df["title"].apply(lambda x: count(x, "[А-Я]")) .astype(np.int16)
# good, used, bad ... count
df["is_in_desc_хорошо"] = df["description"].str.contains("хорошо").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_Плохо"] = df["description"].str.contains("Плохо").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_новый"] = df["description"].str.contains("новый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_старый"] = df["description"].str.contains("старый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_используемый"] = df["description"].str.contains("используемый").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_есплатная_доставка"] = df["description"].str.contains("есплатная доставка").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_есплатный_возврат"] = df["description"].str.contains("есплатный возврат").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_идеально"] = df["description"].str.contains("идеально").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_подержанный"] = df["description"].str.contains("подержанный").map({True:1, False:0}).astype(np.uint8)
df["is_in_desc_пСниженные_цены"] = df["description"].str.contains("Сниженные цены").map({True:1, False:0}).astype(np.uint8)
# new count 0604
df["num_title_Exclamation"] = df["title"].apply(lambda x: count(x, "!")).astype(np.int16)
df["num_title_Question"] = df["title"].apply(lambda x: count(x, "?")).astype(np.int16)
df["num_desc_Exclamation"] = df["description"].apply(lambda x: count(x, "!")).astype(np.int16)
df["num_desc_Question"] = df["description"].apply(lambda x: count(x, "?")).astype(np.int16)
def Do_Drop(df):
df.drop(["activation_date"], axis=1, inplace=True)
def Do_Stat_Text(df):
print("feature engineering -> statistics in text ...")
textfeats = ["text_feature","text_feature_2","description", "title"]
for col in textfeats:
df[col + "_num_chars"] = df[col].apply(len).astype(np.int16)
df[col + "_num_words"] = df[col].apply(lambda comment: len(comment.split())).astype(np.int16)
df[col + "_num_unique_words"] = df[col].apply(lambda comment: len(set(w for w in comment.split()))).astype(np.int16)
df[col + "_words_vs_unique"] = (df[col+"_num_unique_words"] / df[col+"_num_words"] * 100).astype(np.float32)
gc.collect()
# choose which functions to run
Do_NA(df)
Do_Text_Hash(df)
Do_Label_Enc(df)
Do_Count(df)
Do_Datetime(df)
Do_Stat_Text(df)
Do_Drop(df)
gc.collect()
return df
def data_vectorize(df):
russian_stop = set(stopwords.words("russian"))
tfidf_para = {
"stop_words": russian_stop,
"analyzer": "word",
"token_pattern": r"\w{1,}",
"sublinear_tf": True,
"dtype": np.float32,
"norm": "l2",
#"min_df":5,
#"max_df":.9,
"smooth_idf":False
}
# tfidf_para2 = {
# "stop_words": russian_stop,
# "analyzer": "char",
# "token_pattern": r"\w{1,}",
# "sublinear_tf": True,
# "dtype": np.float32,
# "norm": "l2",
# # "min_df":5,
# # "max_df":.9,
# "smooth_idf": False
# }
def get_col(col_name): return lambda x: x[col_name]
vectorizer = FeatureUnion([
("description", TfidfVectorizer(
ngram_range=(1, 2),
max_features=40000,#40000,18000
**tfidf_para,
preprocessor=get_col("description"))
),
# ("title_description", TfidfVectorizer(
# ngram_range=(1, 2),#(1,2)
# max_features=1800,#40000,18000
# **tfidf_para,
# preprocessor=get_col("title_description"))
# ),
("text_feature", CountVectorizer(
ngram_range=(1, 2),
preprocessor=get_col("text_feature"))
),
("title", TfidfVectorizer(
ngram_range=(1, 2),
**tfidf_para,
preprocessor=get_col("title"))
),
        # newly added: two extra text vectorizers, title2 and title_char
("title2", TfidfVectorizer(
ngram_range=(1, 1),
**tfidf_para,
preprocessor=get_col("title"))
),
# ("title_char", TfidfVectorizer(
#
# ngram_range=(1, 4),#(1, 4),(1,6)
# max_features=16000,#16000
# **tfidf_para2,
# preprocessor=get_col("title"))
# ),
])
vectorizer.fit(df.to_dict("records"))
ready_full_df = vectorizer.transform(df.to_dict("records"))
tfvocab = vectorizer.get_feature_names()
df.drop(["text_feature", "text_feature_2", "description","title", "title_description"], axis=1, inplace=True)
df.fillna(-1, inplace=True)
return df, ready_full_df, tfvocab
# =============================================================================
# Ridge feature https://www.kaggle.com/demery/lightgbm-with-ridge-feature/code
# =============================================================================
class SklearnWrapper(object):
def __init__(self, clf, seed=0, params=None, seed_bool = True):
if(seed_bool == True):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
oof_train = np.zeros((len_train,))
oof_test = np.zeros((len_test,))
    oof_test_skf = np.empty((NFOLDS, len_test))
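    # NOTE: the remainder of get_oof is missing here; a standard out-of-fold
    # sketch, assuming kf (a KFold splitter), NFOLDS, len_train and len_test are
    # defined elsewhere in the original script, could be:
    for i, (train_index, test_index) in enumerate(kf.split(x_train)):
        x_tr, y_tr = x_train[train_index], y[train_index]
        x_te = x_train[test_index]
        clf.train(x_tr, y_tr)
        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i, :] = clf.predict(x_test)
    oof_test[:] = oof_test_skf.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)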
# -*- coding: utf-8 -*-
"""
.. module:: tensorsignatures
:synopsis: TensorSignatures main module
.. moduleauthor:: <NAME> <github.com/sagar87>
"""
import sys
import os
import tensorflow as tf
import numpy as np
import h5py as h5
from tensorsignatures.config import *
from tensorsignatures.util import Initialization
from tqdm import trange
import functools
def doublewrap(function):
# A decorator decorator, allowing to use the decorator to be used without
# parentheses if no arguments are provided. All arguments must be optional.
# https://gist.github.com/danijar/8663d3bbfd586bffecf6a0094cd116f2
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
# A decorator for functions that define TensorFlow operations. The wrapped
# function will only be executed once. Subsequent calls to it will directly
# return the result so that operations are added to the graph only once.
# The operations added by the function live within a tf.variable_scope().
# If this decorator is used with arguments, they will be forwarded to the
# variable scope. The scope name defaults to the name of the wrapped
# function.
# https://gist.github.com/danijar/8663d3bbfd586bffecf6a0094cd116f2
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
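# NOTE: an illustrative (hypothetical) use of define_scope; the decorated method
# runs once, its result is cached on the instance, and the ops it creates live
# inside a tf.variable_scope named after the method:
#
#     class Model(object):
#         @define_scope
#         def weights(self):
#             return tf.Variable(tf.zeros([3, 3]), name='w')
#
#     m = Model()
#     m.weights is m.weights   # True: the second access returns the cached attribute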
class TensorSignature(object):
r"""Extracts tensor signatures from a SNV count tensor and a matrix
containing other mutation types.
Args:
snv (array-like, shape :math:`(3, 3, (t_1+1), \dots, (t_l+1), p, n)`):
Input SNV tensor; first and second dimension represent
transcription and replication, while the last two dimensions
contain p mutation types and n samples. Other dimensions may
represent arbtrary genomic states.
other (array-like, shape :math:`(q, n)`): Mutation count matrix with q
mutation types and n samples.
rank (:obj:`int`, :math:`2 \leq s < n`): Rank :math:`s` of the
decomposition.
N (array_like, shape :math:`(3, 3, (t_1+1), \dots, (t_l+1), p, 1)`):
Optional normalization tensor containing trinucleotide frequencies
for each genomic state.
        size (:obj:`int`, :math:`1 \leq \tau \leq +\infty`): Size parameter
:math:`\tau` for negative binomial distribution.
objective (:obj:`str`, :obj:`{'nbconst', 'poisson'}`): Likelihood
distribution to model mutation counts. Currently, the negative
binomial or poisson are supported.
        collapse (:obj:`bool`): Deprecated convenience option.
starter_learning_rate (:obj:`float`): Starting Learning rate.
decay_learning_rate (:obj:`str`, :obj:`{'exponential', 'constant'}`):
Learning rate decay.
optimizer (:obj:`str`, :obj:`{'ADAM', 'gradient_descent'}`): Allows
to set the optimizer.
epochs (:obj:`int`): Number of training epochs.
log_step (:obj:`int`): Log freuqency.
display_step (:obj:`int`): Update intervals of progress bar during.
dtype (:obj:`dtype`): Allows to set tensorflow number type.
verbose (:obj:`bool`): Verbose mode.
id (:obj:`str`): Job id.
init (:obj:`int`): Initialization.
seed (:obj:`int`): Random seed.
Returns:
A tensorsignatures model.
Examples:
>>> from tensorsignatures.tensorsignatures import TensorSignature
>>> model = TensorSignature(snv, other, rank = 5)
>>> model.fit()
"""
def __init__(self,
snv,
other,
rank,
N=None,
size=50,
objective='nbconst',
collapse=False,
starter_learning_rate=0.1,
decay_learning_rate='exponential',
optimizer='ADAM',
epochs=10000,
log_step=100,
display_step=100,
id='TSJOB',
init=0,
seed=None,
dtype=tf.float32,
verbose=True):
# store hyperparameters
self.rank = rank
self.size = size
self.objective = objective
self.collapse = collapse
self.starter_learning_rate = starter_learning_rate
self.decay_learning_rate = decay_learning_rate
self.optimizer = optimizer
self.epochs = epochs
self.log_step = log_step
self.display_step = display_step
self.id = id
self.init = init
self.seed = seed
self.dtype = dtype
self.verbose = verbose
# hyperparams
self.samples = snv.shape[-1]
self.observations = np.sum(~np.isnan(snv)) + np.sum(~np.isnan(other))
# dimensions
self.p = snv.shape[-2]
self.q = other.shape[0]
# intialize C1 and C2
self.tau = tf.constant(self.size, dtype=self.dtype)
# keep data
if self.collapse:
self.snv = TensorSignature.collapse_data(snv)
else:
self.snv = snv
self.other = other
if N is not None:
# added to enable tissue specific normalisations
N_dim = N.shape[-1]
if self.collapse:
self.N = TensorSignature.collapse_data(
N).reshape(3, 3, -1, 96, N_dim)
else:
self.N = N.reshape(3, 3, -1, 96, N_dim)
else:
self.N = None
# clustering dims
self.c = len(self.snv.shape) - 4
self.card = list(self.snv.shape)[2: -2]
self.card_prod = np.prod(self.card)
self.idex = self.indices_to_assignment(
np.arange(self.card_prod), self.card)
# initialize variables
self.M
self.S
self.E
self.T
self.Chat1
self.Chat2
self.C1
self.C2
self.L1
self.L2
self.L
# learning rate
self.global_step = tf.Variable(0, trainable=False)
self.learning_rate
self.minimize
# intialize logs
self.log_epochs = np.zeros(self.epochs // self.log_step)
self.log_learning_rate = np.zeros(self.epochs // self.log_step)
self.log_L = np.zeros(self.epochs // self.log_step)
self.log_L1 = np.zeros(self.epochs // self.log_step)
self.log_L2 = np.zeros(self.epochs // self.log_step)
def indices_to_assignment(self, I, card):
# Helper function to collapse additional genomic dimension
card = np.array(card, copy=False)
C = card.flatten()
A = np.mod(
np.floor(
np.tile(I.flatten().T, (len(card), 1)).T /
np.tile(np.cumprod(np.concatenate(([1.0], C[:0:-1]))),
(len(I), 1))),
np.tile(C[::-1], (len(I), 1)))
return A[:, ::-1]
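    # NOTE: a small, hypothetical example of indices_to_assignment: with
    # card = [2, 3] and I = np.arange(6), the linear indices 0..5 map to the
    # multi-dimensional assignments
    # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1], [1, 2]].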
@define_scope
def learning_rate(self):
# Initialize learning rates.
if self.decay_learning_rate == 'constant':
self._learning_rate = tf.constant(
self.starter_learning_rate, dtype=tf.float32, shape=())
elif self.decay_learning_rate == 'exponential':
self._learning_rate = tf.train.exponential_decay(
self.starter_learning_rate, self.global_step, 1000, 0.95,
staircase=True)
return self._learning_rate
@define_scope
def minimize(self):
# Initializes the minimizer.
if self.optimizer == 'ADAM':
self._minimize = tf.train.AdamOptimizer(
self.learning_rate).minimize(-self.L, self.global_step)
if self.optimizer == 'gradient_descent':
self._minimize = tf.train.GradientDescentOptimizer(
self.learning_rate).minimize(-self.L, self.global_step)
return self._minimize
@define_scope
def S1(self):
# Initializes the SNV signature tensor.
self.S0 = tf.Variable(
tf.truncated_normal(
[2, 2, self.p - 1, self.rank],
dtype=self.dtype,
seed=self.seed),
name='S0')
# apply softmax
self.S0s = tf.nn.softmax(
tf.concat(
[self.S0, tf.zeros([2, 2, 1, self.rank])], axis=2),
dim=2, name='S1')
# stack tensor
self._S1 = tf.reshape(
tf.stack([
self.S0s[0, 0, :, :],
self.S0s[1, 0, :, :],
0.5 * tf.reduce_sum(self.S0s[:, 0, :, :], axis=0),
self.S0s[1, 1, :, :],
self.S0s[0, 1, :, :],
0.5 * tf.reduce_sum(self.S0s[:, 1, :, :], axis=0),
0.5 * (self.S0s[0, 0, :, :] + self.S0s[1, 1, :, :]),
0.5 * (self.S0s[1, 0, :, :] + self.S0s[0, 1, :, :]),
0.25 * (tf.reduce_sum(self.S0s, axis=(0, 1)))
]), (3, 3, 1, self.p, self.rank))
if self.verbose:
print('S1:', self._S1.shape)
return self._S1
@define_scope
def T(self):
        # Initializes the signature matrix for other mutation types.
# initialize T0 with values from a truncated normal
self.T0 = tf.Variable(
tf.truncated_normal(
[self.q - 1, self.rank], dtype=self.dtype, seed=self.seed),
name='T0')
# apply softmax
T1 = tf.nn.softmax(
tf.concat(
[self.T0, tf.zeros([1, self.rank], dtype=self.dtype)], axis=0),
dim=0, name='T')
        # apply the mixing factor
self._T = T1 * (1 - tf.reshape(self.M, (1, self.rank)))
if self.verbose:
print('T:', self._T.shape)
return self._T
@define_scope
def E(self):
# Initializes exposures.
self.E0 = tf.Variable(
tf.truncated_normal(
[self.rank, self.samples], dtype=self.dtype, seed=self.seed),
name='E0')
# exponentiate to satisfy non-negativity constraint
self._E = tf.exp(self.E0, name='E')
if self.verbose:
print('E:', self._E.shape)
return self._E
@define_scope
def A(self):
# Initializes signature activities transcription/replication.
# self.a0[0,:] => to a_t
# self.a0[1,:] => to a_r
self.a0 = tf.Variable(
tf.truncated_normal(
[2, self.rank], dtype=tf.float32, seed=self.seed),
name='a0')
a1 = tf.exp(
tf.reshape(
tf.concat(
[self.a0, self.a0, tf.zeros([2, self.rank])], axis=0),
(3, 2, self.rank)))
# outer product
self._A = tf.reshape(
a1[:, 0, :][:, None, :] * a1[:, 1, :][None, :, :],
(3, 3, 1, 1, self.rank))
if self.verbose:
print('A:', self._A.shape)
return self._A
@define_scope
def B(self):
        # Initializes transcription/replication biases.
# self.b0[0,:] => b_t (coding / template)
# self.b0[1,:] => b_r (lagging / leading)
self.b0 = tf.Variable(
tf.truncated_normal(
[2, self.rank], dtype=tf.float32, seed=self.seed),
name='b0')
# stack variables
self._B = tf.exp(tf.reshape(
tf.stack([
self.b0[0, :] + self.b0[1, :],
self.b0[0, :] - self.b0[1, :],
self.b0[0, :],
self.b0[1, :] - self.b0[0, :],
-self.b0[1, :] - self.b0[0, :],
-self.b0[0, :],
self.b0[1, :],
-self.b0[1, :], tf.zeros(self.b0[0, :].shape)]),
(3, 3, 1, 1, self.rank)))
if self.verbose:
print('B:', self._B.shape)
return self._B
@define_scope
def K(self):
# Initializes variables for generic tensorfactors
self._clu_var = {}
self._cbiases = {}
for i in range(2, 2 + self.c):
k = i - 2
v = tf.Variable(
tf.truncated_normal(
[self.card[k] - 1, self.rank],
dtype=self.dtype,
seed=self.seed),
name='k{}'.format(k))
self._clu_var[i - 2] = v
shapes = [1 if k != i else self.card[k] for i in range(self.c)]
dim = (1, 1, *shapes, 1, 1, self.rank)
self._cbiases[i - 2] = tf.concat(
[tf.zeros([1, self.rank], dtype=self.dtype), v], axis=0)
if self.verbose:
print('k{}:'.format(k), self._cbiases[i - 2].shape)
final_tensor = []
for r in range(self.idex.shape[0]):
current_term = []
for c in range(self.idex.shape[1]):
current_term.append(
self._cbiases[c][self.idex[r, c].astype(int), :])
final_tensor.append(tf.reduce_sum(tf.stack(current_term), axis=0))
self._K = tf.exp(
tf.reshape(tf.stack(final_tensor), (1, 1, -1, 1, self.rank)))
if self.verbose:
print('K:'.format(i), self._K.shape)
return self._K
@define_scope
def M(self):
# Initializes mixing factor variables.
self.m0 = tf.Variable(
tf.truncated_normal(
[1, self.rank], dtype=self.dtype, seed=self.seed),
name='m0')
self.m1 = tf.sigmoid(self.m0, name='m1')
self._M = tf.reshape(self.m1, (1, 1, 1, 1, self.rank))
if self.verbose:
print('m:', self._M.shape)
return self._M
@define_scope
def S(self):
# Initialize the final SNV tensor.
self._S = self.S1 * self.A * self.B * self.K * self.M
if self.verbose:
print('S:', self._S.shape)
return self._S
@define_scope
def C1(self):
# Stores the count tensor.
self._C1 = tf.constant(
self.snv.reshape(3, 3, -1, self.p, self.samples), dtype=self.dtype)
if self.verbose:
print('C1:', self._C1.shape)
return self._C1
@define_scope
def C2(self):
# Stores the other mutation types tensor.
sub_set = np.ones_like(self.other)
sub_set[np.where(np.isnan(self.other))] = 0
self.other[np.where(np.isnan(self.other))] = 0
self.C2_nans = tf.constant(sub_set, dtype=self.dtype)
self._C2 = tf.constant(self.other, dtype=self.dtype)
if self.verbose:
print('C2:', self._C2.shape)
return self._C2
@define_scope
def Chat1(self):
# Compute predicted counts of the count tensor.
self._Chat1 = tf.reshape(
tf.matmul(tf.reshape(self.S, (-1, self.rank)), self.E),
(3, 3, -1, 96, self.samples), name='Chat1')
if self.N is not None:
self._Chat1 *= (self.N.astype('float32') + 1e-6)
if self.verbose:
print('Multiplied N:', self.N.shape)
if self.verbose:
print('Chat1:', self._Chat1.shape)
return self._Chat1
@define_scope
def Chat2(self):
        # Computes predicted counts for the other mutation count matrix.
self._Chat2 = tf.matmul(self.T, self.E)
if self.verbose:
print('Chat2:', self._Chat2.shape)
return self._Chat2
@define_scope
def L1ij(self):
        # Computes the log likelihood for each entry in the SNV count tensor.
if self.objective == 'nbconst':
if self.verbose:
print('Using negative binomial likelihood')
self._L1ij = self.tau \
* tf.log(self.tau) \
- tf.lgamma(self.tau) \
+ tf.lgamma(self.C1 + self.tau) \
+ self.C1 * tf.log(self.Chat1) \
- tf.log(self.Chat1 + self.tau) \
* (self.tau + self.C1) \
- tf.lgamma(self.C1 + 1)
if self.objective == 'poisson':
if self.verbose:
print('Using poisson likelihood')
self._L1ij = self.C1 \
* tf.log(self.Chat1) \
- self.Chat1 \
- tf.lgamma(self.C1 + 1)
return self._L1ij
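    # Added note (not in the original source): the expression above is the
    # log-pmf of a negative binomial with mean Chat1 and dispersion tau,
    #   log NB(c; mu, tau) = tau*log(tau) - lgamma(tau) + lgamma(c + tau)
    #                        + c*log(mu) - (c + tau)*log(mu + tau) - lgamma(c + 1),
    # which approaches the Poisson log-likelihood of the 'poisson' branch as
    # tau grows large.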
@define_scope
def L2ij(self):
        # Computes the log likelihood for each entry in the matrix of other
        # mutation types.
if self.objective == 'nbconst':
self._L2ij = self.tau \
* tf.log(self.tau) \
- tf.lgamma(self.tau) \
+ tf.lgamma(self.C2 + self.tau) \
+ self.C2 * tf.log(self.Chat2) \
- tf.log(self.Chat2 + self.tau) \
* (self.tau + self.C2) \
- tf.lgamma(self.C2 + 1)
if self.objective == 'poisson':
self._L2ij = self.C2 \
* tf.log(self.Chat2) \
- self.Chat2 \
- tf.lgamma(self.C2 + 1)
return self._L2ij
@define_scope
def L1(self):
# Sums the log likelihood of each entry in L1ij.
self._L1 = tf.reduce_sum(self.L1ij)
return self._L1
@define_scope
def L2(self):
# Sums the log likelihood of each entry in L2ij.
self._L2 = tf.reduce_sum(self.L2ij * self.C2_nans)
return self._L2
@define_scope
def L(self):
# Sum of log likelihoods L1 and L2.
self._L = self.L1 + self.L2
return self._L
def fit(self, sess=None):
"""Fits the model.
Args:
sess (:obj:`tensorflow.Session`): Tensorflow session, if None
TensorSignatures will open new tensorflow session and close it
after fitting the model.
Returns:
            The result of the fit (an :obj:`Initialization` object).
"""
        # fits the model
        close_session = False
        if sess is None:
            init = tf.global_variables_initializer()
            sess = tf.Session()
            sess.run(init)
            close_session = True
t = trange(self.epochs, desc='Progress', leave=True)
previous_likelihood = 0
for i in t:
_ = sess.run(self.minimize)
log_step = i // self.log_step
if (i % self.log_step == 0):
self.log_epochs[log_step] = i
self.log_learning_rate[log_step] = sess.run(self.learning_rate)
self.log_L[log_step] = sess.run(self.L)
self.log_L1[log_step] = sess.run(self.L1)
self.log_L2[log_step] = sess.run(self.L2)
if (i % self.display_step == 0) and self.verbose:
current_likelihood = sess.run(self.L)
log_string = LOG_STRING.format(
lh=current_likelihood,
snv=sess.run(self.L1),
other=sess.run(self.L2),
lr=sess.run(self.learning_rate),
delta=current_likelihood - previous_likelihood)
t.set_description(log_string)
t.refresh()
previous_likelihood = current_likelihood
# save the loglikelihood value of the last iteration
self.log_epochs[-1] = i
self.log_learning_rate[-1] = sess.run(self.learning_rate)
self.log_L[-1] = sess.run(self.L)
self.log_L1[-1] = sess.run(self.L1)
self.log_L2[-1] = sess.run(self.L2)
self.result = Initialization(S0=sess.run(self.S0),
a0=sess.run(self.a0), b0=sess.run(self.b0),
ki=sess.run(self._clu_var), m0=sess.run(self.m0),
T0=sess.run(self.T0), E0=sess.run(self.E0), rank=self.rank,
size=self.size, objective=self.objective,
starter_learning_rate=self.starter_learning_rate,
decay_learning_rate=self.decay_learning_rate,
optimizer=self.optimizer, epochs=self.epochs,
log_step=self.log_step, display_step=self.display_step,
observations=self.observations, id=self.id, init=self.init,
seed=self.seed, log_epochs=self.log_epochs,
log_learning_rate=self.log_learning_rate, log_L=self.log_L,
log_L1=self.log_L1, log_L2=self.log_L2,
sample_indices=np.arange(self.samples))
        if close_session:
            sess.close()
return self.result
def get_tensors(self, sess):
"""Extracts signatures, exposures and tensor factors.
Args:
            sess (:obj:`tf.Session`): Tensorflow session in which the model
was trained.
Returns:
A :obj:`dict` containing signatures, exposures and tensorfactors.
"""
VARS = PARAMETERS + VARIABLES
tensors = [var for var in dir(self) if (var.strip('_') in VARS)]
data = {}
for var in tensors:
if (type(getattr(self, var)) == tf.Tensor or
type(getattr(self, var)) == tf.Variable):
data[var.strip('_')] = np.array(sess.run(getattr(self, var)))
elif (type(getattr(self, var)) == np.ndarray):
data[var.strip('_')] = getattr(self, var)
elif (type(getattr(self, var)) == int):
data[var] = getattr(self, var)
for k, v in self._clu_var.items():
data['k{}'.format(k)] = np.array(sess.run(v))
return data
@staticmethod
def collapse_data(snv):
r"""Deprecated convinience function to collapse pyrimidine/purine
dimension (snv.shape[-2])
Args:
snv (array-like, shape :math:`(3,3,(t_1+1),\dots,(t_l),2,p,n)`):
SNV count tensor with distinct pyrimidine purine dimension.
Returns:
snv (array, shape :math:`(3, 3, (t_1+1), \dots, (t_l), p, n)`):
Collapsed SNV array.
"""
col1 = snv[[slice(None)] * (snv.ndim - 3) + [0] + [slice(None)] * 2]
col2 = []
dims = [
(1, 1), (1, 0), (1, 2),
(0, 1), (0, 0), (0, 2),
(2, 1), (2, 0), (2, 2)]
for i, j in dims:
idx = [i, j] \
+ [slice(None)] \
* (snv.ndim - 5) \
+ [1] \
+ [slice(None)] \
* 2
col2.append(snv[idx])
col2 = np.stack(col2).reshape(col1.shape)
return col1 + col2
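    # Usage sketch (hypothetical shapes, for illustration only): for an input of
    # shape (3, 3, 2, 2, 96, n) -- the third-to-last axis of length 2 holding the
    # pyrimidine/purine split -- collapse_data returns an array of shape
    # (3, 3, 2, 96, n), adding the purine counts (with transcription and
    # replication states flipped) onto the matching pyrimidine entries.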
class TensorSignatureRefit(TensorSignature):
r"""Fits a set of signatures (ts.Initialization)to a dataset.
Args:
snv (array-like, shape :math:`(3, 3, (t_1+1), \dots, (t_l+1), p, n)`):
Input SNV tensor; first and second dimension represent
transcription and replication, while the last two dimensions
contain p mutation types and n samples. Other dimensions may
            represent arbitrary genomic states.
other (array-like, shape :math:`(q, n)`): Mutation count matrix with q
mutation types and n samples.
rank (:obj:`int`, :math:`2 \leq s < n`): Rank :math:`s` of the
decomposition.
N (array_like, shape :math:`(3, 3, (t_1+1), \dots, (t_l+1), p, 1)`):
Optional normalization tensor containing trinucleotide frequencies
for each genomic state.
        size (:obj:`int`, :math:`1 \leq \tau \leq +\infty`): Size parameter
:math:`\tau` for negative binomial distribution.
objective (:obj:`str`, :obj:`{'nbconst', 'poisson'}`): Likelihood
distribution to model mutation counts. Currently, the negative
binomial or poisson are supported.
        collapse (:obj:`bool`): Deprecated convenience function.
        starter_learning_rate (:obj:`float`): Starting learning rate.
decay_learning_rate (:obj:`str`, :obj:`{'exponential', 'constant'}`):
Learning rate decay.
        optimizer (:obj:`str`, :obj:`{'ADAM', 'gradient_descent'}`): Sets
            the optimizer.
epochs (:obj:`int`): Number of training epochs.
        log_step (:obj:`int`): Log frequency.
        display_step (:obj:`int`): Update interval of the progress bar during training.
        dtype (:obj:`dtype`): Sets the tensorflow number type.
verbose (:obj:`bool`): Verbose mode.
id (:obj:`str`): Job id.
init (:obj:`int`): Initialization.
seed (:obj:`int`): Random seed.
Returns:
A tensorsignatures model.
Examples:
        >>> from tensorsignatures.tensorsignatures import TensorSignatureRefit
        >>> model = TensorSignatureRefit(snv, other, reference)
>>> model.fit()
"""
def __init__(self, snv, other, reference, N=None, **kwargs):
self.ref = reference
self.rank = self.ref.rank
self.size = self.ref.size
self.objective = self.ref.objective
self.collapse = kwargs.get('collapse', False)
self.starter_learning_rate = kwargs.get(
STARTER_LEARNING_RATE, self.ref.starter_learning_rate)
self.decay_learning_rate = kwargs.get(
DECAY_LEARNING_RATE, self.ref.decay_learning_rate)
self.optimizer = kwargs.get(OPTIMIZER, self.ref.optimizer)
self.epochs = kwargs.get(EPOCHS, 5000)
self.log_step = kwargs.get(LOG_STEP, self.ref.log_step)
self.display_step = kwargs.get(DISPLAY_STEP, self.ref.log_step)
self.id = kwargs.get(ID, self.ref.id)
self.init = kwargs.get(INIT, 0)
self.seed = kwargs.get(SEED, None)
self.dtype = kwargs.get('dtype', tf.float32)
self.verbose = kwargs.get('verbose', False)
# hyper
self.samples = snv.shape[-1]
self.observations = self.ref.observations
# dimensions
self.p = snv.shape[-2]
self.q = other.shape[0]
self.tau = tf.constant(self.ref.size, dtype=self.dtype)
if self.collapse:
self.snv = TensorSignature.collapse_data(snv)
else:
self.snv = snv
self.other = other
if N is not None:
if self.collapse:
self.N = TensorSignature.collapse_data(
N).reshape(3, 3, -1, 96, 1)
else:
self.N = N.reshape(3, 3, -1, 96, 1)
else:
self.N = None
self.card = [k + 1 for k in self.ref._kdim]
self.card_prod = np.prod(self.card)
self.idex = self.indices_to_assignment(
np.arange(self.card_prod), self.card)
# initialize variables
self.M
self.S
self.E
self.T
self.Chat1
self.Chat2
self.C1
self.C2
self.L1
self.L2
self.L
# learning rate
self.global_step = tf.Variable(0, trainable=False)
self.learning_rate
self.minimize
        # initialize logs
self.log_epochs = np.zeros(self.epochs // self.ref.log_step)
self.log_learning_rate = np.zeros(self.epochs // self.ref.log_step)
self.log_L = np.zeros(self.epochs // self.ref.log_step)
self.log_L1 =
|
np.zeros(self.epochs // self.ref.log_step)
|
numpy.zeros
|
import json
import os
import sys
import traceback
import numpy as np
from collections import namedtuple
from ws.shared.logger import *
from ws.shared.converter import OneHotVectorTransformer
class HyperparameterConfigurationReader(object):
def __init__(self, cfg_file_name, config_path=""):
self._dict = {}
if not cfg_file_name.endswith('.json'):
cfg_file_name += '.json'
path ="{}{}".format(config_path, cfg_file_name)
if os.path.exists(path):
self._dict = self.read_json(path)
else:
error("hyperparam config not found: {}".format(path))
def read_json(self, cfg_file_name):
with open(cfg_file_name) as json_cfg:
json_dict = json.load(json_cfg)
return json_dict
def get_config(self):
try:
hc = HyperparameterConfiguration(self._dict)
if self.validate(hc):
return hc
except Exception as ex:
raise ValueError("Invalid configuration: {}".format(self._dict))
def validate(self, cfg):
if not hasattr(cfg, 'hyperparams'):
error('json object does not contain hyperparams attribute: {}'.format(cfg))
return False
for hyperparam, conf in cfg.hyperparams.__dict__.items():
# attribute existence test
if not hasattr(conf, 'type'):
error(hyperparam + " has not type attribute.")
return False
else:
supported_types = ['int', 'float', 'str', 'bool', 'unicode']
if not conf.type in supported_types:
return False
if not hasattr(conf, 'value_type'):
error(hyperparam + " has not value_type attribute.")
return False
else:
supported_value_types = ['discrete', 'continuous', 'preordered', 'categorical']
if not conf.value_type in supported_value_types:
return False
if not hasattr(conf, 'range'):
error(hyperparam + " has not range attribute.")
return False
else:
range_list = conf.range
                if len(range_list) == 0:
error(hyperparam + " has no range values")
return False
for value in range_list:
value_type_name = type(value).__name__
if value_type_name == 'unicode':
value_type_name = 'str'
if value_type_name != conf.type:
if not hasattr(conf, 'power_of'):
error(hyperparam + " has invalid type item.")
return False
return True
class DictionaryToObject(object):
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [DictionaryToObject(x)
if isinstance(
x, dict) else x for x in b])
else:
setattr(self, a, DictionaryToObject(b)
if isinstance(b, dict) else b)
class HyperparameterConfiguration(DictionaryToObject):
def __init__(self, d):
self._dict = d
super(HyperparameterConfiguration, self).__init__(d)
def get_param_names(self):
if 'param_order' in self._dict:
return self._dict['param_order']
else:
param_list = [ p for p in self.hyperparams.__dict__.keys() ]
param_list.sort()
return param_list
def get_type(self, name):
if name in self.get_param_names():
hyperparam = getattr(self.hyperparams, name)
if hyperparam.type == 'unicode':
return "str"
else:
return hyperparam.type
raise ValueError("Invalid hyperparameter name: {}".format(name))
def get_value_type(self, name):
if name in self.get_param_names():
hyperparam = getattr(self.hyperparams, name)
return hyperparam.value_type
raise ValueError("Invalid hyperparameter name: {}".format(name))
def get_range(self, name):
if name in self.get_param_names():
hyperparam = getattr(self.hyperparams, name)
r = hyperparam.range
if hasattr(hyperparam, 'power_of'):
base = hyperparam.power_of
r = []
for power in hyperparam.range:
r.append(base**power)
if hyperparam.type == 'unicode':
r = []
for item in hyperparam.range:
r.append(item.encode('ascii', 'ignore'))
return r
else:
raise ValueError("Invalid hyperparameter name: {}".format(name))
def get_default_vector(self):
vec = []
for name in self.get_param_names():
hyperparam = getattr(self.hyperparams, name)
if hasattr(hyperparam, 'default'):
vec.append(hyperparam.default)
else:
min_val = self.get_range(name)[0]
debug("No default value setting. Use {} as a minimum value of the range.".format(min_val))
vec.append(min_val)
return vec
def get_dict(self):
return self._dict
def convert(self, source_type, target_type, value):
if source_type == 'grid' and target_type == 'hpv_list':
return self.grid_to_hpv_list(value)
elif source_type == 'dict' and target_type == 'arr':
return self.dict_to_array(value, False)
elif source_type == 'dict' and target_type == 'norm_arr':
return self.dict_to_array(value, True)
elif source_type == 'arr' and target_type == 'norm_arr':
return self.arr_to_norm_vec(value)
elif source_type == 'arr' and target_type == 'list':
return self.arr_to_list(value)
elif source_type == 'hopt_dict' and target_type == 'dict':
return self.replace_cat_number(value)
elif source_type == 'arr' and target_type == 'dict':
return self.arr_to_dict(value)
elif target_type == 'one_hot':
return self.to_one_hot_vector(value)
else:
raise TypeError("Invalid type.")
def grid_to_hpv_list(self, grid_list):
hpvs = []
p_names = self.get_param_names()
# TODO:speeding up may be required using parallelization
for i in range(len(grid_list)):
g = grid_list[i]
hpv = []
for j in range(len(g)):
arg = self.unnormalize(p_names[j], g[j])
hpv.append(arg)
hpvs.append(hpv)
return hpvs
def dict_to_array(self, hp_dict, normalize):
arr = []
for p in self.get_param_names():
arr.append(hp_dict[p])
if normalize == True:
arr = self.arr_to_norm_vec(arr)
return arr
def arr_to_list(self, arr):
# arr to type-casted list
typed_list = []
p_list = self.get_param_names()
if len(p_list) != len(arr):
raise TypeError("Invalid hyperparameter vector: {}".format(arr))
for i in range(len(p_list)):
p = p_list[i]
t = self.get_type(p)
if t == 'int':
v = int(float(arr[i])) # FIX: float type string raises ValueError
else:
v = eval(t)(arr[i])
typed_list.append(v)
return typed_list
def arr_to_dict(self, arr):
# arr to type-casted dictionary
typed_dict = {}
p_list = self.get_param_names()
if len(p_list) != len(arr):
raise TypeError("Invalid hyperparameter vector: {}".format(arr))
for i in range(len(p_list)):
p = p_list[i]
t = self.get_type(p)
v = eval(t)(arr[i])
typed_dict[p] = v
return typed_dict
def to_one_hot_vector(self, vector):
one_hot = []
vector_dict = {}
p_list = self.get_param_names()
if isinstance(vector, dict):
vector_dict = vector
else:
vector = self.arr_to_list(vector)
for i in range(len(vector)):
k = p_list[i]
v = vector[i]
vector_dict[k] = v
t = OneHotVectorTransformer(self)
one_hot = t.transform(vector_dict)
return one_hot
def arr_to_norm_vec(self, vector):
value_types = []
ranges = []
types = []
for param in self.get_param_names():
value_types.append(self.get_value_type(param))
ranges.append(self.get_range(param))
types.append(self.get_type(param))
if isinstance(vector, dict):
vector = self.dict_to_list(vector)
vector = self.arr_to_list(vector)
p_list = self.get_param_names()
normalized = []
# min-max normalization
for i in range(0, len(vector)):
param_name = p_list[i]
value_type = value_types[i]
type = types[i]
value = vector[i]
param_range = ranges[i]
if value_type != 'categorical' and value_type != 'preordered':
max_val = param_range[-1]
min_val = param_range[0]
denominator = max_val - min_val
numerator = float(value) - min_val
normalized.append(float(numerator) / float(denominator))
else:
#debug("Categorical/preordered type in config: {}({})".format(param_name, value_type))
n_v = float(param_range.index(value)) / float(len(param_range))
normalized.append(n_v)
return np.array(normalized)
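    # Worked example (hypothetical config, for illustration only): with two
    # hyperparameters lr (continuous, range [0.001, 0.1]) and optimizer
    # (categorical, range ['adam', 'rmsprop', 'sgd']),
    #   arr_to_norm_vec([0.01, 'sgd'])
    # gives roughly [(0.01 - 0.001) / (0.1 - 0.001), 2 / 3] = [0.0909..., 0.6666...].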
def get_nearby_index(self, candidates, hpv, params):
vec = params
p_list = self.get_param_names()
if type(params) == dict:
vector_list = []
for i in range(len(p_list)):
vector_list.append(params[p_list[i]])
vec =
|
np.array(vector_list)
|
numpy.array
|
"""
Implementation of MOSES SVD (2nd algorithm)
arXiv:1806.01304v3
"""
import numpy as np
# import scipy.linalg as spLinalg
from numba import jit
@jit(nopython=True)
def gramSchmidt_T(A):
"""
Applies the Gram-Schmidt method to A
and returns Q and R, so Q*R = A.
"""
R = np.zeros((A.shape[1], A.shape[1]), dtype=A.dtype)
Q = np.zeros(A.shape, dtype=A.dtype)
for k in range(0, A.shape[1]):
R[k, k] = np.sqrt(np.dot(A[:, k], A[:, k]))
Q[:, k] = A[:, k]/R[k, k]
for j in range(k+1, A.shape[1]):
R[k, j] = np.dot(Q[:, k], A[:, j])
A[:, j] = A[:, j] - R[k, j]*Q[:, k]
return Q, R
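def _gram_schmidt_sanity_check():
    # Illustrative self-test (added, not part of the original module): Q should
    # have orthonormal columns and Q @ R should reconstruct A. Note that
    # gramSchmidt_T overwrites its input in place, so a copy is passed.
    A = np.random.standard_normal((6, 3))
    Q, R = gramSchmidt_T(A.copy())
    assert np.allclose(Q @ R, A)
    assert np.allclose(Q.conj().T @ Q, np.eye(A.shape[1]))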
class MOSESSVD():
def __init__(self, rank, dtype=np.complex64):
"""
Implementation of the MOSES SVD algorithm (2nd algorithm)
arXiv:1806.01304v3
Parameters
----------
rank : int
The rank for the SVD. The first r largest singular values are
calculated.
dtype : numpy.dtype, optional
The desired datatype. The default is np.complex64.
Returns
-------
None.
"""
self.X = None
self.rank = rank
self.S = None
self.Gamma = None
self.Q = None
self.first_iter = True
self.dtype = dtype
# @profile
def update(self, x):
"""
Execute one iteration of the MOSES SVD algorithm.
Parameters
----------
x : numpy.ndarray
Chunk of input data that is added to the SVD.
Returns
-------
None.
"""
if self.first_iter:
self.S, self.Gamma, self.Q = self._update_first(x, self.rank, self.dtype)
self.first_iter = False
else:
self.S, self.Gamma, self.Q = self._update(x, self.S, self.Gamma, self.Q, self.rank, self.dtype)
# @profile
@staticmethod
@jit(nopython=True)
# def _update_first(self, x, r, dtype):
def _update_first(x, r, dtype):
"""
Initialize the algorithm with a regular SVD.
Parameters
----------
x : numpy.ndarray
Chunk of input data that is added to the SVD.
r : int
The rank for the SVD. The first r largest singular values are
calculated.
dtype : numpy.dtype
The desired datatype.
Returns
-------
S : numpy.ndarray
U matrix.
Gamma : numpy.ndarray
Sigma matrix.
Q : numpy.ndarray
V matrix.
"""
S, Gamma, Q = np.linalg.svd(x, full_matrices=False)
Q = Q.conj().T
Gamma = np.diag(Gamma)
S = S[:, :r].astype(dtype)
Gamma = Gamma[:r, :r].astype(dtype)
Q = Q[:, :r].astype(dtype)
return S, Gamma, Q
# @profile
@staticmethod
@jit(nopython=True)
# def _update(self, x, S, Gamma, Q, r, dtype):
def _update(x, S, Gamma, Q, r, dtype):
"""
The main loop of MOSES SVD. It iteratively updates U, s, V, by taking
in data in chunks.
Parameters
----------
x : numpy.ndarray
Chunk of input data that is added to the SVD.
S : numpy.ndarray
U matrix.
Gamma : numpy.ndarray
Sigma matrix.
Q : numpy.ndarray
V matrix.
r : int
The rank for the SVD. The first r largest singular values are
calculated.
dtype : numpy.dtype
The desired datatype.
Returns
-------
S : numpy.ndarray
U matrix.
Gamma : numpy.ndarray
Sigma matrix.
Q : numpy.ndarray
V matrix.
"""
# n = x.shape[0]
b = x.shape[1]
qq = S.conj().T.dot(x)
z = x - S.dot(qq)
ss, v = np.linalg.qr(z)
ss = ss.astype(dtype)
v = v.astype(dtype)
M_1 = np.hstack((Gamma, qq))
M_2 = np.hstack((np.zeros((b, r)).astype(dtype), v))
M = np.vstack((M_1, M_2))
u, Gamma, q_h = np.linalg.svd(M, full_matrices=False)
u = u[:, :r].astype(dtype)
Gamma = np.diag(Gamma)[:r, :r].astype(dtype)
q_h = q_h.conj().T[:, :r].astype(dtype)
S = np.hstack((S, ss)).dot(u)
S = S[:, :r]
Q_1 = np.hstack((Q, np.zeros((Q.shape[0], b)))).astype(dtype)
Q_2 = np.hstack((np.zeros((b, Q.shape[1])),
|
np.eye(b)
|
numpy.eye
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from drl_hw1.utils.gym_env import EnvSpec
from torch.utils.data import TensorDataset, DataLoader
class MLPBaseline:
def __init__(self, env_spec: EnvSpec, hidden_sizes=(64,64), learning_rate=1e-4, epoch=10, batch=10, seed=None):
self.feature_size = env_spec.observation_dim + 4
self.loss_fn = nn.MSELoss(size_average=False)
self.learning_rate = learning_rate
self.hidden_sizes = hidden_sizes
self.epoch = epoch
# torch.manual_seed(seed)
self.batch = batch
self.model = nn.Sequential()
self.model.add_module('fc_0', nn.Linear(self.feature_size, self.hidden_sizes[0]))
self.model.add_module('tanh_0', nn.Tanh())
self.model.add_module('fc_1', nn.Linear(self.hidden_sizes[0], self.hidden_sizes[1]))
self.model.add_module('tanh_1', nn.Tanh())
self.model.add_module('fc_2', nn.Linear(self.hidden_sizes[1], 1))
def _features(self, path):
# compute regression features for the path
o = np.clip(path["observations"], -10, 10)
if o.ndim > 2:
o = o.reshape(o.shape[0], -1)
l = len(path["rewards"])
al = np.arange(l).reshape(-1, 1) / 1000.0
feat = np.concatenate([o, al, al**2, al**3, np.ones((l, 1))], axis=1)
return feat
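    # Added note: the four extra columns (al, al**2, al**3 and the constant 1)
    # are what make feature_size = observation_dim + 4 in __init__ above.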
def fit(self, paths, return_errors=False):
featmat = np.concatenate([self._features(path) for path in paths])
returns = np.concatenate([path["returns"] for path in paths])
dataset = TensorDataset(torch.FloatTensor(featmat), torch.FloatTensor(returns))
data_loader = DataLoader(dataset, batch_size=self.batch, shuffle=True)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
if return_errors:
error_before = self.get_error(data_loader)
for _ in range(self.epoch):
for batch_idx, (data, target) in enumerate(data_loader):
data = Variable(data)
target = Variable(target).float()
predictions = self.model(data)
loss = self.loss_fn(predictions, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if return_errors:
error_after = self.get_error(data_loader)
return error_before/np.sum(returns**2), error_after/
|
np.sum(returns**2)
|
numpy.sum
|
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
import mujoco_py
from mujoco_py.mjlib import mjlib
from PIL import Image
class PusherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self, xml_file=None, distractors=False):
utils.EzPickle.__init__(self)
print(xml_file)
if xml_file is None:
xml_file = 'pusher.xml'
self.include_distractors = distractors
mujoco_env.MujocoEnv.__init__(self, xml_file, 5)
def get_current_obs(self):
return self._get_obs()
def step(self, action):
return self._step(a=action)
def _step(self, a):
# normalize actions
if self.action_space is not None:
lb, ub = self.action_space.low, self.action_space.high
a = lb + (a + 1.) * 0.5 * (ub - lb)
a = np.clip(a, lb, ub)
vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
vec_2 = self.get_body_com("object") - self.get_body_com("goal")
reward_near = -
|
np.linalg.norm(vec_1)
|
numpy.linalg.norm
|
from contextlib import contextmanager
import numpy as np
import pytest
from pymatgen.electronic_structure.core import Spin
from amset.util import (
cast_dict_list,
cast_dict_ndarray,
cast_elastic_tensor,
cast_piezoelectric_tensor,
cast_tensor,
get_progress_bar,
groupby,
parse_deformation_potential,
parse_doping,
parse_ibands,
parse_temperatures,
tensor_average,
validate_settings,
)
@contextmanager
def does_not_raise():
yield
@pytest.mark.parametrize(
"tensor,expected",
[
pytest.param([[3, 0, 0], [0, 4, 0], [0, 0, 5]], 4, id="diagonal"),
pytest.param([[0, 3, 3], [3, 3, 4], [3, 4, 3]], 2, id="off-diagonal"),
],
)
def test_tensor_average(tensor, expected):
assert tensor_average(tensor) == expected
@pytest.mark.parametrize(
"elements, groups, expected",
[
pytest.param(
["a", "b", "1", "2", "c", "d"],
[2, 0, 1, 2, 0, 0],
[["b", "c", "d"], ["1"], ["a", "2"]],
id="mixed",
),
pytest.param(
[[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]],
[2, 2, 1, 0, 0],
[[[3, 3, 3], [4, 4, 4]], [[2, 2, 2]], [[0, 0, 0], [1, 1, 1]]],
id="coords",
),
],
)
def test_groupby(elements, groups, expected):
    output = groupby(elements, groups)
    output = [x.tolist() for x in output]
    assert output == expected
_expected_elastic = [
[
[[3.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 1.5, 0.0], [1.5, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 1.5], [0.0, 0.0, 0.0], [1.5, 0.0, 0.0]],
],
[
[[0.0, 1.5, 0.0], [1.5, 0.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 1.5], [0.0, 1.5, 0.0]],
],
[
[[0.0, 0.0, 1.5], [0.0, 0.0, 0.0], [1.5, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 1.5], [0.0, 1.5, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
],
]
_elastic_voigt = [
[3, 0, 0, 0, 0, 0],
[0, 3, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0],
[0, 0, 0, 1.5, 0, 0],
[0, 0, 0, 0, 1.5, 0],
[0, 0, 0, 0, 0, 1.5],
]
_expected_piezo = [
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0084], [0.0, 0.0084, 0.0]],
[[0.0, 0.0, 0.0084], [0.0, 0.0, 0.0], [0.0084, 0.0, 0.0]],
[[0.0, 0.0084, 0.0], [0.0084, 0.0, 0.0], [0.0, 0.0, 0.0]],
]
_piezo_voigt = [
[0, 0, 0, 0.0084, 0, 0],
[0, 0, 0, 0, 0.0084, 0],
[0, 0, 0, 0, 0, 0.0084],
]
@pytest.mark.parametrize(
"settings,expected",
[
pytest.param(
{},
{
"scattering_type": "auto",
"temperatures": np.array([300]),
"calculate_mobility": True,
"separate_mobility": True,
"mobility_rates_only": False,
},
id="empty",
),
pytest.param(
{"doping": "1E16:1E20:5", "temperatures": "100:500:5"},
{
"doping": np.array([1e16, 1e17, 1e18, 1e19, 1e20]),
"temperatures":
|
np.array([100, 200, 300, 400, 500])
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import dataclasses
from collections import defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, MaxPool2D, UpSampling2D, concatenate
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation
import cv2
# Build a U-Net (8 encoder layers, 8 decoder layers)
@dataclasses.dataclass
class CNN:
    input_shape: tuple # input image size
    classes: int # number of classes
def __post_init__(self):
        # the input image size must be a multiple of 32
assert self.input_shape[0]%32 == 0, 'Input size must be a multiple of 32.'
assert self.input_shape[1]%32 == 0, 'Input size must be a multiple of 32.'
    # encoder block
@staticmethod
def encoder(x, blocks, filters, pooling):
for i in range(blocks):
x = Conv2D(filters, (3, 3), padding='same', kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
if pooling:
return MaxPool2D(pool_size=(2, 2))(x), x
else:
return x
    # decoder block
@staticmethod
def decoder(x1, x2, blocks, filters):
x = UpSampling2D(size=(2, 2))(x1)
x = concatenate([x, x2], axis=-1)
for i in range(blocks):
x = Conv2D(filters, (3, 3), padding='same', kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
return x
def create(self):
        # encoder
        inputs = Input(shape=(self.input_shape[0], self.input_shape[1], 3)) # input layer
        x, x1 = self.encoder(inputs, blocks=1, filters=32, pooling=True) # layer 1
        x, x2 = self.encoder(x, blocks=1, filters=64, pooling=True) # layer 2
        x, x3 = self.encoder(x, blocks=1, filters=128, pooling=True) # layer 3
        x, x4 = self.encoder(x, blocks=1, filters=256, pooling=True) # layer 4
        x, x5 = self.encoder(x, blocks=2, filters=512, pooling=True) # layers 5 and 6
        x = self.encoder(x, blocks=2, filters=1024, pooling=False) # layers 7 and 8
        # decoder
        x = self.encoder(x, blocks=1, filters=1024, pooling=False) # layer 1
        x = self.decoder(x, x5, blocks=2, filters=512) # layers 2 and 3
        x = self.decoder(x, x4, blocks=1, filters=256) # layer 4
        x = self.decoder(x, x3, blocks=1, filters=128) # layer 5
        x = self.decoder(x, x2, blocks=1, filters=64) # layer 6
        ## layers 7 and 8
x = UpSampling2D(size=(2, 2))(x)
x = concatenate([x, x1], axis=-1)
x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', kernel_initializer='he_normal')(x)
x = Conv2D(self.classes, (1, 1), strides=(1, 1), padding='same', kernel_initializer='he_normal')(x)
outputs = Activation('softmax')(x)
return Model(inputs=inputs, outputs=outputs)
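# Quick shape check for CNN.create() (illustrative only): five pooling steps
# take 224 down to 7 and five upsampling steps bring it back, so
#   model = CNN(input_shape=(224, 224), classes=36).create()
#   model.output_shape  # -> (None, 224, 224, 36)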
def main():
    directory = 'CaDIS' # folder containing the images
    df_test = pd.read_csv('test.csv') # DataFrame describing the test data
    image_size = (224, 224) # input image size
    classes = 36 # number of classes
    # build the network
model = CNN(input_shape=image_size, classes=classes).create()
model.summary()
model.load_weights('model_weights.h5')
    # inference
dict_iou = defaultdict(list)
for i in tqdm(range(len(df_test)), desc='predict'):
img = cv2.imread(f'{directory}/{df_test.at[i, "filename"]}')
height, width = img.shape[:2]
img = cv2.resize(img, image_size, interpolation=cv2.INTER_LANCZOS4)
img = np.array(img, dtype=np.float32)
img *= 1./255
img = np.expand_dims(img, axis=0)
label = cv2.imread(f'{directory}/{df_test.at[i, "label"]}', cv2.IMREAD_GRAYSCALE)
pred = model.predict(img)[0]
pred = cv2.resize(pred, (width, height), interpolation=cv2.INTER_LANCZOS4)
        ## compute the IoU
pred =
|
np.argmax(pred, axis=2)
|
numpy.argmax
|
import gurobipy as gp
from gurobipy import GRB
import numpy as np
from scipy.optimize import linprog
def opt_plan_discrete_multiasset(values11,prob11,values12,prob12,values21,prob21,values22,prob22,func,prob1=None,
prob2=None,onedim=True,minimize=True,schmithals=False,correlation_1=False,
corr_1=0.5,correlation_2=False,corr_2=0.5,
basket_prices = None,basket_strikes=None,basket_time = 1,
same_correlation = False, increasing_prices = False,
proportional = False,prop_constant_lower = 0.02,prop_constant_upper = 0.4, t_1 = 1, t_2 =2,prop_range = 0.005,
q_corr_greater_p = False, q_corr_greater_p_const = 0,
copula_indices = [], copula_strikes=[], copula_prices=[],
martingale_condition = True
):
n11 = len(values11) # Length of the vector with the values of the first marginal, 1st security
n21 = len(values21) # Length of the vector with the values of the 2nd marginal, 1st security
n12 = len(values12) # Length of the vector with the values of the first marginal, 2nd security
n22 = len(values22) # Length of the vector with the values of the 2nd marginal, 2nd security
# Conversion to np arrays:
values11 = np.array(values11)
prob11 = np.array(prob11)
values12 = np.array(values12)
prob12 = np.array(prob12)
values21 = np.array(values21)
prob21 = np.array(prob21)
values22 = np.array(values22)
prob22 = np.array(prob22)
prob1 = np.array(prob1)
prob2 = np.array(prob2)
    # Scaling the probabilities
prob11 = prob11 / np.sum(prob11)
prob12 = prob12 / np.sum(prob12)
prob21 = prob21 / np.sum(prob21)
prob22 = prob22 / np.sum(prob22)
# Scaling to the same mean
mean11 = np.sum(values11*prob11)
mean12 = np.sum(values12*prob12)
mean21 = np.sum(values21*prob21)
mean22 = np.sum(values22*prob22)
values11 = values11 + 0.5*(mean21-mean11)
values21 = values21 + 0.5*(mean11-mean21)
values12 = values12 + 0.5*(mean22-mean12)
values22 = values22 + 0.5*(mean12-mean22)
# Initiate the Gurobi Model
m = gp.Model("m")
# No Output
m.setParam( 'OutputFlag', False )
# The measure variable
    x = m.addMVar(shape=int(n11*n12*n21*n22), lb=0, ub=1, vtype=GRB.CONTINUOUS, name="x")
if correlation_1:
S_0_1 = np.sum(values11*prob11)
S_0_2 = np.sum(values12*prob12)
second_moment_1 = np.sum(values11**2*prob11)
second_moment_2 = np.sum(values12**2*prob12)
#r = np.concatenate((r,corr_1*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)+S_0_1*S_0_2))
if correlation_2:
S_0_1 = np.sum(values21*prob21)
S_0_2 = np.sum(values22*prob22)
second_moment_1 = np.sum(values21**2*prob21)
second_moment_2 = np.sum(values22**2*prob22)
#r = np.concatenate((r,corr_2*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)+S_0_1*S_0_2))
if martingale_condition:
if schmithals:
for i in range(n11):
a = np.zeros((n11,n12,n21,n22))
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = values21[k]-values11[i]
#A[i,:] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == 0)
for j in range(n12):
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = values22[l]-values12[j]
#A[n11*n12+j,:] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == 0)
elif schmithals == False:
for i in range(n11):
for j in range(n12):
a = np.zeros((n11,n12,n21,n22))
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = values21[k]-values11[i]
#A[i+(j)*(n11),:] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == 0)
for j in range(n12):
for i in range(n11):
a = np.zeros((n11,n12,n21,n22))
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = values22[l]-values12[j]
#A[n11*n12+i+(j)*(n11),:] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == 0)
# marginal Constraints
for i in range(n11):
a = np.zeros((n11,n12,n21,n22))
a[i,:,:,:] = 1
#A[2*n11*n12+i,] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == prob11[i])
for i in range(n12):
a = np.zeros((n11,n12,n21,n22))
a[:,i,:,:] = 1
#A[2*n11*n12+n11+i,] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == prob12[i])
for i in range(n21):
a = np.zeros((n11,n12,n21,n22))
a[:,:,i,:] = 1
#A[2*n11*n12+n11+n12+i,] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == prob21[i])
for i in range(n22):
a = np.zeros((n11,n12,n21,n22))
a[:,:,:,i] = 1
#A[2*n11*n12+n11+n12+n21+i,] = np.reshape(a, n11*n12*n21*n22)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == prob22[i])
# Additional Constraints
if onedim == False:
for i in range(n11):
for j in range(n12):
a = np.zeros((n11,n12,n21,n22))
a[i,j,:,:] = 1
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == prob1[i+j*n11])
#A[2*n11*n12+n11+n12+n21+n22+i+(j)*n11,:] = np.reshape(a, n11*n12*n21*n22)
for i in range(n21):
for j in range(n22):
a = np.zeros((n11,n12,n21,n22))
a[:,:,i,j] = 1
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == prob2[i+j*n21])
#A[2*n11*n12+n11+n12+n21+n22+n11*n12+i+(j)*n21,:] = np.reshape(a, n11*n12*n21*n22)
if correlation_1:
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = values11[i]*values12[j]
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == corr_1*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)+S_0_1*S_0_2)
#A = np.vstack([A, np.reshape(a, n11*n12*n21*n22)])
if correlation_2:
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = values21[k]*values22[l]
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == corr_2*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)+S_0_1*S_0_2)
#A = np.vstack([A, np.reshape(a, n11*n12*n21*n22)])
# Directly incorporate Basket Option Prices
if basket_prices != None and basket_strikes != None and basket_time == 1:
for p,s in zip(basket_prices,basket_strikes):
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = max(0.5*values11[i]+0.5*values12[j]-s,0)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == p)
if basket_prices != None and basket_strikes != None and basket_time == 2:
for p,s in zip(basket_prices,basket_strikes):
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = max(0.5*values21[k]+0.5*values22[l]-s,0)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == p)
# Assumption on same Correlation
if same_correlation:
S_0_1 = np.sum(values11*prob11)
S_0_2 = np.sum(values12*prob12)
second_moment_11 = np.sum(values11**2*prob11)
second_moment_12 = np.sum(values12**2*prob12)
second_moment_21 = np.sum(values21**2*prob21)
second_moment_22 = np.sum(values22**2*prob22)
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = (values11[i]*values12[j]-S_0_1*S_0_2)/(np.sqrt(second_moment_11-S_0_1**2)*np.sqrt(second_moment_12-S_0_2**2))-(values21[k]*values22[l]-S_0_1*S_0_2)/(np.sqrt(second_moment_21-S_0_1**2)*np.sqrt(second_moment_22-S_0_2**2))
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == 0)
if increasing_prices:
S_0_1 = np.sum(values11*prob11)
S_0_2 = np.sum(values12*prob12)
S_0 = S_0_1 + S_0_2
a = np.zeros((n11,n12,n21,n22))
for K in np.linspace(0,S_0*2,100):
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = max(values21[k]+values22[l]-K,0)-max(values11[i]+values12[j]-K,0)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x >= 0)
if proportional:
S_0_1 = np.sum(values11*prob11)
S_0_2 = np.sum(values12*prob12)
S_0 = S_0_1 + S_0_2
a = np.zeros((n11,n12,n21,n22))
b = np.zeros((n11,n12,n21,n22))
for K in np.linspace(S_0*(1-prop_range),S_0*(1+prop_range),100):
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = max(values21[k]+values22[l]-K,0)
b[i,j,k,l] = max(values11[i]+values12[j]-K,0)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x >= prop_constant_lower*np.sqrt(t_2)*S_0)
m.addConstr(np.reshape(b, n11*n12*n21*n22) @ x >= prop_constant_lower*np.sqrt(t_1)*S_0)
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x <= prop_constant_upper*np.sqrt(t_2)*S_0)
m.addConstr(np.reshape(b, n11*n12*n21*n22) @ x <= prop_constant_upper*np.sqrt(t_1)*S_0)
if q_corr_greater_p:
S_0_1 = np.sum(values11*prob11)
S_0_2 = np.sum(values12*prob12)
second_moment_11 = np.sum(values11**2*prob11)
second_moment_12 = np.sum(values12**2*prob12)
second_moment_21 = np.sum(values21**2*prob21)
second_moment_22 = np.sum(values22**2*prob22)
a = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = (values11[i]*values12[j]-S_0_1*S_0_2)/(np.sqrt(second_moment_11-S_0_1**2)*np.sqrt(second_moment_12-S_0_2**2))
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x >= q_corr_greater_p_const)
counter = 0
for ind in copula_indices:
# indicator for the indices
i11 = (ind[0]==1)*(ind[2]==1)+(ind[1]==1)*(ind[3]==1)
i12 = (ind[0]==1)*(ind[2]==2)+(ind[1]==1)*(ind[3]==2)
i21 = (ind[0]==2)*(ind[2]==1)+(ind[1]==2)*(ind[3]==1)
i22 = (ind[0]==2)*(ind[2]==2)+(ind[1]==2)*(ind[3]==2)
a = np.zeros((n11,n12,n21,n22))
for K in range(len(copula_strikes[counter])):
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
a[i,j,k,l] = (np.max([values11[i]*i11,values12[j]*i12,values21[k]*i21,values22[l]*i22]) <= copula_strikes[counter][K])
m.addConstr(np.reshape(a, n11*n12*n21*n22) @ x == copula_prices[counter][K])
counter+=1
costs = np.zeros((n11,n12,n21,n22))
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
costs[i,j,k,l] = func(values11[i],values12[j],values21[k],values22[l])
costs = np.reshape(costs,n11*n12*n21*n22)
#print(costs)
if minimize == True:
m.setObjective(costs @ x, GRB.MINIMIZE)
elif minimize == False:
m.setObjective(costs @ x, GRB.MAXIMIZE)
m.optimize()
price = m.objVal
q = [v.x for v in m.getVars()]
return price, q
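# Minimal usage sketch (hypothetical two-point marginals, illustration only;
# requires a working Gurobi licence): lower price bound for a call on the first
# asset at the second date under the martingale constraints.
#   payoff = lambda s11, s12, s21, s22: max(s21 - 100.0, 0.0)
#   price, q = opt_plan_discrete_multiasset(
#       values11=[90, 110], prob11=[0.5, 0.5],
#       values12=[95, 105], prob12=[0.5, 0.5],
#       values21=[80, 120], prob21=[0.5, 0.5],
#       values22=[90, 110], prob22=[0.5, 0.5],
#       func=payoff, minimize=True)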
def dual_multiasset_2dim(values11,prob11,values12,prob12,values21,prob21,values22,prob22,func,prob1=None,
prob2=None,onedim=True,minimize=True,schmithals=False,correlation_1=False,
corr_1=0.5,correlation_2=False,corr_2=0.5,
basket_prices = None,basket_strikes=None):
n11 = len(values11) # Length of the vector with the values of the first marginal, 1st security
n21 = len(values21) # Length of the vector with the values of the 2nd marginal, 1st security
n12 = len(values12) # Length of the vector with the values of the first marginal, 2nd security
n22 = len(values22) # Length of the vector with the values of the 2nd marginal, 2nd security
# Conversion to np.arrays:
values11 = np.array(values11)
prob11 = np.array(prob11)
values12 = np.array(values12)
prob12 = np.array(prob12)
values21 = np.array(values21)
prob21 = np.array(prob21)
values22 = np.array(values22)
prob22 = np.array(prob22)
prob1 = np.array(prob1)
prob2 = np.array(prob2)
costs = np.zeros((n11,n12,n21,n22))
if correlation_1:
S_0_1 = np.sum(values11*prob11)
S_0_2 = np.sum(values12*prob12)
second_moment_1 = np.sum(values11**2*prob11)
second_moment_2 = np.sum(values12**2*prob12)
#r = np.concatenate((r,corr_1*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)+S_0_1*S_0_2))
if correlation_2:
S_0_1 = np.sum(values21*prob21)
S_0_2 = np.sum(values22*prob22)
second_moment_1 = np.sum(values21**2*prob21)
second_moment_2 = np.sum(values22**2*prob22)
#r = np.concatenate((r,corr_2*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)+S_0_1*S_0_2))
# Defining Cost Function
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
costs[i,j,k,l] = func(values11[i],values12[j],values21[k],values22[l])
# Defining Length of the Variables
if schmithals:
length_of_trading_variables = n11+n12
elif schmithals == False:
length_of_trading_variables = 2*n11*n12
if correlation_1:
length_of_trading_variables+=1
if correlation_2:
length_of_trading_variables+=1
if onedim:
length_of_static_variables = n11+n12+n21+n22
elif onedim == False:
length_of_static_variables = n11*n12 + n21*n22
nr_of_variables = length_of_static_variables + length_of_trading_variables
# Gurobi Model
m = gp.Model("m")
m.setParam( 'OutputFlag', False )
x = m.addMVar(shape=nr_of_variables,lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name="x")
# Defining the Conditions
for i in range(n11):
for j in range(n12):
for k in range(n21):
for l in range(n22):
if onedim == True:
a1 = np.repeat(0,n11)
a1[i] = 1
a2 = np.repeat(0,n12)
a2[j] = 1
a3 = np.repeat(0,n21)
a3[k] = 1
a4 = np.repeat(0,n22)
a4[l] = 1
if schmithals:
lhs = np.concatenate((a1,a2,a3,a4,a1*(values21[k]-values11[i]),a2*(values22[l]-values12[j])))
elif schmithals == False:
a5= np.zeros((n11,n12))
a5[i,j] = 1
a5=np.reshape(a5,n11*n12)
lhs = np.concatenate((a1,a2,a3,a4,a5*(values21[k]-values11[i]),a5*(values22[l]-values12[j])))
if correlation_1:
add = np.reshape(values11[i]*values12[j]
-corr_1*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)-S_0_1*S_0_2,1)
lhs = np.concatenate((lhs,add))
if correlation_2:
add = np.reshape(values21[k]*values22[l]-corr_2*np.sqrt(second_moment_1-S_0_1**2)*np.sqrt(second_moment_2-S_0_2**2)-S_0_1*S_0_2,1)
lhs = np.concatenate((lhs,add))
if minimize == True:
m.addConstr(lhs @ x <= np.array(costs[i,j,k,l]))
elif minimize == False:
m.addConstr(lhs @ x >= np.array(costs[i,j,k,l]))
elif onedim == False:
a1 = np.repeat(0,n11)
a1[i] = 1
a2 = np.repeat(0,n12)
a2[j] = 1
a3 = np.zeros((n11,n12))
a3[i,j] = 1
a3 = np.reshape(a3,n11*n12)
a4 = np.zeros((n21,n22))
a4[k,l] = 1
a4 =
|
np.reshape(a4,n21*n22)
|
numpy.reshape
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import PowerNorm
from scipy.interpolate import interpn
from matplotlib import cm
from matplotlib.colors import Normalize
import seaborn as sns
import matplotlib
matplotlib.use('Tkagg')
import matplotlib as mpl
mpl.rc('font',**{'family':'sans-serif', 'serif':['Computer Modern Serif'],'sans-serif':['Helvetica'], 'size':15,'weight':'bold'})
mpl.rc('axes',**{'labelweight':'bold', 'linewidth':2.0})
mpl.rc('ytick',**{'major.pad':22, 'color':'k'})
mpl.rc('xtick',**{'major.pad':10,})
mpl.rc('mathtext',**{'default':'regular','fontset':'cm','bf':'monospace:bold'})
mpl.rc('text', **{'usetex':True})
mpl.rc('text.latex',preamble=r'\usepackage{cmbright},\usepackage{relsize},'+r'\usepackage{upgreek}, \usepackage{amsmath}')
mpl.rc('contour', **{'negative_linestyle':'solid'})
def CustomPlot(x , y, ax = None, sort = True, bins=50, **kwargs ) :
"""
Scatter plot colored by 2d histogram
"""
if ax is None :
fig , ax = plt.subplots()
data , x_e, y_e = np.histogram2d( x, y, bins = bins, density = True )
z = interpn( ( 0.5*(x_e[1:] + x_e[:-1]) , 0.5*(y_e[1:]+y_e[:-1]) ) , data , np.vstack([x,y]).T , method = "splinef2d", bounds_error = False)
#To be sure to plot all data
z[np.where(np.isnan(z))] = 0.0
# Sort the points by density, so that the densest points are plotted last
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
fig,ax = plt.subplots(figsize=(12,10))
ax.scatter(x, y, c=z, s=2)
norm = Normalize(vmin = np.min(z), vmax = np.max(z))
#cbar = fig.colorbar(cm.ScalarMappable(norm = norm), ax=ax)
#cbar.ax.set_ylabel('Density')
return ax
def FormatLabel(LabelsX):
LabelsX = np.array(LabelsX)
Diff=np.mean(np.diff(LabelsX))
if min(abs(LabelsX))<10.0 and min(abs(LabelsX))>0.001:
LabelStr = ["%.2f" %(Value) for Value in LabelsX]
elif min(abs(LabelsX))<1000 and min(abs(LabelsX))>1:
LabelStr = ["%.0d" %(Value) for Value in LabelsX]
else:
LabelStr = ["%.2e" %(Value) for Value in LabelsX]
return LabelStr
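# Examples (illustrative): FormatLabel([0.5, 1.25]) returns ['0.50', '1.25'],
# while FormatLabel([1e-5, 2e-5]) falls through to scientific notation and
# returns ['1.00e-05', '2.00e-05'].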
def CustomCornerPlot(Data, Parameters, Values=None):
NDim = len(Data)
FigDim = NDim*2.5
if len(Parameters)>0:
try:
assert len(Parameters) == NDim
except:
raise("The number of should match the dimension of the data provided.")
fig, ax = plt.subplots(NDim, NDim, figsize=(FigDim, FigDim), dpi=80)
for i in range(NDim):
for j in range(NDim):
if j<i:
NBins = 10
counts,xbins,ybins=np.histogram2d(Data[i,:], Data[j,:],bins=NBins)
Levels = np.percentile(counts,[68.27,95.45,99.73])
#labels = np.round(np.linspace(min(xbins), max(xbins),6),2)
#good options for colormap are gist_earth_r, gray_r
ax[i,j].hist2d(Data[i,:], Data[j,:], orientation='horizontal', cmap='Reds', bins = 2*NBins)#, norm=PowerNorm(gamma=0.5))
ax[i,j].contour(counts.transpose(),Levels,extent=[xbins.min(),xbins.max(),
ybins.min(),ybins.max()],linewidths=2,cmap="Reds",
linestyles='-')
if Values:
ax[i,j].plot(Values[i],Values[j], "r+", markersize=30, markeredgewidth=3)
#Format the labels
NumLabels = 5
StepSizeX = (max(xbins) - min(xbins))/NumLabels
StepSizeY = (max(ybins) - min(ybins))/NumLabels
LabelsX = np.linspace(min(xbins)+StepSizeX, max(xbins)-StepSizeX, NumLabels)
LabelsXStr = FormatLabel(LabelsX)
LabelsY = np.linspace(min(ybins)+StepSizeY, max(ybins)-StepSizeY, NumLabels)
                LabelsYStr = FormatLabel(LabelsY)
ax[i,j].set_xticks(LabelsX)
ax[i,j].set_xticklabels(LabelsXStr, rotation=45)
ax[i,j].set_xlim(min(xbins), max(xbins))
ax[i,j].set_yticks(LabelsY)
ax[i,j].set_yticklabels(LabelsYStr, rotation=45)
ax[i,j].set_ylim(min(ybins), max(ybins))
ax[i,j].tick_params(which="both", direction="in", pad=5)
elif i==j:
print("The value of i is::", i)
print("The value of j is::", j)
print("The shape of data is given by::", np.shape(Data))
print("The length of the data is given by::", len(Data[i, :]))
ax[i,j].hist(Data[i,:], fill=False, histtype='step', linewidth=2, color="navy", normed=True)
PercentileValues = np.percentile(Data[i,:],[15.8, 50.0, 84.2])
for counter_pc, Value in enumerate(PercentileValues):
if counter_pc == 1:
ax[i,j].axvline(x=Value, color="red", lw=1.5)
else:
ax[i,j].axvline(x=Value, color="cyan", linestyle="--", lw=2.5)
#assign the title
Median = PercentileValues[1]
if Median<100 and Median>0.001:
MedianStr = "%0.2f" %Median
else:
MedianStr = "%0.2e" %Median
UpperError = PercentileValues[2] - PercentileValues[1]
if UpperError<100 and UpperError>0.001:
UpperErrorStr = "%0.2f" %UpperError
else:
UpperErrorStr = "%0.2e" %UpperError
LowerError = PercentileValues[1] - PercentileValues[0]
if LowerError<100 and LowerError>0.001:
LowerErrorStr = "%0.2f" %LowerError
else:
LowerErrorStr = "%0.2e" %LowerError
Title = Parameters[i]+ " = %s$^{+%s}_{-%s}$" %(MedianStr, UpperErrorStr, LowerErrorStr)
print(Title)
ax[i,j].set_title(Title)
ax[i,j].tick_params(which="both", direction="in", pad=5)
else:
ax[i,j].set_visible(False)
ax[i,j].tick_params(which="both", direction="in", pad=5)
#Now for the ylabels
if j!=0 or i==j:
ax[i,j].set_yticklabels([])
#Now for the xlabels
if i!=NDim-1 or i==j:
ax[i,j].set_xticklabels([])
#assign the title
#
plt.subplots_adjust(wspace=0.025, hspace=0.025, left = 0.05,
right = 0.95, bottom = 0.05, top = 0.95)
plt.savefig("Trial.png")
plt.savefig("Trial.pdf", format='pdf')
plt.show()
def DoubleCustomCornerPlot(Data1, Data2, Parameters, Values=None, CMapList=None, colorList=None, SaveName=None):
if not(CMapList):
CMap = ["Reds", "Blues"]
colorList = ["red", "blue"]
else:
CMap = CMapList
colorList = colorList
NDim = len(Data1)
assert len(Data2) == NDim
FigDim = NDim*2.5
if len(Parameters)>0:
try:
assert len(Parameters) == NDim
except:
raise("The number of should match the dimension of the data provided.")
fig, ax = plt.subplots(NDim, NDim, figsize=(FigDim, FigDim), dpi=80)
for i in range(NDim):
for j in range(NDim):
if j<i:
NBins = 10
#counts1,xbins1,ybins1=np.histogram2d(Data1[i,:], Data1[j,:],bins=NBins)
#counts2,xbins2,ybins2=np.histogram2d(Data2[i,:], Data2[j,:],bins=NBins)
x1 = Data1[i,:]
y1 = Data1[j,:]
x2 = Data2[i,:]
y2 = Data2[j,:]
#For the first set of data
data1, x_e1, y_e1 = np.histogram2d(x1, y1, bins=NBins, density=True )
z1 = interpn( ( 0.5*(x_e1[1:] + x_e1[:-1]) , 0.5*(y_e1[1:]+y_e1[:-1]) ) , data1, np.vstack([x1,y1]).T , method = "splinef2d", bounds_error = False)
#To be sure to plot all data
z1[np.where(np.isnan(z1))] = 0.0
# Sort the points by density, so that the densest points are plotted last
idx1 = z1.argsort()
x1, y1, z1 = x1[idx1], y1[idx1], z1[idx1]
#For the second set of data
data2, x_e2, y_e2 = np.histogram2d(x2, y2, bins=NBins, density=True )
z2 = interpn( ( 0.5*(x_e2[1:] + x_e2[:-1]) , 0.5*(y_e2[1:]+y_e2[:-1]) ) , data2, np.vstack([x2,y2]).T , method = "splinef2d", bounds_error = False)
#To be sure to plot all data
z2[np.where(np.isnan(z2))] = 0.0
# Sort the points by density, so that the densest points are plotted last
idx2 = z2.argsort()
x2, y2, z2 = x2[idx2], y2[idx2], z2[idx2]
#Levels1 = np.percentile(counts1,[68.27,95.45,99.73])
#Levels2 = np.percentile(counts2,[68.27,95.45,99.73])
#good options for colormap are gist_earth_r, gray_r
#norm = Normalize(vmin = np.min(z), vmax = np.max(z))
ax[i,j].scatter(y1, x1, c=z1, s=2, cmap=CMap[0], alpha=0.1)
ax[i,j].scatter(y2, x2, c=z2, s=2, cmap=CMap[1], alpha=0.1)
#ax[i,j].contourf((y1, x1), z1)
#ax[i,j].contourf((y2, x2), z2)
if Values:
ax[i,j].plot(Values[j], Values[i], "k+", lw=2, markersize=50, markeredgewidth=3)
elif i==j:
ax[i,j].hist(Data1[i,:], fill=False, histtype='step', linewidth=2, color=colorList[0], normed=True)
ax[i,j].hist(Data2[i,:], fill=False, histtype='step', linewidth=2, color=colorList[1], normed=True)
PercentileValues1 = np.percentile(Data1[i,:],[15.8, 50.0, 84.2])
PercentileValues2 = np.percentile(Data2[i,:],[15.8, 50.0, 84.2])
for counter_pc in range(len(PercentileValues1)):
Value1 = PercentileValues1[counter_pc]
Value2 = PercentileValues2[counter_pc]
if counter_pc == 1:
ax[i,j].axvline(x=Value1, color=colorList[0], linestyle=":", lw=1.5, alpha=0.90)
ax[i,j].axvline(x=Value2, color=colorList[1], linestyle=":", lw=1.5, alpha=0.90)
if Values:
ax[i,j].axvline(Values[i],Values[j], color="black", lw=4, markersize=100)
else:
ax[i,j].axvline(x=Value1, color=colorList[0], linestyle="--", alpha=0.5, lw=2.5)
ax[i,j].axvline(x=Value2, color=colorList[1], linestyle="--", alpha=0.5, lw=2.5)
#assign the title
Median = PercentileValues1[1]
if np.abs(Median)<500 and np.abs(Median)>0.001:
MedianStr = "%0.2f" %Median
else:
MedianStr = "%0.2e" %Median
UpperError = PercentileValues1[2] - PercentileValues1[1]
if np.abs(UpperError)<100 and
|
np.abs(UpperError)
|
numpy.abs
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
# Copyright 2021 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file was modified by <NAME> in 2021
"""Utility functions that are useful for the different metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import sklearn
from sklearn import ensemble
from sklearn import linear_model
from sklearn import model_selection
import gin.tf
def generate_batch_factor_code(ground_truth_data, representation_function,
num_points, random_state, batch_size):
"""Sample a single training sample based on a mini-batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observation as input and
outputs a representation.
num_points: Number of points to sample.
random_state: Numpy random state used for randomness.
batch_size: Batchsize to sample points.
Returns:
representations: Codes (num_codes, num_points)-np array.
factors: Factors generating the codes (num_factors, num_points)-np array.
"""
representations = None
factors = None
i = 0
while i < num_points:
num_points_iter = min(num_points - i, batch_size)
current_factors, current_observations = \
ground_truth_data.sample(num_points_iter, random_state)
if i == 0:
factors = current_factors
representations = representation_function(current_observations)
else:
factors = np.vstack((factors, current_factors))
representations = np.vstack((representations,
representation_function(
current_observations)))
i += num_points_iter
return np.transpose(representations), np.transpose(factors)
def split_train_test(observations, train_percentage):
"""Splits observations into a train and test set.
Args:
observations: Observations to split in train and test. They can be the
representation or the observed factors of variation. The shape is
(num_dimensions, num_points) and the split is over the points.
train_percentage: Fraction of observations to be used for training.
Returns:
observations_train: Observations to be used for training.
observations_test: Observations to be used for testing.
"""
num_labelled_samples = observations.shape[1]
num_labelled_samples_train = int(
np.ceil(num_labelled_samples * train_percentage))
num_labelled_samples_test = num_labelled_samples - num_labelled_samples_train
observations_train = observations[:, :num_labelled_samples_train]
observations_test = observations[:, num_labelled_samples_train:]
assert observations_test.shape[1] == num_labelled_samples_test, \
"Wrong size of the test set."
return observations_train, observations_test
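# Example (illustrative): splitting 10 points of a 2-dimensional representation
# with train_percentage=0.8 keeps the first 8 columns for training.
#   obs = np.arange(20).reshape(2, 10)
#   train, test = split_train_test(obs, 0.8)
#   train.shape, test.shape  # -> ((2, 8), (2, 2))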
def obtain_representation(observations, representation_function, batch_size):
""""Obtain representations from observations.
Args:
observations: Observations for which we compute the representation.
representation_function: Function that takes observation as input and
outputs a representation.
batch_size: Batch size to compute the representation.
Returns:
representations: Codes (num_codes, num_points)-Numpy array.
"""
representations = None
num_points = observations.shape[0]
i = 0
while i < num_points:
num_points_iter = min(num_points - i, batch_size)
current_observations = observations[i:i + num_points_iter]
if i == 0:
representations = representation_function(current_observations)
else:
representations = np.vstack((representations,
representation_function(
current_observations)))
i += num_points_iter
return np.transpose(representations)
def discrete_mutual_info(mus, ys):
"""Compute discrete mutual information."""
num_codes = mus.shape[0]
num_factors = ys.shape[0]
m = np.zeros([num_codes, num_factors])
for i in range(num_codes):
for j in range(num_factors):
m[i, j] = sklearn.metrics.mutual_info_score(ys[j, :], mus[i, :])
return m
def discrete_entropy(ys):
"""Compute discrete mutual information."""
num_factors = ys.shape[0]
h = np.zeros(num_factors)
for j in range(num_factors):
h[j] = sklearn.metrics.mutual_info_score(ys[j, :], ys[j, :])
return h
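# Sanity check (illustrative): sklearn's mutual_info_score of a variable with
# itself equals its entropy in nats, so a fair binary factor gives log(2):
#   discrete_entropy(np.array([[0, 1, 0, 1]]))  # -> array([0.6931...])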
@gin.configurable(
"discretizer", denylist=["target"])
def make_discretizer(target, num_bins=gin.REQUIRED,
discretizer_fn=gin.REQUIRED):
"""Wrapper that creates discretizers."""
return discretizer_fn(target, num_bins)
@gin.configurable("histogram_discretizer", denylist=["target"])
def _histogram_discretize(target, num_bins=gin.REQUIRED):
"""Discretization based on histograms."""
discretized =
|
np.zeros_like(target)
|
numpy.zeros_like
|
import numpy as np
from numpy import zeros, arange, searchsorted, cross
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.field_writer_16 import print_card_16
from pyNastran.bdf.bdf_interface.assign_type import (integer, integer_or_blank,
double_or_blank)
from pyNastran.dev.bdf_vectorized.cards.vectorized_card import VectorizedCard
class CAERO1(VectorizedCard):
"""
Defines an aerodynamic macro element (panel) in terms of two leading edge
locations and side chords. This is used for Doublet-Lattice theory for
subsonic aerodynamics and the ZONA51 theory for supersonic aerodynamics.::
+--------+-----+-----+----+-------+--------+--------+--------+------+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+--------+-----+-----+----+-------+--------+--------+--------+------+
| CAERO1 | EID | PID | CP | NSPAN | NCHORD | LSPAN | LCHORD | IGID |
+--------+-----+-----+----+-------+--------+--------+--------+------+
| | X1 | Y1 | Z1 | X12 | X4 | Y4 | Z4 | X43 |
+--------+-----+-----+----+-------+--------+--------+--------+------+
"""
type = 'CAERO1'
def __init__(self, model):
"""
::
1
| \
| \
| \
| 4
| |
| |
2------3
"""
VectorizedCard.__init__(self, model)
def add_card(self, card, comment=''):
i = self.i
self.element_id[i] = integer(card, 1, 'element_id')
self.property_id[i] = integer(card, 2, 'property_id')
self.coord_id[i] = integer_or_blank(card, 3, 'cp', 0)
self.nspan[i] = integer_or_blank(card, 4, 'nspan', 0)
self.nchord[i] = integer_or_blank(card, 5, 'nchord', 0)
#if self.nspan==0:
self.lspan[i] = integer_or_blank(card, 6, 'lspan', 0)
#if self.nchord==0:
self.lchord[i] = integer_or_blank(card, 7, 'lchord', 0)
self.igid[i] = integer(card, 8, 'igid')
self.p1[i, :] = [double_or_blank(card, 9, 'x1', 0.0),
double_or_blank(card, 10, 'y1', 0.0),
double_or_blank(card, 11, 'z1', 0.0)]
self.x12[i] = double_or_blank(card, 12, 'x12', 0.)
self.p4[i, :] = [double_or_blank(card, 13, 'x4', 0.0),
double_or_blank(card, 14, 'y4', 0.0),
double_or_blank(card, 15, 'z4', 0.0)]
self.x43[i] = double_or_blank(card, 16, 'x43', 0.)
assert len(card) <= 17, 'len(CAERO1 card) = %i\ncard=%s' % (len(card), card)
self.i += 1
def allocate(self, ncards):
self.n = ncards
float_fmt = self.model.float_fmt
#: Element ID
self.element_id = zeros(ncards, 'int32')
        #: Property ID of a PAERO1
self.property_id = zeros(ncards, 'int32')
#: Coordinate system for locating point 1.
self.coord_id = zeros(ncards, 'int32')
#: Node IDs
        self.node_ids = zeros((ncards, 4), 'int32')
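        # Plausible continuation (assumption -- the remainder of allocate() is
        # not shown in the source): the vectorized-card pattern preallocates one
        # typed array per field and add_card() then fills row self.i, e.g.:
        #   self.nspan = zeros(ncards, 'int32')
        #   self.p1 = zeros((ncards, 3), float_fmt)
        #   self.x12 = zeros(ncards, float_fmt)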
#!/usr/bin/env python3
import sys
from itertools import combinations
import numpy as np
positions = []
velocities = []
for line in sys.stdin:
line = line.rstrip().replace('<', '').replace('>', '').split(',')
x = int(line[0].split('=')[1])
y = int(line[1].split('=')[1])
z = int(line[2].split('=')[1])
positions.append([x, y, z])
velocities.append([0, 0, 0])
pos = np.array(positions, dtype = np.int64)
vel = np.array(velocities, dtype = np.int64)
def gravity(ra, rb, va, vb):
    sa = np.sign(rb - ra)
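    # Plausible continuation (assumption -- the rest of this script is not
    # shown): the sign of the position difference is the unit gravity pull per
    # axis, applied symmetrically before integrating the velocities:
    #   va += sa
    #   vb -= sa
    # and each time step then looks roughly like:
    #   for a, b in combinations(range(len(pos)), 2):
    #       gravity(pos[a], pos[b], vel[a], vel[b])
    #   pos += vel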
from comet_ml import Experiment
import comet_ml
import _pickle as pickle
import os
import time
import numpy as np
import shutil
import tensorflow as tf
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesInUse
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesLimit
import reader
from common import Common
import re
class Model:
topk = 10
num_batches_to_log = 100
def __init__(self, config):
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "0"
# tf.logging.set_verbosity(tf.logging.INFO)
tf.random.set_random_seed(1234)
self.experiment = Experiment(
api_key="<KEY>",
project_name="code2seq",
workspace="cspiess",
disabled=False
)
self.evaluation_counter = 0
self.config = config
self.sess = tf.Session() # TensorFlow
self.eval_queue = None
self.predict_queue = None
self.eval_placeholder = None
self.predict_placeholder = None
self.eval_predicted_indices_op, self.eval_top_values_op, self.eval_true_target_strings_op, self.eval_topk_values = (
None,
None,
None,
None,
)
self.predict_top_indices_op, self.predict_top_scores_op, self.predict_target_strings_op = (
None,
None,
None,
)
self.subtoken_to_index = None
if config.LOAD_PATH:
self.load_model(sess=None)
print("No loading")
else:
with open("{}.dict.c2s".format(config.TRAIN_PATH), "rb") as file:
subtoken_to_count = pickle.load(file)
node_to_count = pickle.load(file)
target_to_count = pickle.load(file)
max_contexts = pickle.load(file)
self.num_training_examples = pickle.load(file)
print("Dictionaries loaded.")
if self.config.DATA_NUM_CONTEXTS <= 0:
self.config.DATA_NUM_CONTEXTS = max_contexts
self.subtoken_to_index, self.index_to_subtoken, self.subtoken_vocab_size, _ = Common.load_vocab_from_dict(
subtoken_to_count,
add_values=[Common.PAD, Common.UNK],
max_size=config.SUBTOKENS_VOCAB_MAX_SIZE,
kind="subtoken"
)
print("Subtoken to count %d" % len(subtoken_to_count))
print("target to count %d" % len(target_to_count))
print("node to count %d" % len(node_to_count))
print("Loaded subtoken vocab. size: %d" % self.subtoken_vocab_size)
self.target_to_index, self.index_to_target, self.target_vocab_size, _ = Common.load_vocab_from_dict(
target_to_count,
add_values=[Common.PAD, Common.UNK, Common.SOS],
max_size=config.TARGET_VOCAB_MAX_SIZE,
kind="target"
)
print("Loaded target word vocab. size: %d" % self.target_vocab_size)
self.node_to_index, self.index_to_node, self.nodes_vocab_size, _ = Common.load_vocab_from_dict(
node_to_count, add_values=[Common.PAD, Common.UNK], max_size=None,
kind="node"
)
print("Loaded nodes vocab. size: %d" % self.nodes_vocab_size)
self.epochs_trained = 0
def close_session(self):
self.sess.close()
def train(self):
print("Starting training")
start_time = time.time()
batch_num = 0
sum_loss = 0
best_f1 = 0
best_epoch = 0
best_f1_precision = 0
best_f1_recall = 0
epochs_no_improve = 0
self.queue_thread = reader.Reader(
subtoken_to_index=self.subtoken_to_index,
node_to_index=self.node_to_index,
target_to_index=self.target_to_index,
config=self.config,
)
optimizer, train_loss = self.build_training_graph(
self.queue_thread.get_output()
)
self.print_hyperparams()
self.initialize_session_variables(self.sess)
print("Initalized variables")
if self.config.LOAD_PATH:
self.load_model(self.sess)
print("Started reader...")
multi_batch_start_time = time.time()
self.experiment.log_parameters(
{
"num_epochs": self.config.NUM_EPOCHS,
"batch_size": self.config.BATCH_SIZE,
"NUM_EPOCHS": self.config.NUM_EPOCHS,
"SAVE_EVERY_EPOCHS": self.config.SAVE_EVERY_EPOCHS,
"PATIENCE": self.config.PATIENCE,
"BATCH_SIZE": self.config.BATCH_SIZE,
"TEST_BATCH_SIZE": self.config.TEST_BATCH_SIZE,
"READER_NUM_PARALLEL_BATCHES": self.config.READER_NUM_PARALLEL_BATCHES,
"SHUFFLE_BUFFER_SIZE": self.config.SHUFFLE_BUFFER_SIZE,
"CSV_BUFFER_SIZE": self.config.CSV_BUFFER_SIZE,
"TRAIN_PATH": self.config.TRAIN_PATH,
"TEST_PATH": self.config.TEST_PATH,
"DATA_NUM_CONTEXTS": self.config.DATA_NUM_CONTEXTS,
"MAX_CONTEXTS": self.config.MAX_CONTEXTS,
"SUBTOKENS_VOCAB_MAX_SIZE": self.config.SUBTOKENS_VOCAB_MAX_SIZE,
"TARGET_VOCAB_MAX_SIZE": self.config.TARGET_VOCAB_MAX_SIZE,
"EMBEDDINGS_SIZE": self.config.EMBEDDINGS_SIZE,
"RNN_SIZE": self.config.RNN_SIZE,
"DECODER_SIZE": self.config.DECODER_SIZE,
"NUM_DECODER_LAYERS": self.config.NUM_DECODER_LAYERS,
"SAVE_PATH": self.config.SAVE_PATH,
"LOAD_PATH": self.config.LOAD_PATH,
"MAX_PATH_LENGTH": self.config.MAX_PATH_LENGTH,
"MAX_NAME_PARTS": self.config.MAX_NAME_PARTS,
"MAX_TARGET_PARTS": self.config.MAX_TARGET_PARTS,
"EMBEDDINGS_DROPOUT_KEEP_PROB": self.config.EMBEDDINGS_DROPOUT_KEEP_PROB,
"RNN_DROPOUT_KEEP_PROB": self.config.RNN_DROPOUT_KEEP_PROB,
"BIRNN": self.config.BIRNN,
"GRU": self.config.GRU,
"RANDOM_CONTEXTS": self.config.RANDOM_CONTEXTS,
"BEAM_WIDTH": self.config.BEAM_WIDTH,
"USE_MOMENTUM": self.config.USE_MOMENTUM,
"RELEASE": self.config.RELEASE,
"ATTENTION": self.config.ATTENTION,
"NORM_OR_SCALE": self.config.NORM_OR_SCALE,
"SPARSE_CROSS_ENT": self.config.SPARSE_CROSS_ENT,
"PENALIZE_UNK": self.config.PENALIZE_UNK
}
)
for epoch in range(
1, (self.config.NUM_EPOCHS // self.config.SAVE_EVERY_EPOCHS) + 1
):
# for epoch in range(self.config.NUM_EPOCHS):
self.queue_thread.reset(self.sess)
print("Starting new epoch %d" % epoch)
try:
while True:
batch_num += 1
# if batch_num >= 200:
# print("End of batch!!")
# raise tf.errors.OutOfRangeError(
# tf.NodeDef.ExperimentalDebugInfo, None, "I'm fake!"
# )
_, batch_loss = self.sess.run([optimizer, train_loss])
self.experiment.set_step(batch_num)
self.experiment.log_metric("loss", batch_loss)
sum_loss += batch_loss # aka train loss
if batch_num % self.num_batches_to_log == 0:
self.trace(sum_loss, batch_num, multi_batch_start_time)
sum_loss = 0
multi_batch_start_time = time.time()
except tf.errors.OutOfRangeError:
self.epochs_trained += self.config.SAVE_EVERY_EPOCHS
print("Finished %d epochs" % self.config.SAVE_EVERY_EPOCHS)
results, precision, recall, f1, _, _, _, _, _, _ = self.evaluate()
print("Epochs trained: %.5f" % self.epochs_trained)
print("Accuracy after %d epochs: %.5f" % (self.epochs_trained, results))
print(
"After %d epochs: Precision: %.5f, recall: %.5f, F1: %.5f"
% (self.epochs_trained, precision, recall, f1)
)
self.experiment.log_metric("epochs_trained",self.epochs_trained,step=self.epochs_trained)
self.experiment.log_metric("precision",precision,step=self.epochs_trained)
self.experiment.log_metric("recall",recall,step=self.epochs_trained)
self.experiment.log_metric("f1",f1,step=self.epochs_trained)
if f1 > best_f1:
best_f1 = f1
best_f1_precision = precision
best_f1_recall = recall
best_epoch = self.epochs_trained
epochs_no_improve = 0
self.save_model(self.sess, self.config.SAVE_PATH)
else:
epochs_no_improve += self.config.SAVE_EVERY_EPOCHS
if epochs_no_improve >= self.config.PATIENCE:
print(
"Not improved for %d epochs, stopping training"
% self.config.PATIENCE
)
print("Best scores - epoch %d: " % best_epoch)
print(
"Precision: %.5f, recall: %.5f, F1: %.5f"
% (best_f1_precision, best_f1_recall, best_f1)
)
return
if self.config.SAVE_PATH:
self.save_model(self.sess, self.config.SAVE_PATH + ".final")
print("Model saved in file: %s" % self.config.SAVE_PATH)
elapsed = int(time.time() - start_time)
print(
"Training time: %sh%sm%ss\n"
% ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60)
)
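    # Note (illustrative, not from the original source): early stopping above is
    # counted in epochs -- epochs_no_improve grows by SAVE_EVERY_EPOCHS after
    # each evaluation without an F1 improvement and training stops once it
    # reaches PATIENCE; e.g. SAVE_EVERY_EPOCHS=1, PATIENCE=10 stops after 10
    # consecutive evaluations without improvement.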
def trace(self, sum_loss, batch_num, multi_batch_start_time):
multi_batch_elapsed = time.time() - multi_batch_start_time
avg_loss = sum_loss / (self.num_batches_to_log * self.config.BATCH_SIZE)
self.experiment.log_metric("throughput", self.config.BATCH_SIZE
* self.num_batches_to_log
/ (multi_batch_elapsed if multi_batch_elapsed > 0 else 1), step=batch_num)
print(
"Average loss at batch %d: %f, \tthroughput: %d samples/sec"
% (
batch_num,
avg_loss,
self.config.BATCH_SIZE
* self.num_batches_to_log
/ (multi_batch_elapsed if multi_batch_elapsed > 0 else 1),
)
)
def build_training_graph(self, input_tensors):
target_index = input_tensors[reader.TARGET_INDEX_KEY]
target_lengths = input_tensors[reader.TARGET_LENGTH_KEY]
path_source_indices = input_tensors[reader.PATH_SOURCE_INDICES_KEY]
node_indices = input_tensors[reader.NODE_INDICES_KEY]
path_target_indices = input_tensors[reader.PATH_TARGET_INDICES_KEY]
valid_context_mask = input_tensors[reader.VALID_CONTEXT_MASK_KEY]
path_source_lengths = input_tensors[reader.PATH_SOURCE_LENGTHS_KEY]
path_lengths = input_tensors[reader.PATH_LENGTHS_KEY]
path_target_lengths = input_tensors[reader.PATH_TARGET_LENGTHS_KEY]
with tf.variable_scope("model"):
subtoken_vocab = tf.get_variable(
"SUBTOKENS_VOCAB",
shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=1.0, mode="FAN_OUT", uniform=True
), # TODO: Refactor tf.contrib tf.keras.initializers.VarianceScaling
)
target_words_vocab = tf.get_variable(
"TARGET_WORDS_VOCAB",
shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=1.0, mode="FAN_OUT", uniform=True
), # nn.init.kaiming_uniform_(tensor, mode='fan_out', nonlinearity='relu')
) # TODO: Refactor tf.contrib
nodes_vocab = tf.get_variable(
"NODES_VOCAB",
shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
initializer=tf.contrib.layers.variance_scaling_initializer(
factor=1.0, mode="FAN_OUT", uniform=True
),
) # TODO: Refactor tf.contrib
# (batch, max_contexts, decoder_size)
batched_contexts = self.compute_contexts(
subtoken_vocab=subtoken_vocab,
nodes_vocab=nodes_vocab,
source_input=path_source_indices,
nodes_input=node_indices,
target_input=path_target_indices,
valid_mask=valid_context_mask,
path_source_lengths=path_source_lengths,
path_lengths=path_lengths,
path_target_lengths=path_target_lengths,
)
batch_size = tf.shape(target_index)[0] # TensorFlow
outputs, final_states = self.decode_outputs(
target_words_vocab=target_words_vocab,
target_input=target_index,
batch_size=batch_size,
batched_contexts=batched_contexts,
valid_mask=valid_context_mask,
)
step = tf.Variable(0, trainable=False) # TensorFlow
logits = (
outputs.rnn_output
) # (batch, max_output_length, dim * 2 + rnn_size)
if self.config.SPARSE_CROSS_ENT:
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=target_index, logits=logits
)
else:
crossent = tf.nn.softmax_cross_entropy_with_logits_v2(
labels=target_index, logits=logits
)
target_words_nonzero = tf.sequence_mask(
target_lengths + 1,
maxlen=self.config.MAX_TARGET_PARTS + 1,
dtype=tf.float32,
)
loss = tf.reduce_sum(crossent * target_words_nonzero) / tf.to_float(
batch_size
)
if self.config.USE_MOMENTUM:
learning_rate = tf.train.exponential_decay( # TensorFlow
0.01,
step * self.config.BATCH_SIZE,
self.num_training_examples,
0.95,
staircase=True,
)
optimizer = tf.train.MomentumOptimizer( # TensorFlow
learning_rate, 0.95, use_nesterov=True
)
train_op = optimizer.minimize(loss, global_step=step)
else:
params = tf.trainable_variables()
gradients = tf.gradients(loss, params)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, clip_norm=5)
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.apply_gradients(zip(clipped_gradients, params))
self.saver = tf.train.Saver(max_to_keep=10) # TensorFlow
return train_op, loss
def decode_outputs(
self,
target_words_vocab,
target_input,
batch_size,
batched_contexts,
valid_mask,
is_evaluating=False,
):
num_contexts_per_example = tf.count_nonzero(valid_mask, axis=-1) # TensorFlow
start_fill = tf.fill( # TensorFlow
[batch_size], self.target_to_index[Common.SOS]
) # (batch, )
if self.config.GRU:
decoder_cell = tf.nn.rnn_cell.MultiRNNCell( # TODO: tf.keras.layers.StackedRNNCells
[
tf.nn.rnn_cell.GRUCell(self.config.DECODER_SIZE)
for _ in range(self.config.NUM_DECODER_LAYERS)
]
)
else:
decoder_cell = tf.nn.rnn_cell.MultiRNNCell( # TODO: tf.keras.layers.StackedRNNCells
[
tf.nn.rnn_cell.LSTMCell( # TensorFlow
self.config.DECODER_SIZE
) # tf.keras.layers.GRUCell
for _ in range(self.config.NUM_DECODER_LAYERS)
]
)
contexts_sum = tf.reduce_sum( # TensorFlow
batched_contexts * tf.expand_dims(valid_mask, -1), axis=1 # TensorFlow
) # (batch_size, dim * 2 + rnn_size)
contexts_average = tf.divide( # TensorFlow
contexts_sum,
tf.to_float(tf.expand_dims(num_contexts_per_example, -1)), # TensorFlow
)
if self.config.GRU:
fake_encoder_state = tuple(
contexts_average for _ in range(self.config.NUM_DECODER_LAYERS)
)
else:
fake_encoder_state = tuple(
tf.nn.rnn_cell.LSTMStateTuple(
contexts_average, contexts_average
) # TensorFlow
for _ in range(self.config.NUM_DECODER_LAYERS)
)
projection_layer = tf.layers.Dense(
self.target_vocab_size, use_bias=False
) # TensorFlow
if is_evaluating and self.config.BEAM_WIDTH > 0:
batched_contexts = tf.contrib.seq2seq.tile_batch( # TODO: Refactor tf.contrib
batched_contexts, multiplier=self.config.BEAM_WIDTH
)
num_contexts_per_example = tf.contrib.seq2seq.tile_batch( # TODO: Refactor tf.contrib
num_contexts_per_example, multiplier=self.config.BEAM_WIDTH
)
if self.config.ATTENTION == "luong":
attention_mechanism = tf.contrib.seq2seq.LuongAttention( # TODO: Refactor tf.contrib
num_units=self.config.DECODER_SIZE,
memory=batched_contexts,
memory_sequence_length=None,
scale=self.config.NORM_OR_SCALE,
probability_fn=None,
score_mask_value=None,
dtype=None,
)
elif self.config.ATTENTION == "bahdanau":
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention( # TODO: Refactor tf.contrib
num_units=self.config.DECODER_SIZE,
memory=batched_contexts,
memory_sequence_length=None,
normalize=self.config.NORM_OR_SCALE,
probability_fn=None,
score_mask_value=None,
dtype=None,
)
# TF doesn't support beam search with alignment history
should_save_alignment_history = is_evaluating and self.config.BEAM_WIDTH == 0
decoder_cell = tf.contrib.seq2seq.AttentionWrapper( # TODO: Refactor tf.contrib
decoder_cell,
attention_mechanism,
attention_layer_size=self.config.DECODER_SIZE,
alignment_history=should_save_alignment_history,
)
if is_evaluating:
if self.config.BEAM_WIDTH > 0:
decoder_initial_state = decoder_cell.zero_state(
dtype=tf.float32, batch_size=batch_size * self.config.BEAM_WIDTH
)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tf.contrib.seq2seq.tile_batch( # TODO: Refactor tf.contrib
fake_encoder_state, multiplier=self.config.BEAM_WIDTH
)
)
decoder = tf.contrib.seq2seq.BeamSearchDecoder( # TODO: Refactor tf.contrib
cell=decoder_cell,
embedding=target_words_vocab,
start_tokens=start_fill,
end_token=self.target_to_index[Common.PAD],
initial_state=decoder_initial_state,
beam_width=self.config.BEAM_WIDTH,
output_layer=projection_layer,
length_penalty_weight=0.0,
)
else:
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper( # TODO: Refactor tf.contrib
target_words_vocab, start_fill, 0
)
initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(
cell_state=fake_encoder_state
)
decoder = tf.contrib.seq2seq.BasicDecoder( # TODO: Refactor tf.contrib
cell=decoder_cell,
helper=helper,
initial_state=initial_state,
output_layer=projection_layer,
)
else:
decoder_cell = tf.nn.rnn_cell.DropoutWrapper(
decoder_cell, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
target_words_embedding = tf.nn.embedding_lookup(
target_words_vocab,
tf.concat([tf.expand_dims(start_fill, -1), target_input], axis=-1),
) # (batch, max_target_parts, dim * 2 + rnn_size)
helper = tf.contrib.seq2seq.TrainingHelper( # TODO: Refactor tf.contrib
inputs=target_words_embedding,
sequence_length=tf.ones([batch_size], dtype=tf.int32)
* (self.config.MAX_TARGET_PARTS + 1),
)
initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(
cell_state=fake_encoder_state
)
decoder = tf.contrib.seq2seq.BasicDecoder( # TODO: Refactor tf.contrib
cell=decoder_cell,
helper=helper,
initial_state=initial_state,
output_layer=projection_layer,
)
outputs, final_states, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode( # TODO: Refactor tf.contrib
decoder, maximum_iterations=self.config.MAX_TARGET_PARTS + 1
)
return outputs, final_states
def calculate_path_abstraction(
self, path_embed, path_lengths, valid_contexts_mask, is_evaluating=False
):
return self.path_rnn_last_state(
is_evaluating, path_embed, path_lengths, valid_contexts_mask
)
def path_rnn_last_state(
self, is_evaluating, path_embed, path_lengths, valid_contexts_mask
):
# path_embed: (batch, max_contexts, max_path_length+1, dim)
# path_length: (batch, max_contexts)
# valid_contexts_mask: (batch, max_contexts)
max_contexts = tf.shape(path_embed)[1]
flat_paths = tf.reshape(
path_embed,
shape=[-1, self.config.MAX_PATH_LENGTH, self.config.EMBEDDINGS_SIZE],
) # (batch * max_contexts, max_path_length+1, dim)
flat_valid_contexts_mask = tf.reshape(
valid_contexts_mask, [-1]
) # (batch * max_contexts)
lengths = tf.multiply(
tf.reshape(path_lengths, [-1]), tf.cast(flat_valid_contexts_mask, tf.int32)
) # (batch * max_contexts)
if self.config.BIRNN:
if self.config.GRU:
rnn_cell_fw = tf.nn.rnn_cell.GRUCell(
self.config.RNN_SIZE / 2
) # tf.keras.layers.GRUCell
rnn_cell_bw = tf.nn.rnn_cell.GRUCell(
self.config.RNN_SIZE / 2
) # tf.keras.layers.GRUCell
if not is_evaluating:
rnn_cell_fw = tf.nn.rnn_cell.DropoutWrapper( # TODO: tf.keras.layers.Dropout
rnn_cell_fw, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
rnn_cell_bw = tf.nn.rnn_cell.DropoutWrapper( # TODO: tf.keras.layers.Dropout
rnn_cell_bw, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
# (state_fw, state_bw)
_, gru_state = tf.nn.bidirectional_dynamic_rnn( # TODO: keras.layers.Bidirectional(keras.layers.RNN(cell))
cell_fw=rnn_cell_fw,
cell_bw=rnn_cell_bw,
inputs=flat_paths,
dtype=tf.float32,
sequence_length=lengths,
)
# tf.print(gru_state)
# print(gru_state)
final_rnn_state = gru_state # tf.concat(
# [state_fw.h, state_bw.h], axis=-1
# ) # (batch * max_contexts, rnn_size)
else:
rnn_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config.RNN_SIZE / 2
) # TODO: tf.keras.layers.GRUCell
rnn_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config.RNN_SIZE / 2
) # TODO: tf.keras.layers.GRUCell
if not is_evaluating:
rnn_cell_fw = tf.nn.rnn_cell.DropoutWrapper( # TODO: tf.keras.layers.Dropout
rnn_cell_fw, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
rnn_cell_bw = tf.nn.rnn_cell.DropoutWrapper( # TODO: tf.keras.layers.Dropout
rnn_cell_bw, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
_, (
state_fw,
state_bw,
) = tf.nn.bidirectional_dynamic_rnn( # TODO: keras.layers.Bidirectional(keras.layers.RNN(cell))
cell_fw=rnn_cell_fw,
cell_bw=rnn_cell_bw,
inputs=flat_paths,
dtype=tf.float32,
sequence_length=lengths,
)
final_rnn_state = tf.concat(
[state_fw.h, state_bw.h], axis=-1
) # (batch * max_contexts, rnn_size)
else:
if self.config.GRU:
gru_cell = tf.nn.rnn_cell.GRUCell(self.config.RNN_SIZE)
if not is_evaluating:
gru_cell = tf.nn.rnn_cell.DropoutWrapper(
gru_cell, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
_, state = tf.nn.dynamic_rnn(
cell=gru_cell,
inputs=flat_paths,
dtype=tf.float32,
sequence_length=lengths,
)
# print(state)
final_rnn_state = state # (batch * max_contexts, rnn_size)
else:
rnn_cell = tf.nn.rnn_cell.LSTMCell(
self.config.RNN_SIZE
) # tf.keras.layers.GRUCell
if not is_evaluating:
rnn_cell = tf.nn.rnn_cell.DropoutWrapper(
rnn_cell, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB
)
_, state = tf.nn.dynamic_rnn(
cell=rnn_cell,
inputs=flat_paths,
dtype=tf.float32,
sequence_length=lengths,
)
final_rnn_state = state.h # (batch * max_contexts, rnn_size)
return tf.reshape(
final_rnn_state, shape=[-1, max_contexts, self.config.RNN_SIZE]
) # (batch, max_contexts, rnn_size)
def compute_contexts(
self,
subtoken_vocab,
nodes_vocab,
source_input,
nodes_input,
target_input,
valid_mask,
path_source_lengths,
path_lengths,
path_target_lengths,
is_evaluating=False,
):
source_word_embed = tf.nn.embedding_lookup(
params=subtoken_vocab, ids=source_input
) # (batch, max_contexts, max_name_parts, dim)
path_embed = tf.nn.embedding_lookup(
params=nodes_vocab, ids=nodes_input
) # (batch, max_contexts, max_path_length+1, dim)
target_word_embed = tf.nn.embedding_lookup(
params=subtoken_vocab, ids=target_input
) # (batch, max_contexts, max_name_parts, dim)
source_word_mask = tf.expand_dims(
tf.sequence_mask(
path_source_lengths, maxlen=self.config.MAX_NAME_PARTS, dtype=tf.float32
),
-1,
) # (batch, max_contexts, max_name_parts, 1)
target_word_mask = tf.expand_dims(
tf.sequence_mask(
path_target_lengths, maxlen=self.config.MAX_NAME_PARTS, dtype=tf.float32
),
-1,
) # (batch, max_contexts, max_name_parts, 1)
source_words_sum = tf.reduce_sum(
source_word_embed * source_word_mask, axis=2
) # (batch, max_contexts, dim)
path_nodes_aggregation = self.calculate_path_abstraction(
path_embed, path_lengths, valid_mask, is_evaluating
) # (batch, max_contexts, rnn_size)
target_words_sum = tf.reduce_sum(
target_word_embed * target_word_mask, axis=2
) # (batch, max_contexts, dim)
context_embed = tf.concat(
[source_words_sum, path_nodes_aggregation, target_words_sum], axis=-1
) # (batch, max_contexts, dim * 2 + rnn_size)
if not is_evaluating:
context_embed = tf.nn.dropout(
context_embed, self.config.EMBEDDINGS_DROPOUT_KEEP_PROB
)
batched_embed = tf.layers.dense( # TODO: keras.layers.dense
inputs=context_embed,
units=self.config.DECODER_SIZE,
activation=tf.nn.tanh,
trainable=not is_evaluating,
use_bias=False,
)
return batched_embed
@staticmethod
def initialize_session_variables(sess):
return sess.run(
tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer(),
tf.tables_initializer(),
)
)
def build_test_graph(self, input_tensors):
target_index = input_tensors[reader.TARGET_INDEX_KEY]
path_source_indices = input_tensors[reader.PATH_SOURCE_INDICES_KEY]
node_indices = input_tensors[reader.NODE_INDICES_KEY]
path_target_indices = input_tensors[reader.PATH_TARGET_INDICES_KEY]
valid_mask = input_tensors[reader.VALID_CONTEXT_MASK_KEY]
path_source_lengths = input_tensors[reader.PATH_SOURCE_LENGTHS_KEY]
path_lengths = input_tensors[reader.PATH_LENGTHS_KEY]
path_target_lengths = input_tensors[reader.PATH_TARGET_LENGTHS_KEY]
with tf.variable_scope("model", reuse=self.get_should_reuse_variables()):
subtoken_vocab = tf.get_variable(
"SUBTOKENS_VOCAB",
shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
trainable=False,
)
target_words_vocab = tf.get_variable(
"TARGET_WORDS_VOCAB",
shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
trainable=False,
)
nodes_vocab = tf.get_variable(
"NODES_VOCAB",
shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32,
trainable=False,
)
batched_contexts = self.compute_contexts(
subtoken_vocab=subtoken_vocab,
nodes_vocab=nodes_vocab,
source_input=path_source_indices,
nodes_input=node_indices,
target_input=path_target_indices,
valid_mask=valid_mask,
path_source_lengths=path_source_lengths,
path_lengths=path_lengths,
path_target_lengths=path_target_lengths,
is_evaluating=True,
)
outputs, final_states = self.decode_outputs(
target_words_vocab=target_words_vocab,
target_input=target_index,
batch_size=tf.shape(target_index)[0],
batched_contexts=batched_contexts,
valid_mask=valid_mask,
is_evaluating=True,
)
if self.config.BEAM_WIDTH > 0:
predicted_indices = outputs.predicted_ids
topk_values = outputs.beam_search_decoder_output.scores
attention_weights = [tf.no_op()]
else:
predicted_indices = outputs.sample_id
topk_values = tf.constant(1, shape=(1, 1), dtype=tf.float32)
attention_weights = tf.squeeze(final_states.alignment_history.stack(), 1)
return predicted_indices, topk_values, target_index, attention_weights
def predict(self, predict_data_lines):
if self.predict_queue is None:
self.predict_queue = reader.Reader(
subtoken_to_index=self.subtoken_to_index,
node_to_index=self.node_to_index,
target_to_index=self.target_to_index,
config=self.config,
is_evaluating=True,
)
self.predict_placeholder = tf.placeholder(tf.string)
reader_output = self.predict_queue.process_from_placeholder(
self.predict_placeholder
)
reader_output = {
key: tf.expand_dims(tensor, 0) for key, tensor in reader_output.items()
}
self.predict_top_indices_op, self.predict_top_scores_op, _, self.attention_weights_op = self.build_test_graph(
reader_output
)
self.predict_source_string = reader_output[reader.PATH_SOURCE_STRINGS_KEY]
self.predict_path_string = reader_output[reader.PATH_STRINGS_KEY]
self.predict_path_target_string = reader_output[
reader.PATH_TARGET_STRINGS_KEY
]
self.predict_target_strings_op = reader_output[reader.TARGET_STRING_KEY]
self.initialize_session_variables(self.sess)
self.saver = tf.train.Saver()
self.load_model(self.sess)
results = []
for line in predict_data_lines:
predicted_indices, top_scores, true_target_strings, attention_weights, path_source_string, path_strings, path_target_string = self.sess.run(
[
self.predict_top_indices_op,
self.predict_top_scores_op,
self.predict_target_strings_op,
self.attention_weights_op,
self.predict_source_string,
self.predict_path_string,
self.predict_path_target_string,
],
feed_dict={self.predict_placeholder: line},
)
top_scores = np.squeeze(top_scores, axis=0)
path_source_string = path_source_string.reshape((-1))
path_strings = path_strings.reshape((-1))
path_target_string = path_target_string.reshape((-1))
            predicted_indices = np.squeeze(predicted_indices, axis=0)
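            # Note (illustrative, not from the original source): reader_output
            # was given a leading batch axis of 1 via tf.expand_dims(tensor, 0)
            # above, so np.squeeze(..., axis=0) simply drops that axis again,
            # e.g. np.squeeze(np.ones((1, 3, 5)), axis=0).shape == (3, 5).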
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import copy
class Disk():
def __init__(self,fname='outputs/res.dat',p=0,second=False):
dfname = fname + '.disk'
fname_d = fname +'.{:d}'.format(p)
dat = np.fromfile(fname_d)
self.n,self.nm,self.mi,self.mf = dat[:4].astype(int)
print(p,self.n,self.nm,self.mi,self.mf)
dat=dat[4:]
self.mvals = np.arange(self.mi,self.mf+1)
self.TL = dat[:self.nm]
dat=dat[self.nm:]
self.TR = dat[:self.nm]
dat=dat[self.nm:]
self.r = dat[:self.n]
dat=dat[self.n:]
self.lamex = dat[:self.n*self.nm].reshape(self.nm,self.n).T
dat=dat[self.n*self.nm:]
self.lamdep = dat[:self.n*self.nm].reshape(self.nm,self.n).T
dat=dat[self.n*self.nm:]
self.drfw = dat[:self.n*self.nm].reshape(self.nm,self.n).T
dat=dat[self.n*self.nm:]
self.fw = dat[:self.n*self.nm].reshape(self.nm,self.n).T
dat=dat[self.n*self.nm:]
self.pot = dat[:self.n*self.nm].reshape(self.nm,self.n).T
dat=dat[self.n*self.nm:]
self.u = np.zeros((self.n,self.nm),dtype='complex')
self.v = np.zeros((self.n,self.nm),dtype='complex')
self.s = np.zeros((self.n,self.nm),dtype='complex')
print(fname)
dat = np.fromfile(fname,dtype='complex').reshape(self.nm,3,self.n).T
self.u = dat[:,0,:]
self.v = dat[:,1,:]
self.s = dat[:,2,:]
if second:
dat = np.fromfile(fname+'.2',dtype='complex').reshape(self.nm,3,self.n).T
self.u += dat[:,0,:]
self.v += dat[:,1,:]
self.s += dat[:,2,:]
#for indx,i in enumerate(np.arange(self.nm)+self.mi):
# dat = np.loadtxt('outputs/sol{:d}.dat.{:d}'.format(i,p))
# self.u[:,indx] = dat[:,0] + 1j*dat[:,1]
# self.v[:,indx] = dat[:,2] + 1j*dat[:,3]
# self.s[:,indx] = dat[:,4] + 1j*dat[:,5]
self.dlr = np.diff(np.log(self.r))[0]
dat = np.fromfile(dfname)[1:]
self.dbar = dat[:self.n]; dat=dat[self.n:]
self.dlsdlr = dat[:self.n]; dat=dat[self.n:]
self.d2lsdlr = dat[:self.n]; dat=dat[self.n:]
self.omega = dat[:self.n]; dat=dat[self.n:]
self.dlomdlr = dat[:self.n]; dat=dat[self.n:]
self.kappa2 = dat[:self.n]; dat=dat[self.n:]
self.lamex_tot = self.lamex[:,1:].sum(axis=1)
self.lamdep_tot = self.lamdep[:,1:].sum(axis=1)
self.drfw_tot = self.drfw[:,1:].sum(axis=1)
self.fw_tot = self.fw[:,1:].sum(axis=1)
self.pot_tot = self.pot[:,1:].sum(axis=1)
self.drfd = self.drfw[:,0].copy()
self.mdot_d = self.fw[:,0].copy()
indR = self.r>=1
indL = self.r<=1
self.rR = self.r[indR]
self.rL = self.r[indL]
self.ilamexR = (self.lamex*2*np.pi*self.r[:,np.newaxis]**2 * self.dlr)[indR,:].cumsum(axis=0)
self.ilamexL = -(self.lamex*2*np.pi*self.r[:,np.newaxis]**2 * self.dlr)[indL,:][::-1].cumsum(axis=0)[::-1]
self.ilam = np.vstack((self.ilamexL,self.ilamexR))
def Dfunc(self,m):
return self.kappa2 - m*(self.omega - 1)**2
def torque_contours(self,ax=None,scaleH=True,norm=1,cmap='bwr',ylims=None,xlims=None):
if ax is None:
fig=plt.figure()
ax=fig.add_subplot(111)
rvals = np.hstack((self.rL,self.rR))
if xlims is not None:
indx = (rvals>=xlims[0])&(rvals<=xlims[1])
else:
indx = np.ones(rvals.shape).astype(bool)
if ylims is not None:
indy = (self.mvals>=ylims[0])&(self.mvals<=ylims[1])
else:
indy = np.ones(self.mvals.shape).astype(bool)
yy,mm = np.meshgrid(rvals[indx],self.mvals[indy],indexing='ij')
if scaleH:
yy = (yy-1)/.05
TT = np.vstack((self.ilamexL,self.ilamexR))
TT = TT[indx,:][:,indy]
img=ax.contour(yy,mm,TT/norm,100,cmap=cmap)
return img
def gt80_torque_density(self,r,mu=-.5,hp=.05,d=1.3):
res = np.zeros(self.dbar.shape)
ind = abs(r-1)>=d*hp
res[ind] = np.sign(r[ind]-1)*2*np.pi*.4/abs(r[ind]-1)**4 * self.dbar[ind]
return res
def total_torque(self,logx=False,xlims=None,axes=None,cumulative=True,fig=None,norm=(1./.05**2)):
if axes is None:
if cumulative:
fig,axes = plt.subplots(1,2,figsize=(10,5))
else:
fig = plt.figure()
axes = fig.add_subplot(111)
if cumulative:
axes[0].plot(self.mvals,self.TR/norm,'ok',label='Outer')
axes[0].plot(self.mvals,self.TL/norm,'sr',label='Inner')
axes[0].plot(self.mvals,self.TR/norm-self.TL/norm,'db',label='$\\Delta T$')
axes[1].plot(self.mvals,self.TR.cumsum()/self.TR.sum(),'--ok')
axes[1].plot(self.mvals,self.TL.cumsum()/self.TL.sum(),'-sr')
for ax in axes:
if logx:
ax.set_xscale('log')
ax.set_xlabel('m',fontsize=15)
axes[0].legend(loc='upper right')
else:
axes.plot(self.mvals,self.TR/norm,'ok',label='Outer')
axes.plot(self.mvals,self.TL/norm,'sr',label='Inner')
axes.plot(self.mvals,self.TR/norm-self.TL/norm,'db',label='$\\Delta T$')
if logx:
axes.set_xscale('log')
axes.set_xlabel('m',fontsize=15)
axes.legend(loc='upper right')
return fig,axes
def itorque(self,m,logx=True,xlims=None,ax=None,plot_dep=False,tot=False,**kargs):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
i = m-1
if tot:
lamex = self.lamex_tot.copy()
lamdep = self.lamdep_tot.copy()
else:
lamex = self.lamex[:,i].copy()
lamdep = self.lamdep[:,i].copy()
ind = self.r >= 1
ilamex = np.zeros(lamex.shape)
ilamex[ind] = (lamex*2*np.pi*self.r*self.r*self.dlr)[ind].cumsum()
ilamex[ind] -= ilamex[ind][0]
ilamdep= np.zeros(lamdep.shape)
ilamdep[ind] = (lamdep*2*np.pi*self.r*self.r*self.dlr)[ind].cumsum()
ilamdep[ind] -= ilamdep[ind][0]
ind = self.r <= 1
ilamex[ind] = -(lamex*2*np.pi*self.r*self.r*self.dlr)[ind][::-1].cumsum()[::-1]
ilamdep[ind] = -(lamdep*2*np.pi*self.r*self.r*self.dlr)[ind][::-1].cumsum()[::-1]
ilamex[ind] -= ilamex[ind][-1]
ilamdep[ind] -= ilamdep[ind][-1]
ax.plot(self.r,ilamex,ls='-',**kargs)
if plot_dep:
ax.plot(self.r,ilamdep,ls='--',**kargs)
ax.set_xlabel('$r/r_p$',fontsize=20)
def torque(self,m,logx=True,xlims=None,ax=None,integ=False,from_inner=False,tot=False,**kargs):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
i = m-1
if tot:
lamex = self.lamex_tot.copy()
lamdep = self.lamdep_tot.copy()
else:
lamex = self.lamex[:,i].copy()
lamdep = self.lamdep[:,i].copy()
if integ:
if tot:
fw = 2*np.pi*(self.fw_tot)
else:
fw = 2*np.pi*(self.fw[:,i])
if from_inner:
lamex = 2*np.pi*(lamex*self.r**2*self.dlr).cumsum()
lamdep = 2*np.pi*(lamdep*self.r**2*self.dlr).cumsum()
else:
#fr = self.fw[:,i][self.r>=1][0]
#fl = self.fw[:,i][self.r<=1][-1]
#rr = self.r[self.r>=1][0]
#rl = self.r[self.r<=1][-1]
#fp = fl + (fr-fl)/(np.log(rr/rl)) *(np.log(1./rl))
#fw -= 2*np.pi*fp
ind = self.r >= 1
ilamex = np.zeros(lamex.shape)
ilamex[ind] = (lamex*2*np.pi*self.r*self.r*self.dlr)[ind].cumsum()
ilamex[ind] -= ilamex[ind][0]
ilamdep= np.zeros(lamdep.shape)
ilamdep[ind] = (lamdep*2*np.pi*self.r*self.r*self.dlr)[ind].cumsum()
                ilamdep[ind] -= ilamdep[ind][0]
ind = self.r <= 1
ilamex[ind] = -(lamex*2*np.pi*self.r*self.r*self.dlr)[ind][::-1].cumsum()[::-1]
ilamdep[ind] = -(lamdep*2*np.pi*self.r*self.r*self.dlr)[ind][::-1].cumsum()[::-1]
ilamex[ind] -= ilamex[ind][-1]
                ilamdep[ind] -= ilamdep[ind][-1]
lamdep = ilamdep
lamex = ilamex
else:
if tot:
fw = self.drfw_tot.copy()
else:
fw = self.drfw[:,i].copy()
ax.plot(self.r,lamex,c='k',**kargs)
ax.plot(self.r,fw,c='r',**kargs)
ax.plot(self.r,lamdep,c='b',**kargs)
if logx:
ax.set_xscale('log')
if xlims is not None:
ax.set_xlim(xlims)
return lamex,lamdep,fw
def plot(self,m,logx=True,axes=None,fig=None,xlims=None,ylims=None):
if m == 0:
if axes is None:
fig,axes = plt.subplots(2,3,figsize=(15,5))
axes[0,0].plot(self.r,self.dbar,'-k')
axes[0,1].plot(self.r,self.dlsdlr,'-k')
axes[0,2].plot(self.r,self.d2lsdlr,'-k')
axes[1,0].plot(self.r,self.omega,'-k')
axes[1,1].plot(self.r,self.dlomdlr,'-k')
axes[1,2].plot(self.r,self.kappa2,'-k')
axes[0,0].text(.75,.8,'$\\Sigma$',fontsize=20,transform=axes[0,0].transAxes)
axes[0,1].text(.75,.8,'$\\frac{d \\ln \\Sigma}{ d\\ln r}$',fontsize=20,transform=axes[0,1].transAxes)
axes[0,2].text(.75,.8,'$\\frac{d^2 \\ln \\Sigma}{ d \\ln r^2}$',fontsize=20,transform=axes[0,2].transAxes)
axes[1,0].text(.75,.8,'$\\Omega$',fontsize=20,transform=axes[1,0].transAxes)
axes[1,1].text(.75,.8,'$\\frac{d \\ln \\Omega}{d \\ln r} $',fontsize=20,transform=axes[1,1].transAxes)
axes[1,2].text(.75,.8,'$\\kappa^2$',fontsize=20,transform=axes[1,2].transAxes)
else:
if axes is None:
fig,axes = plt.subplots(1,4,figsize=(20,5))
i = m-1
axes[0].plot(self.r,self.u.real[:,i],self.r,self.u.imag[:,i])
axes[1].plot(self.r,self.v.real[:,i],self.r,self.v.imag[:,i])
axes[2].plot(self.r,self.s.real[:,i],self.r,self.s.imag[:,i])
self.torque(m,ax=axes[3],integ=True)
for ax in axes.flatten():
if logx:
ax.set_xscale('log')
if xlims is not None:
ax.set_xlim(xlims)
if ylims is not None:
ax.set_ylim(ylims)
return fig,axes
def mod_pi(self,angle):
from copy import copy
p = copy(angle)
try:
while (p < -np.pi):
p += 2*np.pi
while (p > np.pi):
p -= 2*np.pi
except ValueError:
for i in range(len(p)):
while (p[i] < -np.pi):
p[i] += 2*np.pi
while (p[i] > np.pi):
p[i] -= 2*np.pi
return p
def interp_2d(self,r_range,q='s',norm=1,h=.05,d=.5,mu=.5):
from scipy.interpolate import RectBivariateSpline
ind = (self.r>=r_range[0])&(self.r<=r_range[1])
r = self.r[ind].copy()
#dlr = np.diff(np.log(r))[0]
if q[0] == 's':
dat = self.s[ind,:]/norm
elif q[0] == 'u':
dat = self.u[ind,:]/norm
elif q[0] == 'v':
dat = self.v[ind,:]/norm
else:
print('{} not a valid choice!'.format(q))
return
#dr_dat = np.gradient(dat,axis=0)/(dlr*r[:,np.newaxis])
nr = dat.shape[0]
phi = np.linspace(-np.pi,np.pi,nr)
nphi = len(phi)
nm =len(self.mvals)
dat_f = np.zeros((nr,nphi))
#dr_dat_f = np.zeros((nr,nphi))
dp_dat_f = np.zeros((nr,nphi))
for j,m in enumerate(self.mvals):
for i in range(nr):
dat_f[i,:] += np.real(dat[i,j]*np.exp(1j*m*phi))/nm
#dr_dat_f[i,:] += np.real(dr_dat[i,j]*np.exp(1j*m*phi))
dp_dat_f[i,:] += np.real(1j*m*dat[i,j]*np.exp(1j*m*phi))/nm
#S_dat =( 2**(5./4) * h**(3./2) / 3.) *( (h*dr_dat_f + r[:,np.newaxis]**(.5) *(r[:,np.newaxis]**(-1.5)-1)*dp_dat_f)/(abs(r[:,np.newaxis]-1)**(1.5)*r[:,np.newaxis]**( .5*(5*d+mu)-11./4))) - (2.*h/3.)*dat_f*dp_dat_f
g = 2**(.25) * h**(.5) * r**( .5*(1-mu-3*d))/np.sqrt(abs(r**(1.5)-1))
func_chi = RectBivariateSpline(r,phi, dat_f*g[:,np.newaxis])
func_s = RectBivariateSpline(r,phi, -(2*h/3)*dp_dat_f*dat_f*g[:,np.newaxis]**2)
func = RectBivariateSpline(r,phi,dat_f)
#func_r = RectBivariateSpline(r,phi,dr_dat_f)
func_p = RectBivariateSpline(r,phi,dp_dat_f)
return func,func_p,func_chi, func_s
def get_ring(self,r,q='s',shift=0,norm=1,skiprange=None,skiplist=[],skip=False,skip_high=False):
if skip:
if skiprange is not None:
skiplist = range(skiprange[0],skiprange[1]+1)
if skip_high:
mtop = 50
else:
mtop = 1e6
else:
mtop = 1e6
skiplist=[]
ind = np.argwhere(self.r>=r)[0][0]
if q[0] == 's':
dat = self.s[ind,:]
elif q[0] == 'u':
dat = self.u[ind,:]
elif q[0] == 'v':
dat = self.v[ind,:]
else:
print('{} not a valid choice!'.format(q))
return
ny = len(self.r)
phi = np.linspace(-np.pi,np.pi,ny)
nm = len(self.mvals)
nm = max(1,nm)
#res = sum([np.real( dat[j]*np.exp(1j*m*phi)) for j,m in enumerate(self.mvals) if m not in skiplist and m <= mtop])/nm
res = np.zeros(phi.shape)
for j,m in enumerate(self.mvals):
if m not in skiplist and m <= mtop:
res += np.real(dat[j]*np.exp(1j*m*phi))
res /= nm
power = np.real(dat*np.conj(dat))
normp = sum(power)
for m in skiplist:
power[m] = 0
if skip_high:
power[50:] = 0
if normp > 0:
power /= normp
if norm == 'max':
res /= abs(res).max()
else:
res /= norm
res -= shift
return phi,res,power
def plotslice_power(self,r,q='s',shift=0,norm=1,ax=None,fig=None,skiprange=None,skiplist=[],skip=False,skip_high=False,phicenter=False,**kargs):
if ax is None:
fig,ax = plt.subplots(figsize=(8,6))
phi,res,power = self.get_ring(r,q=q,shift=shift,norm=norm,skiprange=skiprange,skiplist=skiplist,skip=skip,skip_high=skip_high)
c = kargs.pop('c','k')
marker = kargs.pop('marker','s')
ax.plot(range(self.u.shape[1]),power,marker=marker,c=c,**kargs)
ax.minorticks_on()
ax.set_xlabel('$m$')
ax.set_ylabel('$|A_m|^2$')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_ylim((1e-4,ax.get_ylim()[1]))
return fig,ax
def plotslice(self,r,q='s',shift=0,norm=1,ax=None,fig=None,skiprange=None,skiplist=[],skip=False,skip_high=False,phicenter=False,**kargs):
if ax is None:
fig,ax = plt.subplots(figsize=(8,6))
phi,res,_ = self.get_ring(r,q=q,shift=shift,norm=norm,skiprange=skiprange,skiplist=skiplist,skip=skip,skip_high=skip_high)
c = kargs.pop('c','k')
ls = kargs.pop('ls','-')
if phicenter:
phi -= 2./(3*.05) * (r**(1.5) - 1.5*np.log(r)-1)
ax.plot(phi,res,ls=ls,c=c,**kargs)
ax.minorticks_on()
ax.set_xlabel('$\\phi$')
ax.set_ylabel('$\\sigma(r={:.2f})$'.format(r))
return fig,ax
def fullplot(self,q='s',skip=1,rlims=None,cartesian=True,ax=None,fig=None,full=False,norm=1,shift=0,log=False,contour=False,skiplist = [],skiprange=None,skip_high=False,divbar=False,clrbar=True,**kargs):
if skiprange is not None:
skiplist = range(skiprange[0],skiprange[1]+1)
if skip_high:
mtop = 50
else:
mtop = 1e6
r = self.r.copy()
if rlims is not None:
ind =(r>=rlims[0])&(r<=rlims[1])
else:
ind = np.ones(r.shape).astype(bool)
r = r[ind][::skip]
ny = len(r)
phi = np.linspace(-np.pi,np.pi,ny)
rr,pp = np.meshgrid(r,phi,indexing='ij')
if q[0] == 's':
dat = self.s[ind,:][::skip,:]
dat0 = self.dbar[ind][::skip]
elif q[0] == 'u':
dat = self.u[ind,:][::skip,:]
elif q[0] == 'v':
dat = self.v[ind,:][::skip,:]
dat0 = r**(-.5)
else:
print('{} not a valid choice!'.format(q))
return
res = np.zeros((ny,ny))
nm = len(self.mvals)
for i in range(len(r)):
res[i,:] = sum([np.real( dat[i,j]*np.exp(1j*m*phi)) for j,m in enumerate(self.mvals) if m not in skiplist and m <= mtop])/nm
if divbar:
res /= dat0[:,np.newaxis]
res = (res-shift)/norm
if full:
res += dat0[:,np.newaxis]
vmin = kargs.pop('vmin',res.min())
vmax = kargs.pop('vmax',res.max())
cmap = kargs.pop('cmap','viridis')
if log:
norm = colors.LogNorm(vmin=vmin,vmax=vmax)
else:
norm = colors.Normalize(vmin=vmin,vmax=vmax)
if ax is None:
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111,polar=cartesian)
if cartesian:
if contour:
img = ax.contour(pp,rr,res,100,cmap=cmap)
else:
img = ax.pcolormesh(pp,rr,res,cmap=cmap,norm=norm)
else:
if contour:
img = ax.contour(rr,pp,res,100,cmap=cmap)
else:
img = ax.pcolormesh(rr,pp,res,cmap=cmap,norm=norm)
if clrbar:
plt.colorbar(img,ax=ax)
if cartesian:
ax.set_yticklabels('')
ax.set_xticklabels('')
ax.set_xlabel('')
ax.set_ylabel('')
else:
ax.minorticks_on()
ax.set_xlabel('$r/a$')
ax.set_ylabel('$\\phi-\\phi_p$')
return fig,ax
def burgers_t(self,r,mu=.5,d=.5,h=.05):
from scipy.integrate import quad
try :
t = np.zeros(r.shape)
for i,j in enumerate(r):
t[i] =abs(quad(lambda s: abs(s**(1.5) - 1)**(1.5) * s**((5.*d+mu)/2 - 11./4), 1,j)[0])
except AttributeError:
t =abs(quad(lambda s: abs(s**(1.5) - 1)**(1.5) * s**((5.*d+mu)/2 - 11./4), 1,r)[0])
return (3./(2**(5./4) * h**(5./2))) * t
def spiral_loc(self,r,d=.5,h=.05):
if d == .5:
return self.mod_pi( (np.sign(r-1)/h)*(2./3)*(1 - r**(1.5) + 1.5*np.log(r)) )
elif d == -1:
return self.mod_pi( (np.sign(r-1)/h)*(2./3)*(1 - r**(-1.5) - 1.5*np.log(r)) )
else:
return self.mod_pi( (np.sign(r-1)/h)*( r**(d-.5)/(d-.5) - r**(d+1)/(d+1) - 3./( (2*d-1)*(d+1))))
def convert_coords(self,r,mu=.5,d=.5,h=.05,**kargs):
from scipy.integrate import quad
phi,sig,_ = self.get_ring(r,**kargs)
chi = sig*( 2**(.25) * h**(.5) * r**((1. - mu -3*d)/2) / np.sqrt(abs(r**(1.5)-1)))
if d == .5:
phi0 = (np.sign(r-1)/h)*(2./3)*(1 - r**(1.5) + 1.5*np.log(r))
elif d == -1:
phi0 = (np.sign(r-1)/h)*(2./3)*(1 - r**(-1.5) - 1.5*np.log(r))
else:
phi0 = (np.sign(r-1)/h)*( r**(d-.5)/(d-.5) - r**(d+1)/(d+1) - 3./( (2*d-1)*(d+1)))
#phi = np.array([self.mod_pi(p) for p in phi])
eta = -1.5/h * (phi - self.mod_pi(phi0))
inds = np.argsort(eta)
t = (3./(2**(5./4) * h**(5./2))) * abs(quad(lambda s: abs(s**(1.5) - 1)**(1.5) * s**((5.*d+mu)/2 - 11./4), 1,r)[0])
return t,eta[inds],chi[inds]
def convert_coords_back(self,t,eta,chi,r=None,mu=.5,d=.5,h=.05,norm=1):
from scipy.optimize import fsolve
from scipy.integrate import quad
if r is None:
r = fsolve( lambda x: t - (3./(2**(5./4) * h**(5./2))) * abs(quad(lambda s: abs(s**(1.5) - 1)**(1.5) * s**((5.*d+mu)/2 - 11./4), 1,x)[0]),.8)[0]
sig = chi/( 2**(.25) * h**(.5) * r**((1. - mu -3*d)/2) / np.sqrt(abs(r**(1.5)-1)))
sig *= norm
if d == .5:
            phi0 = (np.sign(r-1)/h)*(2./3)*(1 - r**(1.5) + 1.5*np.log(r))
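        # Note (illustrative, not from the original source): get_ring() and
        # fullplot() rebuild a real field from its complex azimuthal modes as
        #   f(phi) = sum_m Re(f_m * exp(1j*m*phi)) / nm
        # so a single mode f_m = a - 1j*b contributes
        #   (a*cos(m*phi) + b*sin(m*phi)) / nm.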
"""
Reference: https://github.com/qqwweee/keras-yolo3.git
"""
from glob import glob
from PIL import Image
import numpy as np
from apphelper.image import get_box_spilt,read_voc_xml,resize_im
class YOLO_Kmeans:
def __init__(self, cluster_number,root,scales = [416,512,608,608,608,768,960,1024],splitW=8):
self.cluster_number = cluster_number
self.filenames =glob(root)
self.scales = scales
self.splitW = splitW
boxWH = self.voc2boxes()
res = self.kmeans(np.array(boxWH), k=cluster_number)
self.anchors = self.gen_anchors(sorted(res,key=lambda x:x[1]))
def gen_anchors(self, boxWH):
        row = np.shape(boxWH)
"""
The transformers in this module act on a single numpy array.
"""
from abc import ABC, abstractmethod
import logging
import numpy as np
from PIL import Image
from typing import Tuple, Dict
LOG = logging.getLogger(__name__)
class NumpyTransform(ABC):
@staticmethod
def check_type_with_warning(input_: np.ndarray) -> None:
if not isinstance(input_, np.ndarray):
raise TypeError(f'Attempting to use a numpy transform with input of type {type(input_)}. Abort.')
@abstractmethod
def __call__(self, *args, **kwargs) -> np.ndarray:
raise NotImplementedError
class Numpy2PILTransform(NumpyTransform):
"""Transforms a Numpy nd.array into a PIL image."""
    def __call__(self, array: np.ndarray) -> Image.Image:
self.check_type_with_warning(array)
return Image.fromarray(array)
class NumpyReshapeTransform(NumpyTransform):
"""Take a flattened 1D numpy array and transform into new 2D shape and returns a PIL image (for torchvision)."""
def __init__(self, new_shape: Tuple[int, int]) -> None:
self._new_shape = new_shape
def __call__(self, array: np.ndarray) -> np.ndarray:
self.check_type_with_warning(array)
return np.reshape(array, self._new_shape)
class NumpyNormalizeTransform(NumpyTransform):
"""Normalizes a numpy array to have zero mean and unit variance.
Note: This transformer takes NO mask into account!
"""
def __call__(self, array: np.ndarray) -> np.ndarray:
self.check_type_with_warning(array)
return normalize_2d_array(array)
class NumpyNormalize01Transform(NumpyTransform):
"""Normalizes the data such that it lies in the range of [0, 1].
Note: This transformer takes NO mask into account!
"""
def __call__(self, array: np.ndarray) -> np.ndarray:
self.check_type_with_warning(array)
        return (array - np.min(array)) / (np.max(array) - np.min(array))  # min-max scaling to [0, 1]
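# Illustrative usage (not part of the original module; names and shapes are
# assumed): the transforms are meant to be chained, e.g. with torchvision:
#   transform = torchvision.transforms.Compose([
#       NumpyReshapeTransform((28, 28)),   # (784,) -> (28, 28)
#       Numpy2PILTransform(),              # uint8 array -> PIL.Image
#   ])
#   pil_image = transform(flat_uint8_array)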
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
from collections import namedtuple
import numpy as np
from ..extern import six
from . import angle_utilities as util
from .. import units as u
from ..utils import isiterable
from ..utils.compat import NUMPY_LT_1_14_1
__all__ = ['Angle', 'Latitude', 'Longitude']
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's'))
dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's'))
signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's'))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats::
Angle('10.2345d')
Angle(['10.2345d', '-20d'])
Angle('1:2:30.43 degrees')
Angle('1 2 0 hours')
Angle(np.arange(1, 8), unit=u.deg)
Angle('1°2′3″')
Angle('1d2m3.4s')
Angle('-1h2m3s')
Angle('-1h2.5m')
Angle('-1:2.5', unit=u.deg)
Angle((10, 11, 12), unit='hourangle') # (h, m, s)
Angle((-1, 2, 3), unit=u.deg) # (d, m, s)
Angle(10.2345 * u.deg)
Angle(Angle(10.2345 * u.deg))
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : `~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=None, copy=True):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, six.string_types):
angle, angle_unit = util.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, angle_unit)
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif (isiterable(angle) and
not (isinstance(angle, np.ndarray) and
angle.dtype.kind not in 'SUVO')):
angle = [Angle(x, unit, copy=False) for x in angle]
return super(Angle, cls).__new__(cls, angle, unit, dtype=dtype,
copy=copy)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return util.hms_to_hours(*angle)
elif unit == u.degree:
return util.dms_to_degrees(*angle)
else:
raise u.UnitsError("Can not parse '{0}' as unit '{1}'"
.format(angle, unit))
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit is u.hour else unit
def _set_unit(self, unit):
super(Angle, self)._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*util.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*util.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(np.sign(self.degree),
*util.degrees_to_dms(np.abs(self.degree)))
def to_string(self, unit=None, decimal=False, sep='fromunit',
precision=None, alwayssign=False, pad=False,
fields=3, format=None):
""" A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `True`, a decimal representation will be used, otherwise
the returned string will be in sexagesimal form.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
sep='-:' would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
None: {
u.degree: 'dms',
u.hourangle: 'hms'},
'latex': {
u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
u.hourangle: [r'^\mathrm{h}', r'^\mathrm{m}', r'^\mathrm{s}']},
'unicode': {
u.degree: '°′″',
u.hourangle: 'ʰᵐˢ'}
}
if sep == 'fromunit':
if format not in separators:
raise ValueError("Unknown format '{0}'".format(format))
seps = separators[format]
if unit in seps:
sep = seps[unit]
# Create an iterator so we can format each element of what
# might be an array.
if unit is u.degree:
if decimal:
values = self.degree
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'dms'
values = self.degree
func = lambda x: util.degrees_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit is u.hourangle:
if decimal:
values = self.hour
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{0:g}'.format
else:
if sep == 'fromunit':
sep = 'hms'
values = self.hour
func = lambda x: util.hours_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit.is_equivalent(u.radian):
if decimal:
values = self.to_value(unit)
if precision is not None:
func = ("{0:1." + str(precision) + "f}").format
else:
func = "{0:g}".format
elif sep == 'fromunit':
values = self.to_value(unit)
unit_string = unit.to_string(format=format)
if format == 'latex':
unit_string = unit_string[1:-1]
if precision is not None:
def plain_unit_format(val):
return ("{0:0." + str(precision) + "f}{1}").format(
val, unit_string)
func = plain_unit_format
else:
def plain_unit_format(val):
return "{0:g}{1}".format(val, unit_string)
func = plain_unit_format
else:
raise ValueError(
"'{0}' can not be represented in sexagesimal "
"notation".format(
unit.name))
else:
raise u.UnitsError(
"The unit value provided is not an angular unit.")
def do_format(val):
s = func(float(val))
if alwayssign and not s.startswith('-'):
s = '+' + s
if format == 'latex':
s = '${0}$'.format(s)
return s
format_ufunc = np.vectorize(do_format, otypes=['U'])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
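    # Illustrative example (not from the original source): by default the value
    # is rendered sexagesimally in its own unit, e.g.
    # Angle(10.2345, unit=u.deg).to_string() gives something like '10d14m04.2s';
    # passing decimal=True, a different unit, sep=':' or format='latex' changes
    # the representation accordingly.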
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `Angle` object is wrapped in
place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : str, `Angle`, angular `~astropy.units.Quantity`
Specifies a single value for the wrap angle. This can be any
object that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `Angle`
Returns
-------
out : Angle or `None`
If ``inplace is False`` (default), return new `Angle` object
with angles wrapped accordingly. Otherwise wrap in place and
return `None`.
"""
wrap_angle = Angle(wrap_angle) # Convert to an Angle
wrapped = np.mod(self - wrap_angle, 360.0 * u.deg) - (360.0 * u.deg - wrap_angle)
if inplace:
self[()] = wrapped
else:
return wrapped
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies lower bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : str, `Angle`, angular `~astropy.units.Quantity`, `None`
Specifies upper bound for checking. This can be any object
that can initialize an `Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
if NUMPY_LT_1_14_1:
def formatter(x):
return x.to_string(format=format)
else:
# In numpy 1.14.1, array2string formatters get passed plain numpy scalars instead
# of subclass array scalars, so we need to recreate an array scalar.
def formatter(x):
return self._new_view(x).to_string(format=format)
return np.array2string(self, formatter={'all': formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format='latex')
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, Angle) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `Angle`. The
angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : :class:`~astropy.units.UnitBase`, str, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super(Latitude, cls).__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
If not given, the check is done on the object itself"""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
lower = u.degree.to(angles.unit, -90.0)
upper = u.degree.to(angles.unit, 90.0)
if np.any(angles.value < lower) or
|
np.any(angles.value > upper)
|
numpy.any
|
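# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the astropy snippet above): exercises the
# wrapping, bounds-checking and Latitude range validation documented in the
# docstrings. Assumes astropy is installed; values are illustrative only.
import astropy.units as u
from astropy.coordinates import Angle, Latitude

angles = Angle([-20.0, 150.0, 350.0] * u.deg)
print(angles.wrap_at(180 * u.deg).degree)          # [-20. 150. -10.]
print(angles.is_within_bounds('0d', '360d'))       # False: -20 deg is below the lower bound
print(angles.is_within_bounds(-30 * u.deg, None))  # True

try:
    Latitude(100 * u.deg)                          # outside the -90..+90 deg range
except ValueError as err:
    print("Latitude rejected out-of-range value:", err)
# ---------------------------------------------------------------------------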
#!/usr/bin/python
#Essential imports. HereFishy will absolutely not run without these installed
import time, math, os
from mss import mss
import numpy as np
#Necessary for some _dev functions
#from matplotlib import pyplot as plt
#import cv2
#from scipy.spatial import distance
#Import OS specific modules
if os.name == 'posix': #unix systems
#import pyautogui #should be phased out by now
from Quartz.CoreGraphics import CGEventCreateMouseEvent, CGEventPost, \
kCGEventMouseMoved, kCGEventLeftMouseDown, \
kCGEventLeftMouseUp, kCGMouseButtonLeft, kCGHIDEventTap, \
kCGEventRightMouseDown, kCGEventRightMouseUp, CGEventCreateKeyboardEvent, \
kCGSessionEventTap
elif os.name == 'nt': #windows systems
pass
else:
raise Exception("Operating system not supported. Please run in " + \
"Mac OS X or Windows (os.name='" + os.name + "').")
def show_img(img):
'''
Dev function to show image
'''
cv2.imshow('image',img)
cv2.waitKey()
cv2.destroyAllWindows()
#end function show_img
def is_like_rect(points,ang_range=(70,120)):
'''
Given four points as ndarray, determines if the shape is rectangle-like
by calculating the angle between each pair of adjacent sides.
'''
pts = np.concatenate([points,points],axis=0)
for i in range(4):
s1 = pts[i] - pts[i+1]
s2 = pts[i+1] - pts[i+2]
lower = (np.linalg.norm(s1) * np.linalg.norm(s2))
if lower == 0:
return False
try:
angle = np.degrees(math.acos(np.dot(s1,s2) / lower))
except ValueError: #math domain error
return False
if angle < ang_range[0] or angle > ang_range[1]:
return False
return True
#end function is_like_rect
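def _demo_is_like_rect():
    '''
    Added sketch (not part of the original script): a unit square passes the
    angle test above, while four collinear points do not.
    '''
    square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
    collinear = np.array([[0, 0], [1, 0], [2, 0], [3, 0]], dtype=float)
    print(is_like_rect(square))     # True: every interior angle is 90 degrees
    print(is_like_rect(collinear))  # False: 0-degree angles fall outside ang_range
#end function _demo_is_like_rect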
def rect_bounds(pts):
x_min = np.min(pts[:,0])
x_max = np.max(pts[:,0])
y_min = np.min(pts[:,1])
y_max = np.max(pts[:,1])
return(x_min,x_max,y_min,y_max)
#end function rect_bounds
def pts_in_box(query,subject):
'''
Determines which pts in query lie inside rectangle defined by
subject
'''
x_min = np.min(subject[:,0])
x_max = np.max(subject[:,0])
y_min = np.min(subject[:,1])
y_max = np.max(subject[:,1])
return np.array([
(x_min < pt[0] < x_max) and (y_min < pt[1] < y_max)
for pt in query
])
#end function pts_in_box
def sortpts_clockwise(A):
'''
Given four 2D points in array A, sorts points clockwise.
Taken from
https://stackoverflow.com/questions/30088697/4-1-2-numpy-array-sort-clockwise
'''
# Sort A based on Y(col-2) coordinates
sortedAc2 = A[
|
np.argsort(A[:,1])
|
numpy.argsort
|
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: <NAME>
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import shutil
sys.path.append("../../pyUSID/")
from pyUSID.io import hdf_utils, Dimension, USIDataset
from .. import data_utils
if sys.version_info.major == 3:
unicode = str
class TestSimple(unittest.TestCase):
def setUp(self):
data_utils.make_beps_file()
data_utils.make_sparse_sampling_file()
data_utils.make_incomplete_measurement_file()
data_utils.make_relaxation_file()
def tearDown(self):
for file_path in [data_utils.std_beps_path,
data_utils.sparse_sampling_path,
data_utils.incomplete_measurement_path,
data_utils.relaxation_path]:
data_utils.delete_existing_file(file_path)
class TestCheckIfMain(TestSimple):
def test_legal(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
expected_dsets = [h5_f['/Raw_Measurement/source_main'],
h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'],
h5_f['/Raw_Measurement/source_main-Fitter_001/results_main']]
for dset in expected_dsets:
self.assertTrue(hdf_utils.check_if_main(dset, verbose=False))
def test_illegal_01(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
not_main_dsets = [h5_f,
4.123,
np.arange(6),
h5_f['/Raw_Measurement/Position_Indices'],
h5_f['/Raw_Measurement/source_main-Fitter_000'],
h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices'],
h5_f['/Raw_Measurement/Spectroscopic_Values']]
for dset in not_main_dsets:
self.assertFalse(hdf_utils.check_if_main(dset))
def test_anc_not_dsets(self):
temp_path = 'test.h5'
data_utils.delete_existing_file(temp_path)
with h5py.File(temp_path, mode='w') as h5_f:
h5_dset = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
for anc_dset_name in ['Position_Indices', 'Position_Values',
'Spectroscopic_Indices', 'Spectroscopic_Values']:
h5_dset.attrs[anc_dset_name] = h5_f.ref
self.assertFalse(hdf_utils.check_if_main(h5_dset, verbose=False))
os.remove(temp_path)
def test_missing_str_attrs(self):
temp_path = 'test.h5'
data_utils.delete_existing_file(temp_path)
with h5py.File(temp_path, mode='w') as h5_f:
h5_dset = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
for anc_dset_name in ['Position_Indices', 'Position_Values',
'Spectroscopic_Indices', 'Spectroscopic_Values']:
h5_dset.attrs[anc_dset_name] = h5_dset.ref
self.assertFalse(hdf_utils.check_if_main(h5_dset, verbose=False))
os.remove(temp_path)
def test_invalid_str_attrs(self):
temp_path = 'test.h5'
data_utils.delete_existing_file(temp_path)
with h5py.File(temp_path, mode='w') as h5_f:
h5_dset = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
h5_dset.attrs['quantity'] = [1, 2, 3]
h5_dset.attrs['units'] = 4.1234
for anc_dset_name in ['Position_Indices', 'Position_Values',
'Spectroscopic_Indices', 'Spectroscopic_Values']:
h5_dset.attrs[anc_dset_name] = h5_dset.ref
self.assertFalse(hdf_utils.check_if_main(h5_dset, verbose=False))
os.remove(temp_path)
def test_anc_shapes_not_matching(self):
temp_path = 'test.h5'
data_utils.delete_existing_file(temp_path)
with h5py.File(temp_path, mode='w') as h5_f:
h5_main = h5_f.create_dataset('Main', data=np.random.rand(2, 3))
h5_pos_ind = h5_f.create_dataset('Pos_Inds', data=np.random.rand(2, 1))
h5_spec_ind = h5_f.create_dataset('Spec_Inds', data=np.random.rand(1, 5))
h5_main.attrs['quantity'] = 'quant'
h5_main.attrs['units'] = 'unit'
for anc_dset_name in ['Position_Indices', 'Position_Values']:
h5_main.attrs[anc_dset_name] = h5_pos_ind.ref
for anc_dset_name in ['Spectroscopic_Indices', 'Spectroscopic_Values']:
h5_main.attrs[anc_dset_name] = h5_spec_ind.ref
self.assertFalse(hdf_utils.check_if_main(h5_main, verbose=False))
os.remove(temp_path)
class TestGetSourceDataset(TestSimple):
def test_legal(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
h5_groups = [h5_f['/Raw_Measurement/source_main-Fitter_000'],
h5_f['/Raw_Measurement/source_main-Fitter_001']]
h5_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
for h5_grp in h5_groups:
self.assertEqual(h5_main, hdf_utils.get_source_dataset(h5_grp))
def test_invalid_type(self):
with self.assertRaises(TypeError):
_ = hdf_utils.get_source_dataset('/Raw_Measurement/Misc')
def test_illegal(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
with self.assertRaises(ValueError):
_ = hdf_utils.get_source_dataset(h5_f['/Raw_Measurement/Misc'])
class TestGetAllMain(TestSimple):
def test_invalid_type(self):
with self.assertRaises(TypeError):
_ = hdf_utils.get_all_main("sdsdsds")
with self.assertRaises(TypeError):
_ = hdf_utils.get_all_main(np.arange(4))
def test_legal(self):
with h5py.File(data_utils.std_beps_path, mode='r') as h5_f:
expected_dsets = [h5_f['/Raw_Measurement/source_main'],
h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'],
h5_f['/Raw_Measurement/source_main-Fitter_001/results_main']]
main_dsets = hdf_utils.get_all_main(h5_f, verbose=False)
# self.assertEqual(set(main_dsets), set(expected_dsets))
self.assertEqual(len(main_dsets), len(expected_dsets))
self.assertTrue(np.all([x.name == y.name for x, y in zip(main_dsets, expected_dsets)]))
class TestWriteIndValDsets(TestSimple):
def base_bare_minimum_inputs(self, slow_to_fast, is_spectral):
num_cols = 3
num_rows = 2
sizes = [num_cols, num_rows]
dim_names = ['X', 'Y']
dim_units = ['nm', 'um']
if slow_to_fast:
dim_names = dim_names[::-1]
dim_units = dim_units[::-1]
sizes = sizes[::-1]
descriptor = []
for length, name, units in zip(sizes, dim_names, dim_units):
descriptor.append(Dimension(name, units, np.arange(length)))
inds_data = np.vstack((np.tile(np.arange(num_cols), num_rows),
np.repeat(np.arange(num_rows), num_cols)))
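# Annotation (added): for num_cols=3, num_rows=2 the fast-to-slow index matrix
# built above is
#   [[0, 1, 2, 0, 1, 2],    <- X (fast) cycles within each row
#    [0, 0, 0, 1, 1, 1]]    <- Y (slow) advances after each full X sweep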
if not is_spectral:
inds_data = inds_data.T
if slow_to_fast:
func = np.flipud if is_spectral else np.fliplr
inds_data = func(inds_data)
file_path = 'test_write_ind_val_dsets.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_inds, h5_vals = hdf_utils.write_ind_val_dsets(h5_f, descriptor, is_spectral=is_spectral,
slow_to_fast=slow_to_fast)
data_utils.validate_aux_dset_pair(self, h5_f, h5_inds, h5_vals, dim_names, dim_units, inds_data,
is_spectral=is_spectral, slow_to_fast=slow_to_fast)
os.remove(file_path)
def test_legal_bare_minimum_pos_f2s(self):
self.base_bare_minimum_inputs(False, False)
def test_legal_bare_minimum_pos_s2f(self):
self.base_bare_minimum_inputs(True, False)
def test_legal_bare_minimum_spec_f2s(self):
self.base_bare_minimum_inputs(False, True)
def test_legal_bare_minimum_spec_s2f(self):
self.base_bare_minimum_inputs(True, True)
def test_legal_override_steps_offsets_base_name(self):
num_cols = 2
num_rows = 3
dim_names = ['X', 'Y']
dim_units = ['nm', 'um']
col_step = 0.25
row_step = 0.05
col_initial = 1
row_initial = 0.2
descriptor = []
for length, name, units, step, initial in zip([num_cols, num_rows], dim_names, dim_units,
[col_step, row_step], [col_initial, row_initial]):
descriptor.append(Dimension(name, units, initial + step * np.arange(length)))
new_base_name = 'Overriden'
# Sending in Fast to Slow but what comes out is slow to fast
spec_inds = np.vstack((np.tile(np.arange(num_cols), num_rows),
np.repeat(np.arange(num_rows), num_cols)))
spec_vals = np.vstack((np.tile(np.arange(num_cols), num_rows) * col_step + col_initial,
np.repeat(np.arange(num_rows), num_cols) * row_step + row_initial))
file_path = 'test_write_ind_val_dsets.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
h5_group = h5_f.create_group("Blah")
h5_inds, h5_vals = hdf_utils.write_ind_val_dsets(h5_group, descriptor, is_spectral=True,
base_name=new_base_name, slow_to_fast=False)
data_utils.validate_aux_dset_pair(self, h5_group, h5_inds, h5_vals, dim_names, dim_units, spec_inds,
vals_matrix=spec_vals, base_name=new_base_name, is_spectral=True, slow_to_fast=False)
os.remove(file_path)
def test_illegal(self):
sizes = [3, 2]
dim_names = ['X', 'Y']
dim_units = ['nm', 'um']
descriptor = []
for length, name, units in zip(sizes, dim_names, dim_units):
descriptor.append(Dimension(name, units, np.arange(length)))
file_path = 'test_write_ind_val_dsets.h5'
data_utils.delete_existing_file(file_path)
with h5py.File(file_path, mode='w') as h5_f:
pass
with self.assertRaises(ValueError):
# h5_f should be valid in terms of type but closed
_ = hdf_utils.write_ind_val_dsets(h5_f, descriptor)
os.remove(file_path)
class TestWriteReducedAncDsets(TestSimple):
def test_spec_2d_to_1d(self):
duplicate_path = 'copy_test_hdf_utils.h5'
data_utils.delete_existing_file(duplicate_path)
shutil.copy(data_utils.std_beps_path, duplicate_path)
with h5py.File(duplicate_path, mode='r+') as h5_f:
h5_spec_inds_orig = h5_f['/Raw_Measurement/Spectroscopic_Indices']
h5_spec_vals_orig = h5_f['/Raw_Measurement/Spectroscopic_Values']
new_base_name = 'Blah'
# cycle_starts = np.where(h5_spec_inds_orig[0] == 0)[0]
h5_spec_inds_new, h5_spec_vals_new = hdf_utils.write_reduced_anc_dsets(h5_spec_inds_orig.parent,
h5_spec_inds_orig,
h5_spec_vals_orig,
'Bias',
basename=new_base_name)
dim_names = ['Cycle']
dim_units = ['']
ref_data = np.expand_dims(
|
np.arange(2)
|
numpy.arange
|
#!/usr/bin/env python
# coding: utf-8
"""
Call this script with the path of p_15_img_metadata folder
Example: /media/spectors/HDD320/lidar/20201218_fresh/p_15_img_metadata
Only use it if the 3 CSV files in this folder have the same number of rows.
"""
import time, os, sys, datetime, platform, logging, shutil, re
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
from osgeo import gdal, ogr, osr
# from scipy.interpolate import interp1d
from collections import OrderedDict
import osgeo
GDAL_VERSION_LESS_THAN_3 = (True if int(osgeo.__version__[0]) < 3 else False)
this_script = sys.argv[0]
WGS84 = 4326
EPSG = 32632
proj_lon_name = f'lon_{EPSG}'
proj_lat_name = f'lat_{EPSG}'
if len(sys.argv)<=1:
print("ERROR\n\nPlease provide the absolute path of the img_metadata directory.\n\nEXITING")
sys.exit(0)
else:
try: top_dir = sys.argv[1].rstrip(os.path.sep)
except Exception as e: print(e); sys.exit(0)
def get_year_month_day(gps_week, week_day):
a = datetime.datetime(1980, 1, 6, tzinfo=datetime.timezone.utc) + \
datetime.timedelta(weeks=int(gps_week),
days=int(week_day))
return a.year, a.month, a.day
def get_utc_time(gps_week, leap_seconds, week_seconds):
a = datetime.datetime(1980, 1, 6, tzinfo=datetime.timezone.utc) + \
datetime.timedelta(weeks = int(gps_week),
seconds=float(week_seconds-leap_seconds))
return a.timestamp()
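def _demo_gps_time():
    # Added sketch, not part of the original pipeline: illustrates the two GPS
    # helpers above with made-up values. GPS week 2140 began on Sunday
    # 2021-01-10; with 18 leap seconds, 3600 s into that week corresponds to
    # 2021-01-10 00:59:42 UTC.
    print(get_year_month_day(2140, 0))   # (2021, 1, 10)
    ts = get_utc_time(2140, 18, 3600)
    print(datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc))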
def check_same(df_column):
return np.all(df_column == df_column[0])
def check_same_colnames(df, list_of_colnames):
for colname in list_of_colnames:
if not check_same(df[colname]):
print(colname)
return False
return True
def coord_transf(lon, lat, source, target):
lon = lon[:,np.newaxis]
lat = lat[:,np.newaxis]
src = osr.SpatialReference()
src.ImportFromEPSG(source)
dst = osr.SpatialReference()
dst.ImportFromEPSG(target)
if not GDAL_VERSION_LESS_THAN_3:
src.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)
dst.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)
ct = osr.CoordinateTransformation(src, dst)
new_coords = np.array(ct.TransformPoints(np.hstack((lon, lat))))
new_lon, new_lat = new_coords[:,0], new_coords[:,1]
return new_lon, new_lat
def coord_to_str(coordinate):
degrees = coordinate.astype(int)
min_sec = (coordinate - degrees) * 60
minutes = min_sec.astype(int)
seconds = (min_sec - minutes) * 60
out = pd.DataFrame(columns = ['deg', 'min', 'sec'])
out['deg'] = pd.Series(degrees)
out['min'] = pd.Series(minutes)
out['sec'] = pd.Series(seconds)
return out['deg'].astype('string')+' deg '+out['min'].astype('string')+"' "+out['sec'].astype('string')+'"'
def lat_to_str(coordinate):
magnitude = coord_to_str(np.abs(coordinate))
out = pd.DataFrame(columns = ['sign', 'value'])
out['sign'] = pd.Series(np.sign(coordinate))
out.loc[out['sign'] >= 0, 'value'] = 'N'
out.loc[out['sign'] < 0, 'value'] = 'S'
return magnitude+' '+out['value']
def lon_to_str(coordinate):
magnitude = coord_to_str(np.abs(coordinate))
out = pd.DataFrame(columns = ['sign', 'value'])
out['sign'] = pd.Series(np.sign(coordinate))
out.loc[out['sign'] >= 0, 'value'] = 'E'
out.loc[out['sign'] < 0, 'value'] = 'W'
return magnitude+' '+out['value']
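def _demo_coord_strings():
    # Added sketch with hypothetical coordinates (not from the survey data):
    # shows the EXIF-style degree/minute/second strings the helpers above build.
    lats = np.array([48.137, -33.865])
    lons = np.array([11.575, 151.209])
    print(lat_to_str(lats).tolist())  # 48 deg 8' 13.2..." N   /  33 deg 51' 54.0..." S
    print(lon_to_str(lons).tolist())  # 11 deg 34' 30.0..." E  /  151 deg 12' 32.4..." E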
os.chdir(top_dir)
if os.getcwd() != top_dir:
print("Something went wrong.")
print(f"cwd: {os.getcwd}")
sys.exit(0)
papl_df = pd.read_csv('PAPL.txt', sep = ";", index_col = 0, na_values = "NAN")
ptnl_df = pd.read_csv('PTNL.txt', sep = ";", index_col = 0, na_values = "NAN")
logf_df = pd.read_csv('LOGFILE.txt', sep = ";", index_col = 0, na_values = "NAN")
for df in [papl_df, ptnl_df, logf_df]:
print(df.shape)
#print(df.head(5))
#print(df.tail(5))
if not (papl_df.shape[0] == ptnl_df.shape[0] == logf_df.shape[0]):
print("Number of records does not match.\n\nExiting")
sys.exit(0)
if not check_same_colnames(ptnl_df, ["event_name", "gps_week", "week_day", "leap_seconds"]):
print("check the columns of PTNL")
sys.exit(0)
if not check_same_colnames(logf_df, ["format"]):
print("check the columns of LOGFILE")
sys.exit(0)
GPS_WEEK = ptnl_df.loc[0,"gps_week"]
LEAP_SECONDS = int(ptnl_df.loc[0,"leap_seconds"])
YEAR, MONTH, DAY = get_year_month_day(GPS_WEEK, ptnl_df.loc[0,"week_day"])
clean_df = pd.DataFrame(index = papl_df.index,
columns = ["SourceFile", "FileName", "Model", "utc_time",
"longitude", "latitude", "elevation",
"ExposureTime", "ISO", "FNumber", "FocalLength", "Make",
"GPSLatitude", "GPSLongitude",
"GimbalRollDegree", "GimbalPitchDegree", "GimbalYawDegree"]) #"CameraRoll", "CameraPitch", "CameraYaw"
for row in papl_df.index:
clean_df.loc[row, "utc_time"] = get_utc_time(GPS_WEEK, LEAP_SECONDS, papl_df.loc[row, "week_seconds"])
proj_lon, proj_lat = coord_transf(papl_df['lon'].values,
papl_df['lat'].values,
WGS84, EPSG)
elev = papl_df['elevation'].values
# get position.. _UAV means in UAV coordinates
X_UAV = np.ones((clean_df.shape[0],), dtype = np.float64) * 0.0085 #measured offset between cam and puck
Y_UAV = np.zeros_like(X_UAV) #looks like 0, but most likely wrong...
Z_UAV = np.zeros_like(X_UAV)
XYZ_UAV = np.vstack((X_UAV, Y_UAV, Z_UAV)).T
XYZ_UAV = XYZ_UAV[:,np.newaxis,:]
#print(XYZ_UAV.shape)
#now rotate to real world...
yaw_correction, pit_correction, rol_correction = -np.radians(papl_df["yaw"].values), -np.radians(papl_df["pitch"].values), -np.radians(papl_df["roll"].values)
cos_gamma = np.cos(rol_correction)
sin_gamma = np.sin(rol_correction)
cos_beta = np.cos(pit_correction)
sin_beta = np.sin(pit_correction)
cos_alpha = np.cos(yaw_correction)
sin_alpha = np.sin(yaw_correction)
R_gamma = np.array([[ np.ones_like(cos_gamma),
|
np.zeros_like(cos_gamma)
|
numpy.zeros_like
|
import cv2
import numpy as np
import sys
import os
import time
def genGaussiankernel(width, sigma):
x = np.arange(-int(width/2), int(width/2)+1, 1, dtype=np.float32)
x2d, y2d = np.meshgrid(x, x)
kernel_2d = np.exp(-(x2d ** 2 + y2d ** 2) / (2 * sigma ** 2))
kernel_2d = kernel_2d / np.sum(kernel_2d)
return kernel_2d
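def _check_gaussian_kernel():
    # Added sanity check (not in the original script): the generated kernel
    # should be square, normalized and symmetric.
    k = genGaussiankernel(5, 1.0)
    print(k.shape)                   # (5, 5)
    print(np.isclose(k.sum(), 1.0))  # True
    print(np.allclose(k, k.T))       # True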
def pyramid(im, sigma=1, prNum=6):
height_ori, width_ori, ch = im.shape
G = im.copy()
pyramids = [G]
# gaussian blur
Gaus_kernel2D = genGaussiankernel(5, sigma)
# downsample
for i in range(1, prNum):
G = cv2.filter2D(G, -1, Gaus_kernel2D)
height, width, _ = G.shape
G = cv2.resize(G, (int(width/2), int(height/2)))
pyramids.append(G)
# upsample
for i in range(1, prNum):
curr_im = pyramids[i]
for j in range(i):
if j < i-1:
im_size = (curr_im.shape[1]*2, curr_im.shape[0]*2)
else:
im_size = (width_ori, height_ori)
curr_im = cv2.resize(curr_im, im_size)
curr_im = cv2.filter2D(curr_im, -1, Gaus_kernel2D)
pyramids[i] = curr_im
return pyramids
def foveat_img(im, fixs):
"""
im: input image
fixs: sequences of fixations of form [(x1, y1), (x2, y2), ...]
This function outputs the foveated image for the given input image and fixations.
"""
sigma=0.248
prNum = 6
As = pyramid(im, sigma, prNum)
height, width, _ = im.shape
# compute coef
p = 1 # blur strength
k = 3 # size of foveation
alpha = 5 # half-resolution eccentricity: R = alpha/(theta+alpha) falls to 0.5 at theta == alpha
x = np.arange(0, width, 1, dtype=np.float32)
y = np.arange(0, height, 1, dtype=np.float32)
x2d, y2d = np.meshgrid(x, y)
theta = np.sqrt((x2d - fixs[0][0]) ** 2 + (y2d - fixs[0][1]) ** 2) / p
for fix in fixs[1:]:
theta = np.minimum(theta, np.sqrt((x2d - fix[0]) ** 2 + (y2d - fix[1]) ** 2) / p)
R = alpha / (theta + alpha)
Ts = []
for i in range(1, prNum):
Ts.append(np.exp(-((2 ** (i-3)) * R / sigma) ** 2 * k))
Ts.append(np.zeros_like(theta))
# omega
omega = np.zeros(prNum)
for i in range(1, prNum):
omega[i-1] = np.sqrt(np.log(2)/k) / (2**(i-3)) * sigma
omega[omega>1] = 1
# layer index
layer_ind = np.zeros_like(R)
for i in range(1, prNum):
ind = np.logical_and(R >= omega[i], R <= omega[i - 1])
layer_ind[ind] = i
# B
Bs = []
for i in range(1, prNum):
Bs.append((0.5 - Ts[i]) / (Ts[i-1] - Ts[i] + 1e-5))
# M
Ms = np.zeros((prNum, R.shape[0], R.shape[1]))
for i in range(prNum):
ind = layer_ind == i
if np.sum(ind) > 0:
if i == 0:
Ms[i][ind] = 1
else:
Ms[i][ind] = 1 - Bs[i-1][ind]
ind = layer_ind - 1 == i
if np.sum(ind) > 0:
Ms[i][ind] = Bs[i][ind]
print('num of full-res pixel', np.sum(Ms[0] == 1))
# generate periphery image
im_fov =
|
np.zeros_like(As[0], dtype=np.float32)
|
numpy.zeros_like
|
"""
Copyright (c) 2020, ETH Zurich, Computer Engineering Group
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: <NAME>
"""
"""
usage from command line: python3.7 config_read_parse.py jlink_serial, device_name, ts_prescaler,
trace_address0, access_mode0, trace_pc0,
trace_address1, access_mode1, trace_pc1,
trace_address2, access_mode2, trace_pc2,
trace_address3, access_mode3, trace_pc3)
"""
import pylink
try:
import StringIO
except ImportError:
import io as StringIO
import sys
import time
import datetime
import traceback
import numpy as np
import lib.flocklab as flocklab
jlinklibpath = '/opt/jlink/libjlinkarm.so'
logging_on = False # debug prints enabled?
running = True
debuglogfile = '/home/flocklab/log/dwt_daemon.log'
def stop_swo_read():
global running
running = False
### END stop_swo_read()
def log(msg):
try:
with open(debuglogfile, "a") as f:
f.write("%s %s\n" % (str(datetime.datetime.now()), msg.strip()))
f.flush()
except:
print("Failed to append message '%s' to log file." % msg)
def measure_sleep_overhead(numSamples=100, sleepTime=0.001):
"""
Measure the overhead of time.sleep() which is platform (kernel version) dependent.
Overhead can vary up to 300us with different kernel versions.
Parameters:
numSamples (int): number of samples for measuring sleep overhead
device_name (string): the device name (eg STM32L433CC)
Returns:
overhead of time.sleep() in seconds
"""
a = np.empty([numSamples], dtype=np.float64)
for i in range(numSamples):
a[i] = time.time()
time.sleep(sleepTime)
diff = np.diff(a)
return
|
np.mean(diff)
|
numpy.mean
|
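# Added standalone sketch (not part of the daemon above): how the measured
# value is typically interpreted. The helper above returns the mean measured
# interval, i.e. the requested sleep time plus the per-call overhead, so
# subtracting the requested sleep time isolates the overhead itself.
import time
import numpy as np

def sleep_overhead_seconds(num_samples=100, sleep_time=0.001):
    stamps = np.empty(num_samples, dtype=np.float64)
    for i in range(num_samples):
        stamps[i] = time.time()
        time.sleep(sleep_time)
    return float(np.mean(np.diff(stamps))) - sleep_time

if __name__ == "__main__":
    print("time.sleep() overhead: %.1f us" % (sleep_overhead_seconds() * 1e6))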
from __future__ import print_function
import time
import numpy as np
from numpy.testing import assert_array_equal as assertAE
import pytest
from rdp import rdp
nice_line = np.array([44, 95, 26, 91, 22, 90, 21, 90,
19, 89, 17, 89, 15, 87, 15, 86, 16, 85,
20, 83, 26, 81, 28, 80, 30, 79, 32, 74,
32, 72, 33, 71, 34, 70, 38, 68, 43, 66,
49, 64, 52, 63, 52, 62, 53, 59, 54, 57,
56, 56, 57, 56, 58, 56, 59, 56, 60, 56,
61, 55, 61, 55, 63, 55, 64, 55, 65, 54,
67, 54, 68, 54, 76, 53, 82, 52, 84, 52,
87, 51, 91, 51, 93, 51, 95, 51, 98, 50,
105, 50, 113, 49, 120, 48, 127, 48, 131, 47,
134, 47, 137, 47, 139, 47, 140, 47, 142, 47,
145, 46, 148, 46, 152, 46, 154, 46, 155, 46,
159, 46, 160, 46, 165, 46, 168, 46, 169, 45,
171, 45, 173, 45, 176, 45, 182, 45, 190, 44,
204, 43, 204, 43, 207, 43, 215, 40, 215, 38,
215, 37, 200, 37, 195, 41]).reshape(77, 2)
@pytest.fixture
def line(length=100):
arr = 5 * np.random.random_sample((length, 2))
return arr.cumsum(0)
@pytest.fixture
def line3d(length=150):
arr = 5 * np.random.random_sample((length, 3))
return arr.cumsum(0)
@pytest.fixture
def inf():
return float("inf")
def test_two():
"""
Point sequence with only two elements.
"""
assertAE(rdp(np.array([[0, 0], [4, 4]])),
np.array([[0, 0], [4, 4]]))
def test_hor():
"""
Horizontal line.
"""
assertAE(rdp(np.array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0]).reshape(5, 2)),
np.array([0, 0, 4, 0]).reshape(2, 2))
def test_ver():
"""
Vertical line.
"""
assertAE(rdp(np.array([0, 0, 0, 1, 0, 2, 0, 3, 0, 4]).reshape(5, 2)),
np.array([0, 0, 0, 4]).reshape(2, 2))
def test_diag():
"""
Diagonal line.
"""
assertAE(rdp(np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]).reshape(5, 2)),
np.array([0, 0, 4, 4]).reshape(2, 2))
def test_3d():
"""
3 dimensions.
"""
assertAE(rdp(np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4])
.reshape(5, 3)),
np.array([0, 0, 0, 4, 4, 4]).reshape(2, 3))
def test_eps0():
"""
Epsilon too small for the line to be simplified.
"""
assertAE(rdp(np.array([0, 0, 5, 1, 10, 1]).reshape(3, 2)),
np.array([0, 0, 5, 1, 10, 1]).reshape(3, 2))
def test_eps1():
"""
Epsilon large enough for the line to be simplified.
"""
assertAE(rdp(np.array([0, 0, 5, 1, 10, 1]).reshape(3, 2), 1),
|
np.array([0, 0, 10, 1])
|
numpy.array
|
import os
import pdb # noqa:F401
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, root)
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from src.functions_to_approximate import borehole_wrapper_iter
from src.functions_to_approximate import borehole_wrapper_vectorize
from src.functions_to_approximate import borehole_readable
from src.functions_to_approximate import zhou_phi_readable
from src.functions_to_approximate import zhou_phi_vectorize
from src.functions_to_approximate import zhou_phi_numba
from src.functions_to_approximate import zhou_readable
from src.functions_to_approximate import zhou_vectorize
from src.functions_to_approximate import zhou_numba
# from src.functions_to_approximate import borehole_step_numba_iter
# from src.functions_to_approximate import borehole_step_numba_vectorize
#########################################################################
# FIXTURES
#########################################################################
# BOREHOLE FUNCTION
@pytest.fixture
def setup_borehole_on_domain():
out = {}
out["input"] = np.array(
object=[
[0.1, 1000, 100000, 1000, 100, 800, 1500, 10000],
[0.1, 1000, 100000, 1000, 100, 800, 1500, 10000],
],
dtype=float,
)
return out
@pytest.fixture
def setup_borehole_truncated_input():
out = {}
out["input"] = np.array(
object=[[0.1, 1000, 100000, 1000, 100], [0.1, 1000, 100000, 1000, 100]],
dtype=float,
)
return out
@pytest.fixture
def setup_borehole_large_set():
grid_min = np.array(
object=[0.05, 100.0, 63070.0, 990.0, 63.1, 700.0, 1120.0, 1500.0], dtype=float,
)
grid_max = np.array(
object=[0.15, 50000.0, 115600.0, 1110.0, 116.0, 820.0, 1680.0, 15000.0],
dtype=float,
)
np.random.seed(121)
input = []
for _ in range(10000):
input_tmp = np.random.uniform(0.0, 1.0, len(grid_min))
input.append(input_tmp)
out = {}
out["input"] = (
input * grid_min + (np.ones((10000, len(grid_min))) - input) * grid_max
)
return out
# ZHOU (1998) FUNCTION
@pytest.fixture
def setup_zhou_on_domain():
out = {}
out["input"] = np.array(
object=[
[1 / 3, 1 / 3, 1 / 3, 1 / 3, 1 / 3, 1 / 3, 1 / 3, 1 / 3],
[2 / 3, 2 / 3, 2 / 3, 2 / 3, 2 / 3, 2 / 3, 2 / 3, 2 / 3],
],
dtype=float,
)
return out
@pytest.fixture
def setup_zhou_phi_on_domain():
out = {}
out["input"] = np.array(
object=[0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25], dtype=float,
)
return out
@pytest.fixture
def setup_zhou_large_set():
grid_min = np.array(object=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=float,)
grid_max = np.array(object=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], dtype=float,)
np.random.seed(123)
input = []
for _ in range(10000):
input_tmp = np.random.uniform(0.0, 1.0, len(grid_min))
input.append(input_tmp)
out = {}
out["input"] = (
input * grid_min + (np.ones((10000, len(grid_min))) - input) * grid_max
)
return out
#########################################################################
# TESTS
#########################################################################
# BOREHOLE FUNCTION
def test_borehole_readable_on_domain(setup_borehole_on_domain):
expected = np.array(
object=[
np.pi * 40000000 / (1001 * np.log(10000) + 3000000),
np.pi * 40000000 / (1001 * np.log(10000) + 3000000),
],
dtype=float,
)
actual = borehole_readable(**setup_borehole_on_domain)
assert_array_equal(actual, expected)
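# For reference: the expected values above are the closed-form borehole
# benchmark function evaluated at the fixture input. A hedged standalone
# version, assuming the parameter order [r_w, r, T_u, H_u, T_l, H_l, L, K_w]
# used in the fixtures, is:
def borehole_reference(x):
    r_w, r, T_u, H_u, T_l, H_l, L, K_w = x
    log_r = np.log(r / r_w)
    denom = log_r * (1.0 + 2.0 * L * T_u / (log_r * r_w ** 2 * K_w) + T_u / T_l)
    return 2.0 * np.pi * T_u * (H_u - H_l) / denom
# borehole_reference([0.1, 1000, 100000, 1000, 100, 800, 1500, 10000]) equals
# np.pi * 40000000 / (1001 * np.log(10000) + 3000000), matching `expected`.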
def test_borehole_numba_iter_on_domain(setup_borehole_on_domain):
expected = np.array(
object=[
np.pi * 40000000 / (1001 * np.log(10000) + 3000000),
np.pi * 40000000 / (1001 * np.log(10000) + 3000000),
],
dtype=float,
)
actual = borehole_wrapper_iter(**setup_borehole_on_domain)
assert_array_equal(actual, expected)
def test_borehole_numba_vectorize_on_domain(setup_borehole_on_domain):
expected = np.array(
object=[
np.pi * 40000000 / (1001 * np.log(10000) + 3000000),
np.pi * 40000000 / (1001 * np.log(10000) + 3000000),
],
dtype=float,
)
actual = borehole_wrapper_vectorize(**setup_borehole_on_domain)
assert_array_equal(actual, expected)
def test_borehole_readable_truncated_input(setup_borehole_truncated_input):
expected = np.array(
object=[
np.pi * 48000000 / (1001 * np.log(10000) + (2800000000 / 825)),
np.pi * 48000000 / (1001 * np.log(10000) + (2800000000 / 825)),
],
dtype=float,
)
actual = borehole_readable(**setup_borehole_truncated_input)
assert_array_almost_equal(actual, expected)
def test_borehole_numba_iter_truncated_input(setup_borehole_truncated_input):
expected = np.array(
object=[
np.pi * 48000000 / (1001 * np.log(10000) + (2800000000 / 825)),
np.pi * 48000000 / (1001 * np.log(10000) + (2800000000 / 825)),
],
dtype=float,
)
actual = borehole_wrapper_iter(**setup_borehole_truncated_input)
assert_array_almost_equal(actual, expected)
def test_borehole_numba_vectorize_truncated_input(setup_borehole_truncated_input):
expected = np.array(
object=[
np.pi * 48000000 / (1001 * np.log(10000) + (2800000000 / 825)),
np.pi * 48000000 / (1001 * np.log(10000) + (2800000000 / 825)),
],
dtype=float,
)
actual = borehole_wrapper_vectorize(**setup_borehole_truncated_input)
assert_array_almost_equal(actual, expected, decimal=12)
def test_borehole_readable_equals_numba_iter(setup_borehole_large_set):
actual_readable = borehole_readable(**setup_borehole_large_set)
actual_numba = borehole_wrapper_iter(**setup_borehole_large_set)
assert_array_almost_equal(actual_readable, actual_numba, decimal=12)
def test_borehole_numba_iter_equals_numba_vectorize(setup_borehole_large_set):
actual_numba = borehole_wrapper_iter(**setup_borehole_large_set)
actual_vectorize = borehole_wrapper_vectorize(**setup_borehole_large_set)
|
assert_array_almost_equal(actual_numba, actual_vectorize, decimal=12)
|
numpy.testing.assert_array_almost_equal
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pickle as pk
import glob
import string, re
from scipy.signal import butter, filtfilt, cheby1
from sklearn.decomposition import PCA
import parmap
from tqdm import tqdm, trange
import os
class ProcessCalcium():
def __init__(self):
self.verbose = False
def sum_pixels_in_registered_mask(self, data, maskwarp):
#print (" # of trials: ", data.shape[0])
areas = np.unique(maskwarp)
#print (" # of areas: ", areas.shape)
# work in 1D vectors easier to mask
maskwarp1D = maskwarp.reshape(-1)
trial_courses = []
area_ids = []
for k in range(data.shape[0]):
#if k%10==0:
# print ("computing trial: ", k)
time_courses_local = []
# convert to 1D vector to mask faster
#data1D = np.float32(data[k].reshape(181,-1))
data1D = np.float32(data[k].reshape(data[k].shape[0],-1))
for id_ in areas:
idx = np.where(maskwarp1D==id_)[0]
# only keep areas that have at least 10 pixels
if idx.shape[0]>10:
#print ("Area: ", id_)
area_ids.append(id_)#print ("Areas: ", id_)
#print (data1D[:,idx].shape)
temp = data1D[:,idx]
if False:
# compute DFF
F0 = np.nanmean(temp,axis=0)
dFF = (data1D[:,idx]-F0)/F0
else:
# skip dFF computation;
dFF = temp
# save average of all pixesl post DFF
time_courses_local.append(np.nanmean(dFF, axis=1))
#all_times.append(time_courses_local)
trial_courses.append(time_courses_local)
area_ids = np.int32(np.unique(area_ids))
trial_courses = np.float32(trial_courses)
#print ("# trials, # areas, # times: ", trial_courses.shape)
#print ("area ids: ", area_ids.shape)
return area_ids, trial_courses
def fix_trials(self, trial_courses, trial_courses_random):
trial_courses_fixed = trial_courses.reshape(trial_courses.shape[0],-1)
trial_courses_fixed = np.nan_to_num(trial_courses_fixed, nan=9999, posinf=9999, neginf=9999)
idx = np.where(trial_courses_fixed==9999)
trial_courses_fixed[idx]=0
#print ('trial_courses_fixed: ', trial_courses_fixed.shape)
trial_courses_random_fixed = trial_courses_random.copy().reshape(trial_courses_random.shape[0],-1)
#print ('trial_courses_random_fixed: ', trial_courses_random_fixed.shape)
trial_courses_random_fixed[:,idx[1]] = 0
#
trial_courses_fixed = trial_courses_fixed.reshape(trial_courses.shape)
#
trial_courses_random_fixed = trial_courses_random_fixed.reshape(trial_courses_random.shape)
return trial_courses_fixed, trial_courses_random_fixed
#
def get_04_triggers_with_lockout(self, root_dir, recording, lockout_window):
# make sure locs
try:
locs_44threshold = np.load(root_dir + '/tif_files/' + recording + '/' + recording + '_locs44threshold.npy')
except:
print ("locs 44 thrshold missing", recording)
return np.zeros((0),'float32'), np.zeros((0),'float32')
#print ("Locs 44 threshold: ", locs_44threshold)
codes = np.load(root_dir + '/tif_files/' + recording + '/'+recording + '_code44threshold.npy')
code = b'04'
idx = np.where(codes==code)[0]
locs_selected = locs_44threshold[idx]
if locs_selected.shape[0]==0:
return np.zeros((0),'float32'), np.zeros((0),'float32')
diffs = locs_selected[1:]-locs_selected[:-1]
idx = np.where(diffs>lockout_window)[0]
locs_selected_with_lockout = locs_selected[idx+1]
if locs_selected_with_lockout.shape[0]==0:
return np.zeros((0),'float32'), np.zeros((0),'float32')
# ADD FIRST VAL
if locs_selected[0]>lockout_window:
locs_selected_with_lockout = np.concatenate(([locs_selected[0]], locs_selected_with_lockout), axis=0)
# save data
np.savetxt(root_dir + '/tif_files/' + recording + '/'+recording+ "_all_locs_selected.txt" ,
locs_selected)
np.savetxt(root_dir + '/tif_files/' + recording + '/'+recording+ "_lockout_"+str(lockout_window)+
"sec_locs_selected.txt" , locs_selected_with_lockout)
return locs_selected, locs_selected_with_lockout
def find_nearest(self, array, value):
return (np.abs(array-value)).argmin()
def load_reclength(self, filename):
""" Load realtime length of a single session. Probably should be in session, but was quicker to dump here"""
#print ("FILENAME: ", filename)
text_file = open(filename, "r")
lines = text_file.read().splitlines()
event_text = []
for line in lines:
event_text.append(re.split(r'\t+',line))
#Delete false starts from event file
for k in range(len(event_text)-1,-1,-1): #Search backwards for the 1st occurrence of "date" indicating last imaging start
#NB: There can be multiple false starts WHICH DON'T LINE UP - NEED TO IGNORE SUCH SESSIONS
if event_text[k][0]=='date':
event_text = event_text[k+2:] #Remove first 2 lines
break
if len(event_text)==0:
reclength = 0
else:
if event_text[-1][2] != "None":
reclength = 0
else:
reclength = float(event_text[-1][3])
return reclength
# FUNCTION TO COMPUTE DFF
def compute_DFF_function(self,
root_dir,
dff_method, # 'globalAverage' or 'slidingWindow'
recording,
locs_selected,
n_sec_window
):
if self.verbose:
print (" computing DFF")
# ###################################################
# ###################################################
# ###################################################
# SET DEFAULT PARAMETERS
#n_sec_window = 10
low_cut = self.low_cut
high_cut = self.high_cut
img_rate = np.loadtxt(root_dir+'/img_rate.txt')
selected_dff_filter = 'butterworth'
# MAKE FILENAMES
tif_files = root_dir+'/tif_files.npy'
event_files = root_dir + '/event_files.npy'
aligned_fname = root_dir + '/tif_files/'+recording + '/'+recording + "_aligned.npy"
rec_filename = root_dir + '/tif_files/'+recording + '/'+recording +'.tif'
n_sec = float(n_sec_window)
# Load aligned/filtered data and find ON/OFF light;
#images_file = self.parent.animal.home_dir+self.parent.animal.name+'/tif_files/'+self.rec_filename+'/'+self.rec_filename+'_aligned.npy'
images_file = aligned_fname
try:
aligned_images = np.load(images_file)
except:
print ("missing aligned images - skipping session", recording)
return np.zeros((0),'float32')
# Find blue light on/off
blue_light_threshold = 400 #Intensity threshold; when this value is reached - imaging light was turned on
start_blue = 0; end_blue = len(aligned_images)
if np.average(aligned_images[0])> blue_light_threshold: #Case #1: imaging starts with light on; need to remove end chunk; though likely bad recording
for k in range(len(aligned_images)):
if np.average(aligned_images[k])< blue_light_threshold:
#self.aligned_images = self.aligned_images[k:]
end_blue = k
break
else: #Case #2: start with light off; remove starting and end chunks;
#Find first light on
for k in range(len(aligned_images)):
if np.average(aligned_images[k])> blue_light_threshold:
start_blue = k
break
#Find light off - count backwards from end of imaging data
for k in range(len(aligned_images)-1,0,-1):
if np.average(aligned_images[k])> blue_light_threshold:
end_blue= k
break
#
filtered_filename = images_file[:-4]+'_'+selected_dff_filter+'_'+str(low_cut)+'hz_'+str(high_cut)+'hz.npy'
if os.path.exists(filtered_filename):
try:
aligned_images = np.load(filtered_filename, allow_pickle=True)
except:
print ("aligned filtered images corrupt... recomputing: ", filtered_filename)
self.filter_data(root_dir, recording)
aligned_images = np.load(filtered_filename)
else:
print ("aligned filtered images missing... recomputing: ", filtered_filename)
self.filter_data(root_dir, recording)
aligned_images = np.load(filtered_filename)
aligned_images = aligned_images[start_blue:end_blue]
#
n_images=len(aligned_images)
# Determine if imaging rate correct
temp_tif_files = np.load(tif_files)
temp_event_files = np.load(event_files)
if len(temp_event_files)==1:
temp_event_files = temp_event_files[0]
if '4TBSSD' in self.main_dir:
suffix = '4TBSSD'
elif '1TB' in self.main_dir:
suffix = '1TB'
else:
print ("New computer need to reset file locations")
suffix = '4TBSATA'
#return None
index = None
for k in range(len(temp_tif_files)):
try:
temp_temp = temp_tif_files[k].decode("utf-8").replace('12TB/in_vivo/tim',suffix).replace(
'10TB/in_vivo/tim',suffix)#.replace("b'/", "'/")
except:
temp_temp = temp_tif_files[k].replace('12TB/in_vivo/tim',suffix).replace(
'10TB/in_vivo/tim',suffix)#.replace("b'/", "'/")
if rec_filename in temp_temp:
index = k
break
if index is None:
print ("DID NOT FIND MATCH between imaging and lever ---- RETURNING ")
# zero out locs selected because session can't be used
fname_out1 = os.path.split(self.fname_04)[0]
fname_out2 = os.path.split(fname_out1)[1]
np.savetxt(self.fname_04[:-4]+"_all_locs_selected.txt" , [])
np.savetxt(fname_out1+'/'+fname_out2+"_all_locs_selected.txt" , [])
#
return np.zeros((0),'float32')
try:
reclength = self.load_reclength(str(temp_event_files[index]).replace("b'",'').replace(
"'",'').replace('12TB/in_vivo/tim',suffix))
except:
reclength = self.load_reclength(str(temp_event_files[index]).replace("b'",'').replace(
"'",'').replace('10TB/in_vivo/tim',suffix))
if reclength ==0:
print ("zero length recording exiting (excitation light failure)", recording)
# zero out locs selected because session can't be used
fname_out1 = os.path.split(self.fname_04)[0]
fname_out2 = os.path.split(fname_out1)[1]
np.savetxt(self.fname_04[:-4]+"_all_locs_selected.txt" , [])
np.savetxt(fname_out1+'/'+fname_out2+"_all_locs_selected.txt" , [])
return np.zeros((0),'float32')
# compute imaging rate: divide number of images between start_led to end_led by the reclength recorded separately
session_img_rate = n_images/reclength
if abs(session_img_rate-float(img_rate))<0.01: #Compare computed session img_rate w. experimentally set img_rate
np.save(images_file.replace('_aligned.npy','')+'_img_rate', session_img_rate)
else:
np.save(images_file.replace('_aligned.npy','')+'_img_rate', session_img_rate)
print ("Imaging rates between aligned and session are incorrect, exiting: ", session_img_rate)
# zero out locs selected because session can't be used
fname_out1 = os.path.split(self.fname_04)[0]
fname_out2 = os.path.split(fname_out1)[1]
np.savetxt(self.fname_04[:-4]+"_all_locs_selected.txt" , [])
np.savetxt(fname_out1+'/'+fname_out2+"_all_locs_selected.txt" , [])
return np.zeros((0),'float32')
#
trigger_times = locs_selected
frame_times = np.linspace(0, reclength, n_images) #Divide up reclength in number of images
img_frame_triggers = []
for i in range(len(trigger_times)):
#img_frame_triggers.append(self.find_previous(frame_times, trigger_times[i]))
img_frame_triggers.append(self.find_nearest(frame_times, trigger_times[i])) #Two different functions possible here;
# EXIT IF ONLY NEED TO DO IS SAVE blue light and img_frame_triggers:
if self.export_blue_light_times==True:
print (" exporting blue light only - exiting ")
fname_out = os.path.split(self.fname_04)[0]
np.savez(fname_out+"/blue_light_frame_triggers.npz",
start_blue=start_blue,
end_blue=end_blue,
img_frame_triggers=img_frame_triggers)
return np.zeros((0),'float32')
#
mean_file = root_dir + '/tif_files/'+recording + '/'+recording + '_aligned_mean.npy'
if os.path.exists(mean_file)==False:
aligned_fname = root_dir + '/tif_files/'+recording + '/'+recording + "_aligned.npy"
images_file = aligned_fname
images_aligned = np.load(images_file)
images_aligned_mean = np.mean(images_aligned, axis=0)
np.save(images_file[:-4]+'_mean', images_aligned_mean)
global_mean = np.load(mean_file)
abstimes = np.load(root_dir + '/tif_files/'+recording + '/'+recording + '_abstimes.npy')
abspositions = np.load(root_dir + '/tif_files/'+recording + '/'+recording + '_abspositions.npy')
data_stm = []; traces = []; locs = []; codes = []
# counter=-1
window = n_sec * session_img_rate #THIS MAY NOT BE GOOD ENOUGH; SHOULD ALWAYS GO BACK AT LEAST X SECONDS EVEN IF WINDOW IS ONLY 1SEC or 0.5sec...
#Alternatively: always compute using at least 3sec window, and then just zoom in
##################################################
##################################################
##################################################
# initialize data stack - helps a bit with speed instead of lists
data_stm = np.zeros((len(img_frame_triggers),(int(window)*2+1), 128, 128))
counter = 0
for trigger in tqdm(img_frame_triggers):
# NOTE: STARTS AND ENDS OF RECORDINGS MAY NOT HAVE PROPER [Ca] DATA; MAY NEED TO SKIP MANUALLY
# load data chunk; make sure it is the right size; otherwise skip
data_chunk = aligned_images[int(trigger-window):int(trigger+window)]
#print (data_chunk.shape[0], window*2+1)
if data_chunk.shape[0] != (int(window)*2+1):
continue
if dff_method == 'globalAverage':
#data_stm.append( #Only need to divide by global mean as original data_chunk did not have mean img added in
temp = (data_chunk-global_mean)/global_mean
data_stm[counter] = temp
elif dff_method == 'slidingWindow': #Use baseline -2*window .. -window
print (" SLIDING WINDOW METHOD NOT USED ANYMORE")
return None
if trigger < (2*window) or trigger>(n_images-window):
continue #Skip if too close to start/end
baseline = np.average(aligned_images[int(trigger-2*window):int(trigger-window)], axis=0)
data_stm.append((data_chunk-baseline)/baseline)
# advance the counter
counter+=1
# NOT USED ANYMORE
# #***PROCESS TRACES - WORKING IN DIFFERENT TIME SCALE
# lever_window = int(120*n_sec) #NB: Lever window is computing in real time steps @ ~120Hz; and discontinuous;
# t = np.linspace(-lever_window*0.0082,
# lever_window*0.0082,
# lever_window*2)
# #
# lever_position_index = self.find_nearest(np.array(abstimes), locs_selected[counter])
# lever_trace = abspositions[int(lever_position_index-lever_window):int(lever_position_index+lever_window)]
#
# if len(lever_trace)!=len(t): #Extraplote missing data
# lever_trace = np.zeros(lever_window*2,dtype=np.float32)
# for k in range(-lever_window,lever_window,1):
# lever_trace[k+lever_window] = self.abspositions[k+lever_window] #Double check this...
#
# traces.append(lever_trace)
# data_stm = np.array(data_stm)
data_stm = data_stm[:counter]
return data_stm
def filter_data(self,
root_dir,
recording,
):
# ###################################################
# ###################################################
# ###################################################
# SET DEFAULT PARAMETERS
#n_sec_window = 10
low_cut = self.low_cut
high_cut = self.high_cut
#img_rate = 30.0
selected_dff_filter = 'butterworth'
# MAKE FILENAMES
generic_mask_fname = root_dir + '/genericmask.txt'
tif_files = root_dir+'tif_files.npy'
event_files = root_dir + 'event_files.npy'
aligned_fname = root_dir + '/tif_files/'+recording + '/'+recording + "_aligned.npy"
# FILTERING STEP
images_file = aligned_fname
filter_type = selected_dff_filter
lowcut = low_cut
highcut = high_cut
fs = self.img_rate
# #Check to see if data requested exists- THIS CHECK WAS ALREADY DONE PRIOR TO ENTERING FUNCTION
# if False:
# if os.path.exists(images_file[:-4]+'_'+filter_type+'_'+str(lowcut)+'hz_'+str(highcut)+'hz.npy'):
# #print ("filtered data already exists...")
# return
#Load aligned images
if os.path.exists(images_file):
images_aligned = np.load(images_file)
else:
print (" ...missing aligned images... NEED TO RUN ALIGN ALGORITHMS", images_file)
return None
# TODO IMPLMENET ALIGNMENT TOOL
#images_aligned = align_images2(self)
#Save mean of images_aligned if not already done
if os.path.exists(images_file[:-4]+'_mean.npy')==False:
images_aligned_mean = np.mean(images_aligned, axis=0)
np.save(images_file[:-4]+'_mean', images_aligned_mean)
else:
images_aligned_mean = np.load(images_file[:-4]+'_mean.npy')
#Load mask - filter only datapoints inside mask
n_pixels = len(images_aligned[0])
generic_coords = np.loadtxt(generic_mask_fname)
generic_mask_indexes=np.zeros((n_pixels,n_pixels))
for i in range(len(generic_coords)): generic_mask_indexes[int(generic_coords[i][0])][int(generic_coords[i][1])] = True
#Filter selection and parameters
if filter_type == 'butterworth':
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
order = 2
b, a = butter(order, [low, high], btype='band')
elif filter_type == 'chebyshev':
nyq = fs / 2.0
order = 4
rp = 0.1
Wn = [lowcut / nyq, highcut / nyq]
b, a = cheby1(order, rp, Wn, 'bandpass', analog=False)
#Load individual pixel time courses; SWITCH TO UNRAVEL HERE****
import time
filtered_array = np.zeros(images_aligned.shape, dtype=np.float16)
now = time.time(); start_time = now
cutoff=n_pixels
#from tqdm import tqdm
#for p1 in tqdm(range(n_pixels)):
for p1 in range(n_pixels):
now=time.time(); n_pixels_in=0
for p2 in range(n_pixels):
if generic_mask_indexes[p1,p2]==False:
filtered_array[:,p1,p2] = np.float16(filtfilt(b, a, images_aligned[:,p1,p2])); n_pixels_in+=1 #filter pixel inside mask
np.save(images_file[:-4]+'_'+filter_type+'_'+str(lowcut)+'hz_'+str(highcut)+'hz',
filtered_array+np.float16(images_aligned_mean))
return
#select code 04/02/07 triggers;
def get_triggers_bodyparts_whole_stack(self, recording):
# find filename
fname = os.path.join(self.main_dir,self.animal_id,'tif_files',recording,
recording+"_"+str(self.feature_quiescence)+"secNoMove_movements.npz")
#print ("FNAME: movement feature: ", fname)
try:
data = np.load(fname, allow_pickle=True)
except:
print ("No video available: ", recording)
return np.zeros((0)), np.zeros((0))
#
labels = data['labels']
for k in range(len(labels)):
if self.feature_name==labels[k]:
feature_id = k
break
#
feat = data['feature_quiescent']
f = []
for k in range(feat.shape[0]):
temp = np.array(feat[k])#.T
if temp.shape[0]>0:
f.append(temp)
feature_movements = np.vstack(f)
# subsample the times from all the data
# 100 trials will give bewtween 30,000 to 90,000 frames for PCA
idx = np.random.choice(np.arange(feature_movements.shape[0]),
min(100,feature_movements.shape[0]))
feature_movements = feature_movements[idx]
if feature_movements.shape[0]<=1:
feature_starts = np.zeros((0),'float32')
else:
feature_starts = feature_movements[:,1]
return feature_starts, []
def get_triggers_bodyparts(self, recording):
# find filename
fname = os.path.join(self.main_dir,self.animal_id,'tif_files',recording,
recording+"_"+str(self.feature_quiescence)+"secNoMove_movements.npz")
#print ("FNAME: movement feature: ", fname)
try:
data = np.load(fname, allow_pickle=True)
except:
print ("No video available: ", recording)
return [], []
#
labels = data['labels']
for k in range(len(labels)):
if self.feature_name==labels[k]:
feature_id = k
break
#
feature_movements = np.array(data['feature_quiescent'][feature_id])
#print (fname, feature_id, feature_movements)
if feature_movements.shape[0]<=1:
feature_starts = np.zeros((0),'float32')
else:
feature_starts = feature_movements[:,1]
#print ("feature nomovements starts/ends: ", feature_movements)
#if self.remove_shift:
# print ("feature starts: ", feature_starts)
return feature_starts, []
#
def compute_trial_courses_ROI_code04_trigger(self,
recording,
root_dir,
feature_name,
lockout_window,
n_sec_window,
recompute,
midline_filter_flag,
save_stm_flag,
transform_data_flag,
use_fixed_filter_flag,
fname_filter,
pca_denoise_flag
):
#
if self.verbose:
print ('')
print (" recording: ", recording)
# SET PARAMETERS
dff_method = 'globalAverage'
#
if self.whole_stack == True:
locs_selected, locs_selected_with_lockout = self.get_triggers_bodyparts_whole_stack(recording)
# must also load locs for code_04 in cases where no movements / videos are present but we still want to
# do dff whole stack
locs_selected_04, _ = self.get_04_triggers_with_lockout(root_dir,
recording,
lockout_window)
# add locs_selected from code_04 in order to make pca object and denoise datastack
if locs_selected.shape[0]==0:
locs_selected = locs_selected_04
elif locs_selected.shape[0]<100:
locs_selected = np.hstack((locs_selected, locs_selected_04))
elif self.features=='code_04':
locs_selected, locs_selected_with_lockout = self.get_04_triggers_with_lockout(root_dir,
recording,
lockout_window)
elif 'left_paw' in self.features:
locs_selected, locs_selected_with_lockout = self.get_triggers_bodyparts_whole_stack(recording)
else:
if self.verbose:
print (" The feature selected can't be found, exiting")
return
#
if len(locs_selected)==0:
if self.verbose:
print (" ... session has no events ", recording)
return
# GENERATE SAVE FILENAMES FOR ALL CODE_04 DATA
fname_04 = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_trial_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
fname_random = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_random_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
# good idea to save these as text to see them after:
fname_locs = fname_04[:-4]+"_locs_selected.txt"
#if self.verbose:
# print ("fname-locs: ", fname_locs)
np.savetxt(fname_locs, locs_selected)
#
self.generate_arrays_ROI_triggered(root_dir,
dff_method,
recording,
locs_selected,
n_sec_window,
fname_04,
fname_random,
recompute,
midline_filter_flag,
save_stm_flag,
transform_data_flag,
use_fixed_filter_flag,
fname_filter,
pca_denoise_flag
)
# just compute PCA and return;
if self.feature_name=="whole_stack":
return
# GENERATE SAVE FILENAMES FOR LOCKOUT DATA
fname_04 = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_lockout_"+str(lockout_window)+"sec_trial_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
fname_random = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_lockout_"+str(lockout_window)+"sec_random_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
#if os.path.exists(fname_04)==False:
self.generate_arrays_ROI_triggered(root_dir,
dff_method,
recording,
locs_selected_with_lockout,
n_sec_window,
fname_04,
fname_random,
recompute,
midline_filter_flag,
save_stm_flag,
transform_data_flag,
use_fixed_filter_flag,
fname_filter,
pca_denoise_flag)
#
def load_trial_courses_ROI_code04_trigger(self,
recording,
root_dir,
feature_name,
lockout_window, # THIS IS THE LOCKOUT WINDOW FOR NO OTHER PULLS
n_sec_window): # THIS IS THE DFF TIEM COURSE WINDOW; e.g. -10..+10sec
# GENERATE SAVE FILENAMES FOR ALL CODE_04 DATA
fname_04 = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_trial_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
fname_random = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_random_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
data_04 = np.load(fname_04)
data_04_random = np.load(fname_random)
# GENERATE SAVE FILENAMES FOR LOCKOUT DATA
fname_04 = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_lockout_"+str(lockout_window)+"sec_trial_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
fname_random = (root_dir + '/tif_files/' + recording + '/' + recording + "_"+feature_name+
"_lockout_"+str(lockout_window)+"sec_random_ROItimeCourses_"+str(n_sec_window)+"sec.npy")
data_04_lockout =
|
np.load(fname_04)
|
numpy.load
|
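# Hedged sketch (assumption, not the original class): the truncated loader above
# presumably finishes by reading the lockout arrays the same way and returning all
# four stacks; the function and argument names here are hypothetical.
import numpy as np

def load_trial_courses_sketch(fname_trial, fname_random, fname_trial_lockout, fname_random_lockout):
    data_04 = np.load(fname_trial)                          # trial-triggered ROI time courses
    data_04_random = np.load(fname_random)                  # matched random control segments
    data_04_lockout = np.load(fname_trial_lockout)          # trials with no other pulls in the lockout window
    data_04_lockout_random = np.load(fname_random_lockout)  # random controls for the lockout trials
    return data_04, data_04_random, data_04_lockout, data_04_lockout_random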
#!/home/knielbo/virtenvs/teki/bin/python
"""
Driver for application of uncertainty model to trend detection and classification of newspaper content
"""
import os
import argparse
import json
import numpy as np
import scipy as sp
import scipy.stats as stats
import saffine.detrending_method as dm
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams.update({"text.usetex": False,
"font.family": "Times New Roman",
"font.serif": "cmr10",
"mathtext.fontset": "cm",
"axes.unicode_minus": False
})
def normalize(x, lower=-1, upper=1):
""" transform x to x_ab in range [a, b]
"""
x_norm = (upper - lower)*((x - np.min(x)) / (
|
np.max(x)
|
numpy.max
|
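# Hedged sketch of a complete min-max scaler matching the truncated normalize() above
# (assumption: the denominator is the data range and the result is shifted by `lower`).
import numpy as np

def normalize_sketch(x, lower=-1, upper=1):
    x = np.asarray(x, dtype=float)
    return (upper - lower) * (x - np.min(x)) / (np.max(x) - np.min(x)) + lower

# e.g. normalize_sketch([0, 5, 10]) -> array([-1., 0., 1.])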
# Copyright (c) 2015-2020, Swiss Federal Institute of Technology (ETH Zurich)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Helpers to calculate channel capacities. These are based on following publications:
* <NAME>, <NAME>, and <NAME>. 2016. On the capacity of thermal covert channels in multicores. In Proceedings of the Eleventh European Conference on Computer Systems (EuroSys ’16). Association for Computing Machinery, New York, NY, USA, Article 24, 1–16. DOI:https://doi.org/10.1145/2901318.2901322
* <NAME> and <NAME>. 2018. The security risks of power measurements in multicores. In Proceedings of the 33rd Annual ACM Symposium on Applied Computing (SAC ’18). Association for Computing Machinery, New York, NY, USA, 1585–1592. DOI:https://doi.org/10.1145/3167132.3167301
* Miedl, Philipp, <NAME>, and <NAME>. "Increased reproducibility and comparability of data leak evaluations using ExOT." Proceedings of the 2020 Design, Automation & Test in Europe Conference & Exhibition (DATE). IEEE, 2020.
"""
import numpy as np
__all__ = ("get_fspectrum", "waterfilling", "capacity_from_connection_matrix")
"""
Signatures
----------
classic_waterfilling :: (p0, Sqq, Shh) -> Capacity float
constrained_waterfilling :: (p0, Sqq, Shh) -> Capacity float
capacity_from_connection_matrix :: (A, T_min) -> Capacity float
"""
def classic_waterfilling(p0, Sqq, Shh):
"""Returns the capacity bound of a given channel determined using classic waterfiling
Args:
p0: Power cap for the waterfilling algorithm as float
Sqq: Noise power spectrum as np.ndarray of shape (N, 2), where N is the number of frequency
bins, column 0 holds the frequencies and column 1 the power spectral density.
Shh: Channel power spectrum as np.ndarray of shape (N, 2), where N is the number of frequency
bins, column 0 holds the frequencies and column 1 the power spectral density.
Returns:
Channel capacity in bits per second
"""
_lambda = 1 # Waterfilling parameter
_alpha = 1 # Lagrangian parameter
error = np.inf # Error for input power allocation
Sxx = np.full(Sqq[:, 0].shape, np.nan) # Ideal input power allocation
f_diff = np.concatenate([
|
np.diff(Shh[:, 0])
|
numpy.diff
|
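# Hedged sketch (assumption, not the module's implementation): for a fixed water level,
# classic waterfilling allocates input power where the noise-to-channel ratio lies below
# the water level, then integrates the resulting SNR to estimate capacity. The helper
# name and the fixed water level (instead of solving for the power cap p0) are
# simplifications for illustration only.
import numpy as np

def waterfilling_step_sketch(Sqq, Shh, water_level):
    freqs = Shh[:, 0]
    inv_cnr = Sqq[:, 1] / Shh[:, 1]                   # noise-to-channel ratio per bin
    Sxx = np.maximum(water_level - inv_cnr, 0.0)      # input power allocation
    df = np.diff(freqs)                               # frequency bin widths
    snr = Sxx * Shh[:, 1] / Sqq[:, 1]
    capacity = np.sum(np.log2(1.0 + snr[:-1]) * df)   # bits per second (left Riemann sum)
    return Sxx, capacity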
from .util import normalize
from typing import List, Optional, Tuple
from tqdm.autonotebook import trange
import numpy as np
import faiss
import logging
import time
logger = logging.getLogger(__name__)
class FaissIndex:
def __init__(self, index: faiss.Index, passage_ids: List[int] = None):
self.index = index
self._passage_ids = None
if passage_ids is not None:
self._passage_ids = np.array(passage_ids, dtype=np.int64)
def search(self, query_embeddings: np.ndarray, k: int, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
start_time = time.time()
scores_arr, ids_arr = self.index.search(query_embeddings, k)
if self._passage_ids is not None:
ids_arr = self._passage_ids[ids_arr.reshape(-1)].reshape(query_embeddings.shape[0], -1)
logger.info("Total search time: %.3f", time.time() - start_time)
return scores_arr, ids_arr
def save(self, fname: str):
faiss.write_index(self.index, fname)
@classmethod
def build(
cls,
passage_ids: List[int],
passage_embeddings: np.ndarray,
index: Optional[faiss.Index] = None,
buffer_size: int = 50000,
):
if index is None:
index = faiss.IndexFlatIP(passage_embeddings.shape[1])
for start in trange(0, len(passage_ids), buffer_size):
index.add(passage_embeddings[start : start + buffer_size])
return cls(index, passage_ids)
def to_gpu(self):
if faiss.get_num_gpus() == 1:
res = faiss.StandardGpuResources()
self.index = faiss.index_cpu_to_gpu(res, 0, self.index)
else:
cloner_options = faiss.GpuMultipleClonerOptions()
cloner_options.shard = True
self.index = faiss.index_cpu_to_all_gpus(self.index, co=cloner_options)
return self.index
class FaissHNSWIndex(FaissIndex):
def search(self, query_embeddings: np.ndarray, k: int, **kwargs) -> Tuple[np.ndarray, np.ndarray]:
query_embeddings = np.hstack((query_embeddings, np.zeros((query_embeddings.shape[0], 1), dtype=np.float32)))
return super().search(query_embeddings, k)
def save(self, output_path: str):
super().save(output_path)
@classmethod
def build(
cls,
passage_ids: List[int],
passage_embeddings: np.ndarray,
index: Optional[faiss.Index] = None,
buffer_size: int = 50000,
):
sq_norms = (passage_embeddings ** 2).sum(1)
max_sq_norm = float(sq_norms.max())
aux_dims =
|
np.sqrt(max_sq_norm - sq_norms)
|
numpy.sqrt
|
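# Hedged sketch (assumption): the aux_dims computation above appears to be the standard
# MIPS-to-L2 reduction used for HNSW indexes -- append sqrt(phi - ||p||^2) to every
# passage so that L2 distance on the augmented vectors ranks like inner product on the
# originals, while queries get a zero appended (as FaissHNSWIndex.search does above).
import numpy as np

def augment_for_mips(passage_embeddings: np.ndarray) -> np.ndarray:
    sq_norms = (passage_embeddings ** 2).sum(1)
    aux_dims = np.sqrt(float(sq_norms.max()) - sq_norms)
    return np.hstack(
        (passage_embeddings, aux_dims.reshape(-1, 1))
    ).astype(np.float32)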
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
# np.seterr(all="raise")
from scipy import linalg
from quocslib.freegradientmethods.DirectSearchMethod import DirectSearchMethod
from quocslib.stoppingcriteria.CMAESStoppingCriteria import CMAESStoppingCriteria
class CMAES(DirectSearchMethod):
callback: callable
def __init__(
self,
settings: dict = None,
stopping_criteria: dict = None,
callback: callable = None,
**kwargs
):
"""
:param dict settings:
:param dict stopping_criteria:
"""
super().__init__()
if callback is not None:
self.callback = callback
# Activate the parallelization for the first evaluations
self.is_parallelized = settings.setdefault("parallelization", False)
self.is_adaptive = settings.setdefault("is_adaptive", False)
# TODO Create it using dynamical import module
# Stopping criteria object
self.sc_obj = CMAESStoppingCriteria(stopping_criteria)
def run_dsm(
self,
func,
x0,
args=(),
sigma_v: np.array = None,
initial_simplex=None,
max_iterations_number: int = None,
**kwargs
) -> dict:
"""
:param callable func: Function to be called at every function evaluation
:param np.array x0: initial point
:param tuple args: Further arguments
:param np.array initial_simplex: Starting simplex for the Nelder Mead evaluation
:param int max_iterations_number: Maximum iteration number of function evaluations
:return:
"""
# Creation of the communication function for the Optimizer object
calls_number, func = self._get_wrapper(args, func)
# Set to false is_converged
self.sc_obj.is_converged = False
N = len(x0)
xmean = x0
# Sigma
sigma = 1.0
# coordinate wise standard deviation (step-size) TR 2020_04_15: use ReasonableAmplVar (see later in for loop)
if sigma_v is None or len(sigma_v) != N:
sigma_v = 0.3 * np.ones(
N,
)
# Strategy parameter setting: selection population size / offspring number. TR 2020_04_15: according to "The CMA
# Evolution Strategy: A Tutorial" (Hansen) this can be increased. Number of parents/points for recombination
l_pop = int(4 + np.floor(3 * np.log(N)))
mu = int(np.floor(l_pop / 2))
# muXone array for weighted recombination
weights = np.log(mu + 0.5) - np.log(np.linspace(1, mu, num=mu))
# Normalize recombination weights array
weights = weights / np.sum(weights)
# variance-effectiveness
mueff = 1 / sum(weights**2)
# Strategy parameter setting: Adaptation Time constant for cumulation for C
cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)
# t-const for cumulation for sigma control
cs = (mueff + 2) / (N + mueff + 5)
# learning rate for rank-one update of C
c1 = 2 / ((N + 1.3) ** 2 + mueff)
# learning rate for rank-mu update of C
cmu = np.minimum(1 - c1, 2 * (mueff - 2 + 1 / mueff) / ((N + 2) ** 2 + mueff))
# damping for sigma
damps = 1 + 2 * np.maximum(0, np.sqrt((mueff - 1) / (N + 1)) - 1) + cs
# Initialize dynamic (internal) strategy parameters and constants
pc = np.zeros((N,))
ps = np.zeros((N,))
B = np.eye(N)
# Build initial D matrix with scale vector
D = sigma_v
# Initial covariance matrix
C = B * np.diag(D**2) * B.T
invsqrtC = B * np.diag(D ** (-1)) * B.T
# Eigenvalue approximation
eigeneval = 0
# Expectation value
chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))
# Total evaluation
counteval = 0
# Population of candidate solutions (one column per offspring)
arx = np.zeros((N, l_pop))
# Arguments for stopping criteria
iterations = 1
# terminateReason = -1 # JZ 20161125: introduced this quantity
is_terminated = False
# figure of merit array
fsim = np.zeros(l_pop, dtype=float)
ind = np.zeros(l_pop, dtype=int)
while not self.sc_obj.is_converged:
for k in range(l_pop):
# TR 2020_04_15: Hansen (2016) here also has arz: arz[:,k] = randn(N,) standard normally
# distributed vector TR 2020_04_15: here we should make sigma dependent on the reasonable amplitude
# variation, right?
arx[:, k] = xmean + sigma * B.dot(
D
* np.random.randn(
N,
)
)
# Starting point at the beginning of the SI
if counteval == 0:
arx[:, k] = xmean
# Possible parallelization here (only for open-loop optimization)
fsim[k] = func(arx[:, k], iterations)
counteval += 1
iterations = counteval
# Sort fsim so that the lowest value is at index 0 and the values ascend from there
ind = np.argsort(fsim)
fsim = np.take(fsim, ind, 0)
# Checks general stopping criteria
# TR 2020_04_15: Does this part make sense here the way it is? For NM I want to consider the Simplex size
# but here we take the average of the whole population... maybe better use only best value or think about
# what makes sense here
if not is_terminated:
xold = xmean
# Recombination, new mean value
xmean = arx[:, ind[0:mu]].dot(weights)
# TR 2020_04_15: Hansen (2016) here also has zmean:
# zmean = arz(:, arindex[1:mu]).dot(weights) # == Dˆ(-1)*B’*(xmean-xold)/sigma
# New average vector
y = xmean - xold
z = invsqrtC.dot(y)
ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * z / sigma
# hsig = np.linalg.norm(ps) / np.sqrt(1 - (1 - cs) ** (2 * counteval / l_pop)) / chiN < 2 + 4. / (N + 1)
hsig = np.linalg.norm(ps) / np.sqrt(
1 - np.power((1 - cs), (2 * counteval / l_pop))
) / chiN < 1.4 + 2 / (N + 1)
# Evolution path update
pc = (1 - cc) * pc + hsig * np.sqrt(cc * (2 - cc) * mueff) * y / sigma
# Adapt covariance matrix C, i.e. rank mu update
artmp = (1 / sigma) * (arx[:, ind[0:mu]] - np.tile(xold, (mu, 1)).T)
C = (
(1 - c1 - cmu) * C
+ c1 * (np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C)
+ cmu * artmp.dot(np.diag(weights).dot(artmp.T))
)
# Adapt step size sigma
sigma = sigma * np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))
# Decomposition of C into B*diag(D.^2)*B' (diagonalization) to achieve O(N^2)
if (counteval - eigeneval) > l_pop / (c1 + cmu) / N / 10:
# eigeneval = counteval
C =
|
np.triu(C)
|
numpy.triu
|
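# Hedged sketch (assumption, following Hansen's reference CMA-ES rather than this
# module): np.triu(C) is typically combined with its strict upper triangle transposed
# back so C stays symmetric before the eigen-decomposition that refreshes B, D and
# C^(-1/2); the helper below illustrates that step in isolation.
import numpy as np

def refresh_eigensystem_sketch(C: np.ndarray):
    C = np.triu(C) + np.triu(C, 1).T          # enforce symmetry
    D2, B = np.linalg.eigh(C)                 # eigenvalues and eigenvectors of C
    D = np.sqrt(np.maximum(D2, 1e-20))        # per-axis standard deviations
    invsqrtC = B @ np.diag(1.0 / D) @ B.T     # C^(-1/2)
    return C, B, D, invsqrtC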
#!/usr/bin/env python3
import os
import ctypes
import socket
from argparse import Namespace
from time import sleep
from queue import Empty
import multiprocessing as mp
import threading
import yaml
import numpy as np
import h5py
from darc.definitions import CONFIG_FILE
# silence the tensorflow logger
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Classifier(mp.Process):
"""
Classify candidates from HDF5 files produced by Extractor
"""
def __init__(self, logger, input_queue, conn, config_file=CONFIG_FILE):
"""
:param Logger logger: Processor logger object
:param Queue input_queue: Input queue for triggers
:param Connection conn: Pipe connection to send output to
:param str config_file: Path to config file
"""
super(Classifier, self).__init__()
self.logger = logger
self.input_queue = input_queue
self.conn = conn
# load config
self.config_file = config_file
self.config = self._load_config()
# create stop event
self.stop_event = mp.Event()
self.input_empty = False
self.model_freqtime = None
self.model_dmtime = None
self.data_freqtime = None
self.data_dmtime = None
self.nfreq_data = None
self.ndm_data = None
self.ntime_data = None
self.candidates_to_visualize = []
self.tf = None
def _load_tensorflow(self):
"""
Load tensorflow into local namespace
"""
# import tensorflow here as apparently it isn't fork-safe, and results
# in a "Could not retrieve CUDA device count" error when
# this Process is forked from another Process
import tensorflow
self.tf = tensorflow
# set GPU visible to classifier
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.gpu)
# set memory growth parameter to avoid allocating all GPU memory
# only one GPU is visible, so always selecting first GPU is fine
# this is only available on tensorflow >= 2.0
if int(self.tf.__version__[0]) >= 2:
gpu = self.tf.config.experimental.list_physical_devices('GPU')[0]
self.tf.config.experimental.set_memory_growth(gpu, True)
# also silence the logger even more
self.tf.get_logger().setLevel('ERROR')
else:
# for TF 1.X, create a session with the required growth parameter
tf_config = self.tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
self.tf.Session(config=tf_config)
def run(self):
"""
Main loop
"""
self.logger.info("Starting classifier thread")
# initialize tensorflow and models
self._load_tensorflow()
self._init_models()
do_stop = False
while not self.stop_event.is_set():
# read file paths from input queue
try:
fname = self.input_queue.get(timeout=.1)
except Empty:
self.input_empty = True
if do_stop:
# run stop in a thread, so processing can continue
self.logger.debug("Running stop")
thread = threading.Thread(target=self.stop)
thread.daemon = True
thread.start()
# then set do_stop to false, so it is not run a second time
do_stop = False
continue
else:
self.input_empty = False
if fname == 'stop':
do_stop = True
else:
# do classification
self._classify(fname)
self.logger.info("Stopping classifier thread")
# send list of candidates to visualize to parent process
self.conn.send(self.candidates_to_visualize)
self.conn.close()
def stop(self):
"""
Stop this thread
"""
# wait until the input queue is empty
if not self.input_empty:
self.logger.debug("Classifier waiting to finish processing")
while not self.input_empty:
sleep(1)
# then stop
self.stop_event.set()
def _load_config(self):
"""
Load configuration
"""
with open(self.config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)['processor']['classifier']
# set config, expanding strings
kwargs = {'home': os.path.expanduser('~'), 'hostname': socket.gethostname()}
for key, value in config.items():
if isinstance(value, str):
config[key] = value.format(**kwargs)
# replace any -1 by infinity
elif value == -1:
config[key] = np.inf
# return as Namespace so the keys can be accessed as attributes
return Namespace(**config)
def _init_models(self):
"""
Load the keras models
"""
# initialise analysis tools
self.model_freqtime = self.tf.keras.models.load_model(os.path.join(self.config.model_dir,
self.config.model_freqtime))
self.model_dmtime = self.tf.keras.models.load_model(os.path.join(self.config.model_dir,
self.config.model_dmtime))
# The model's first prediction takes longer
# pre-empt this by classifying an array of zeros before looking at real data
self.model_freqtime.predict(np.zeros([1, self.config.nfreq, self.config.ntime, 1]))
self.model_dmtime.predict(
|
np.zeros([1, self.config.ndm, self.config.ntime, 1])
|
numpy.zeros
|
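# Minimal illustration (separate from the Classifier class, values assumed): the config
# post-processing above expands placeholders in string values and maps a sentinel of -1
# to an unbounded threshold.
import os
import socket
import numpy as np

example_config = {'model_dir': '{home}/keras_models', 'max_dm': -1}
kwargs = {'home': os.path.expanduser('~'), 'hostname': socket.gethostname()}
for key, value in example_config.items():
    if isinstance(value, str):
        example_config[key] = value.format(**kwargs)
    elif value == -1:
        example_config[key] = np.inf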
from __future__ import division, absolute_import, print_function
import sys
import warnings
from decimal import Decimal
import numpy as np
from numpy.testing import *
class TestEinSum(TestCase):
def test_einsum_errors(self):
# Need enough arguments
assert_raises(ValueError, np.einsum)
assert_raises(ValueError, np.einsum, "")
# subscripts must be a string
assert_raises(TypeError, np.einsum, 0, 0)
# out parameter must be an array
assert_raises(TypeError, np.einsum, "", 0, out='test')
# order parameter must be a valid order
assert_raises(TypeError, np.einsum, "", 0, order='W')
# casting parameter must be a valid casting
assert_raises(ValueError, np.einsum, "", 0, casting='blah')
# dtype parameter must be a valid dtype
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type')
# other keyword arguments are rejected
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0)
# issue 4528 revealed a segfault with this call
assert_raises(TypeError, np.einsum, *(None,)*63)
# number of operands must match count in subscripts string
assert_raises(ValueError, np.einsum, "", 0, 0)
assert_raises(ValueError, np.einsum, ",", 0, [0], [0])
assert_raises(ValueError, np.einsum, ",", [0])
# can't have more subscripts than dimensions in the operand
assert_raises(ValueError, np.einsum, "i", 0)
assert_raises(ValueError, np.einsum, "ij", [0, 0])
assert_raises(ValueError, np.einsum, "...i", 0)
assert_raises(ValueError, np.einsum, "i...j", [0, 0])
assert_raises(ValueError, np.einsum, "i...", 0)
assert_raises(ValueError, np.einsum, "ij...", [0, 0])
# invalid ellipsis
assert_raises(ValueError, np.einsum, "i..", [0, 0])
assert_raises(ValueError, np.einsum, ".i...", [0, 0])
assert_raises(ValueError, np.einsum, "j->..j", [0, 0])
assert_raises(ValueError, np.einsum, "j->.j...", [0, 0])
# invalid subscript character
assert_raises(ValueError, np.einsum, "i%...", [0, 0])
assert_raises(ValueError, np.einsum, "...j$", [0, 0])
assert_raises(ValueError, np.einsum, "i->&", [0, 0])
# output subscripts must appear in input
assert_raises(ValueError, np.einsum, "i->ij", [0, 0])
# output subscripts may only be specified once
assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]])
# dimensions must match when being collapsed
assert_raises(ValueError, np.einsum, "ii", np.arange(6).reshape(2, 3))
assert_raises(ValueError, np.einsum, "ii->i", np.arange(6).reshape(2, 3))
# broadcasting to new dimensions must be enabled explicitly
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3))
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
out=np.arange(4).reshape(2, 2))
def test_einsum_views(self):
# pass-through
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("...", a)
assert_(b.base is a)
b = np.einsum(a, [Ellipsis])
assert_(b.base is a)
b = np.einsum("ij", a)
assert_(b.base is a)
assert_equal(b, a)
b = np.einsum(a, [0, 1])
assert_(b.base is a)
assert_equal(b, a)
# transpose
a = np.arange(6)
a.shape = (2, 3)
b = np.einsum("ji", a)
assert_(b.base is a)
assert_equal(b, a.T)
b = np.einsum(a, [1, 0])
assert_(b.base is a)
assert_equal(b, a.T)
# diagonal
a = np.arange(9)
a.shape = (3, 3)
b = np.einsum("ii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
b = np.einsum(a, [0, 0], [0])
assert_(b.base is a)
assert_equal(b, [a[i, i] for i in range(3)])
# diagonal with various ways of broadcasting an additional dimension
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("...ii->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0])
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
b = np.einsum("ii...->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0])
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(2, 0, 1)])
b = np.einsum("...ii->i...", a)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis])
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("jii->ij", a)
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum(a, [1, 0, 0], [0, 1])
assert_(b.base is a)
assert_equal(b, [a[:, i, i] for i in range(3)])
b = np.einsum("ii...->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
b = np.einsum("i...i->i...", a)
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis])
assert_(b.base is a)
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
b = np.einsum("i...i->...i", a)
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0])
assert_(b.base is a)
assert_equal(b, [[x[i, i] for i in range(3)]
for x in a.transpose(1, 0, 2)])
# triple diagonal
a = np.arange(27)
a.shape = (3, 3, 3)
b = np.einsum("iii->i", a)
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
b = np.einsum(a, [0, 0, 0], [0])
assert_(b.base is a)
assert_equal(b, [a[i, i, i] for i in range(3)])
# swap axes
a = np.arange(24)
a.shape = (2, 3, 4)
b = np.einsum("ijk->jik", a)
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
b = np.einsum(a, [0, 1, 2], [1, 0, 2])
assert_(b.base is a)
assert_equal(b, a.swapaxes(0, 1))
def check_einsum_sums(self, dtype):
# Check various sums. Does many sizes to exercise unrolled loops.
# sum(a, axis=-1)
for n in range(1, 17):
a = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i->", a), np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [0], []),
np.sum(a, axis=-1).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("...i->...", a),
np.sum(a, axis=-1).astype(dtype))
assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis]),
np.sum(a, axis=-1).astype(dtype))
# sum(a, axis=0)
for n in range(1, 17):
a = np.arange(2*n, dtype=dtype).reshape(2, n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("i...->...", a),
np.sum(a, axis=0).astype(dtype))
assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis]),
np.sum(a, axis=0).astype(dtype))
# trace(a)
for n in range(1, 17):
a = np.arange(n*n, dtype=dtype).reshape(n, n)
assert_equal(np.einsum("ii", a), np.trace(a).astype(dtype))
assert_equal(np.einsum(a, [0, 0]), np.trace(a).astype(dtype))
# multiply(a, b)
assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1, 17):
a = np.arange(3*n, dtype=dtype).reshape(3, n)
b = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
assert_equal(np.einsum("..., ...", a, b), np.multiply(a, b))
assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis]),
np.multiply(a, b))
# inner(a,b)
for n in range(1, 17):
a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("...i, ...i", a, b), np.inner(a, b))
assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0]),
np.inner(a, b))
for n in range(1, 11):
a = np.arange(n*3*2, dtype=dtype).reshape(n, 3, 2)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("i..., i...", a, b), np.inner(a.T, b.T).T)
assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis]),
np.inner(a.T, b.T).T)
# outer(a,b)
for n in range(1, 17):
a = np.arange(3, dtype=dtype)+1
b = np.arange(n, dtype=dtype)+1
assert_equal(np.einsum("i,j", a, b), np.outer(a, b))
assert_equal(np.einsum(a, [0], b, [1]), np.outer(a, b))
# Suppress the complex warnings for the 'as f8' tests
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# matvec(a,b) / a.dot(b) where a is matrix, b is vector
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ij, j", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1]), np.dot(a, b))
c = np.arange(4, dtype=dtype)
np.einsum("ij,j", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n, dtype=dtype)
assert_equal(np.einsum("ji,j", a.T, b.T), np.dot(b.T, a.T))
assert_equal(np.einsum(a.T, [1, 0], b.T, [1]), np.dot(b.T, a.T))
c = np.arange(4, dtype=dtype)
np.einsum("ji,j", a.T, b.T, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a.T, [1, 0], b.T, [1], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(b.T.astype('f8'),
a.T.astype('f8')).astype(dtype))
# matmat(a,b) / a.dot(b) where a is matrix, b is matrix
for n in range(1, 17):
if n < 8 or dtype != 'f2':
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
assert_equal(np.einsum("ij,jk", a, b), np.dot(a, b))
assert_equal(np.einsum(a, [0, 1], b, [1, 2]), np.dot(a, b))
for n in range(1, 17):
a = np.arange(4*n, dtype=dtype).reshape(4, n)
b = np.arange(n*6, dtype=dtype).reshape(n, 6)
c = np.arange(24, dtype=dtype).reshape(4, 6)
np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1], b, [1, 2], out=c,
dtype='f8', casting='unsafe')
assert_equal(c,
np.dot(a.astype('f8'),
b.astype('f8')).astype(dtype))
# matrix triple product (note this is not currently an efficient
# way to multiply 3 matrices)
a = np.arange(12, dtype=dtype).reshape(3, 4)
b = np.arange(20, dtype=dtype).reshape(4, 5)
c = np.arange(30, dtype=dtype).reshape(5, 6)
if dtype != 'f2':
assert_equal(np.einsum("ij,jk,kl", a, b, c),
a.dot(b).dot(c))
assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3]),
a.dot(b).dot(c))
d = np.arange(18, dtype=dtype).reshape(3, 6)
np.einsum("ij,jk,kl", a, b, c, out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
d[...] = 0
np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
dtype='f8', casting='unsafe')
assert_equal(d, a.astype('f8').dot(b.astype('f8')
).dot(c.astype('f8')).astype(dtype))
# tensordot(a, b)
if np.dtype(dtype) != np.dtype('f2'):
a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
assert_equal(np.einsum("ijk, jil -> kl", a, b),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
np.tensordot(a, b, axes=([1, 0], [0, 1])))
c = np.arange(10, dtype=dtype).reshape(5, 2)
np.einsum("ijk,jil->kl", a, b, out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
c[...] = 0
np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
dtype='f8', casting='unsafe')
assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
axes=([1, 0], [0, 1])).astype(dtype))
# logical_and(logical_and(a!=0, b!=0), c!=0)
a = np.array([1, 3, -2, 0, 12, 13, 0, 1], dtype=dtype)
b = np.array([0, 3.5, 0., -2, 0, 1, 3, 12], dtype=dtype)
c = np.array([True, True, False, True, True, False, True, True])
assert_equal(np.einsum("i,i,i->i", a, b, c,
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
dtype='?', casting='unsafe'),
np.logical_and(np.logical_and(a!=0, b!=0), c!=0))
a = np.arange(9, dtype=dtype)
assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
# Various stride0, contiguous, and SSE aligned variants
for n in range(1, 25):
a = np.arange(n, dtype=dtype)
if np.dtype(dtype).itemsize > 1:
assert_equal(np.einsum("...,...", a, a), np.multiply(a, a))
assert_equal(np.einsum("i,i", a, a), np.dot(a, a))
assert_equal(np.einsum("i,->i", a, 2), 2*a)
assert_equal(np.einsum(",i->i", 2, a), 2*a)
assert_equal(np.einsum("i,->", a, 2), 2*np.sum(a))
assert_equal(np.einsum(",i->", 2, a), 2*np.sum(a))
assert_equal(np.einsum("...,...", a[1:], a[:-1]),
np.multiply(a[1:], a[:-1]))
assert_equal(np.einsum("i,i", a[1:], a[:-1]),
np.dot(a[1:], a[:-1]))
assert_equal(np.einsum("i,->i", a[1:], 2), 2*a[1:])
assert_equal(np.einsum(",i->i", 2, a[1:]), 2*a[1:])
assert_equal(np.einsum("i,->", a[1:], 2), 2*np.sum(a[1:]))
assert_equal(np.einsum(",i->", 2, a[1:]), 2*np.sum(a[1:]))
# An object array, summed as the data type
a = np.arange(9, dtype=object)
b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
assert_equal(b, np.sum(a))
assert_equal(b.dtype, np.dtype(dtype))
# A case which was failing (ticket #1885)
p = np.arange(2) + 1
q = np.arange(4).reshape(2, 2) + 3
r = np.arange(4).reshape(2, 2) + 7
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1');
def test_einsum_sums_uint8(self):
self.check_einsum_sums('u1');
def test_einsum_sums_int16(self):
self.check_einsum_sums('i2');
def test_einsum_sums_uint16(self):
self.check_einsum_sums('u2');
def test_einsum_sums_int32(self):
self.check_einsum_sums('i4');
def test_einsum_sums_uint32(self):
self.check_einsum_sums('u4');
def test_einsum_sums_int64(self):
self.check_einsum_sums('i8');
def test_einsum_sums_uint64(self):
self.check_einsum_sums('u8');
def test_einsum_sums_float16(self):
self.check_einsum_sums('f2');
def test_einsum_sums_float32(self):
self.check_einsum_sums('f4');
def test_einsum_sums_float64(self):
self.check_einsum_sums('f8');
def test_einsum_sums_longdouble(self):
self.check_einsum_sums(np.longdouble);
def test_einsum_sums_cfloat64(self):
self.check_einsum_sums('c8');
def test_einsum_sums_cfloat128(self):
self.check_einsum_sums('c16');
def test_einsum_sums_clongdouble(self):
self.check_einsum_sums(np.clongdouble);
def test_einsum_misc(self):
# This call used to crash because of a bug in
# PyArray_AssignZero
a = np.ones((1, 2))
b = np.ones((2, 2, 1))
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
np.einsum('ijklm,ijn->', a, b))
# Issue #2027, was a problem in the contiguous 3-argument
# inner loop implementation
a = np.arange(1, 3)
b = np.arange(1, 5).reshape(2, 2)
c = np.arange(1, 9).reshape(4, 2)
assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
[[[1, 3], [3, 9], [5, 15], [7, 21]],
[[8, 16], [16, 32], [24, 48], [32, 64]]])
def test_einsum_broadcast(self):
# Issue #2455 change in handling ellipsis
# remove the 'middle broadcast' error
# only use the 'RIGHT' iteration in prepare_op_axes
# adds auto broadcast on left where it belongs
# broadcast on right has to be explicit
A = np.arange(2*3*4).reshape(2,3,4)
B = np.arange(3)
ref = np.einsum('ijk,j->ijk',A, B)
assert_equal(np.einsum('ij...,j...->ij...',A, B), ref)
assert_equal(
|
np.einsum('ij...,...j->ij...',A, B)
|
numpy.einsum
|
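# Standalone check (not part of the numpy test suite): the ellipsis broadcasting tested
# above is equivalent to an explicit broadcast multiply with a new axis inserted.
import numpy as np

A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
B = np.arange(3)
assert np.array_equal(np.einsum('ijk,j->ijk', A, B), A * B[None, :, None])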
# -*- coding: utf-8 -*-
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
def resize_rotate(image, angle, l_, w_):
''' resize and rotate the figure. '''
image = cv2.resize(image, (image.shape[1], int(image.shape[0] / (3370 / 8651) * (w_ / l_))))
# grab the dimensions of the image and then determine the center.
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix and the sine and cosine.
M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image.
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation.
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image.
return cv2.warpAffine(image, M, (nW, nH), borderValue=(255, 255, 255))
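def _demo_resize_rotate():
    ''' Hedged usage sketch (not part of the paper's pipeline): a synthetic white canvas
    stands in for a vehicle icon and is rotated by 30 degrees for a 4.825 m x 1.78 m
    footprint; the canvas shape loosely mirrors the 3370/8651 aspect constant used above. '''
    canvas = np.full((337, 865, 3), 255, dtype=np.uint8)
    return resize_rotate(canvas, 30.0, 4.825, 1.78)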
def main():
''' Plot Fig6b. '''
# Load general data.
img_ini_1 = mpimg.imread('../../image/blue_.png')
img_ini_2 = mpimg.imread('../../image/green_.png')
img_ini_3 = mpimg.imread('../../image/orange_.png')
img_ini_4 = mpimg.imread('../../image/red_.png')
# Load parameters.
color = ['gray', '#3B89F0', '#41B571', '#FFB70A', '#FF5050']
veh_l_1, veh_w_1 = 4.825, 1.78
veh_l_2, veh_w_2 = 4.825, 1.78
veh_l_3, veh_w_3 = 4.825, 1.78
veh_l_4, veh_w_4 = 4.825, 1.78
''' Plot Fig6b_1. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(3.5 / 21 * 45, 3.5))
plt.axis('equal')
plt.xlim((-2.3, 42.7))
plt.ylim((-5, 16))
plt.xticks([], [], family='Times New Roman', fontsize=16)
plt.yticks([], [], family='Times New Roman', fontsize=16)
plt.subplots_adjust(left=0.05, bottom=0.05, top=0.95, right=0.95)
# Load data.
data = np.load('data/Fig6b_1.npz')
# Plot road information.
ax.plot([26, 26], [6.9, 14.3], color='gray', linewidth=1.8, alpha=0.35)
ax.plot([10.6, 10.6], [6.9, -0.5], color='gray', linewidth=1.8, alpha=0.35)
ax.plot([26, 60], [7, 7], color='orange', linewidth=1, alpha=0.5)
ax.plot([26, 60], [6.8, 6.8], color='orange', linewidth=1, alpha=0.5)
ax.plot([26, 60], [10.6, 10.6], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([26, 60], [3.2, 3.2], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([26, 60], [14.3, 14.3], color='gray', linewidth=2, alpha=0.7)
ax.plot([26, 60], [-0.5, -0.5], color='gray', linewidth=2, alpha=0.7)
ax.plot([10.6, -10], [7, 7], color='orange', linewidth=1, alpha=0.5)
ax.plot([10.6, -10], [6.8, 6.8], color='orange', linewidth=1, alpha=0.5)
ax.plot([10.6, -10], [10.6, 10.6], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([10.6, -10], [3.2, 3.2], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([10.6, -10], [14.3, 14.3], color='gray', linewidth=2, alpha=0.7)
ax.plot([10.6, -10], [-0.5, -0.5], color='gray', linewidth=2, alpha=0.7)
ax.plot([22.5, 22.5], [-4, -10], color='gray', linewidth=2, alpha=0.7)
ax.plot([18.3, 18.3], [-4, -10], color='orange', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.5)
ax.plot([14.1, 14.1], [-4, -10], color='#a6a6a6', linewidth=2, )
ax.plot(26 + 3.5 * np.cos(np.deg2rad(180 - np.arange(101) * 0.9)),
-4 + 3.5 * np.sin(np.deg2rad(180 - np.arange(101) * 0.9)), color='#a6a6a6', linewidth=2, )
ax.plot(10.6 + 3.5 * np.cos(np.deg2rad(90 - np.arange(101) * 0.9)),
-4 + 3.5 * np.sin(np.deg2rad(90 - np.arange(101) * 0.9)), color='#a6a6a6', linewidth=2, )
ax.plot([22.5, 22.5], [17.8, 20], color='gray', linewidth=2, alpha=0.7)
ax.plot([18.3, 18.3], [17.8, 20], color='orange', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.5)
ax.plot([14.1, 36.6], [17.8, 20], color='#a6a6a6', linewidth=2, )
ax.plot(26 + 3.5 * np.cos(np.deg2rad(180 + np.arange(101) * 0.9)),
17.8 + 3.5 * np.sin(np.deg2rad(180 + np.arange(101) * 0.9)), color='#a6a6a6', linewidth=2, )
ax.plot(10.6 + 3.5 * np.cos(np.deg2rad(-90 + np.arange(101) * 0.9)),
17.8 + 3.5 * np.sin(np.deg2rad(-90 + np.arange(101) * 0.9)), color='#a6a6a6', linewidth=2, )
# Plot vehicle information.
img = resize_rotate(img_ini_4, np.rad2deg(data['V1_t']), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.0062 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['V1_x'], data['V1_y']), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_3, np.rad2deg(data['V2_t']), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.0062 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['V2_x'], data['V2_y']), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_2, np.rad2deg(data['V3_t']), veh_l_3, veh_w_3)
im = OffsetImage(img, zoom=0.0062 * veh_l_3, alpha=1)
ab = AnnotationBbox(im, xy=(data['V3_x'], data['V3_y']), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['V4_t']), veh_l_4, veh_w_4)
im = OffsetImage(img, zoom=0.0062 * veh_l_4, alpha=1)
ab = AnnotationBbox(im, xy=(data['V4_x'], data['V4_y']), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot vehicle velocity.
plt.arrow(data['V1_x'], data['V1_y'], 6, 0, width=0.4, head_width=1, head_length=1.5, linewidth=0,
facecolor=color[4], alpha=1.0, zorder=30)
plt.arrow(data['V2_x'], data['V2_y'], 2.4, 0, width=0.4, head_width=1, head_length=1.5, linewidth=0,
facecolor=color[3], alpha=1.0, zorder=30)
plt.arrow(data['V3_x'], data['V3_y'], 0.98995, 0.98995, width=0.4, head_width=1,
head_length=1.5, linewidth=0, facecolor=color[2], alpha=1.0, zorder=30)
plt.arrow(data['V4_x'], data['V4_y'], -3.2, 0, width=0.4, head_width=1, head_length=1.5, linewidth=0,
facecolor=color[1], alpha=1.0, zorder=30)
# Show.
plt.show()
# plt.savefig('Fig6b_1.png', dpi=600)
plt.close()
''' Plot Fig6b_2. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(1.1 / 5.6 * 14, 1.1))
plt.axis('equal')
plt.xlim((14, 14 + 14))
plt.ylim((1.6, 1.6 + 5.6))
plt.xticks([], [], family='Times New Roman', fontsize=16)
plt.yticks([], [], family='Times New Roman', fontsize=16)
plt.subplots_adjust(left=0.05, bottom=0.05, top=0.95, right=0.95)
# Load data.
data = np.load('data/Fig6b_2.npz')
# Plot vehicle information.
img = resize_rotate(img_ini_4, np.rad2deg(data['V1_t'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.0074 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['V1_x'][-1], data['V1_y'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_3, np.rad2deg(data['V2_t'][-1]), veh_l_2, veh_w_2)
im = OffsetImage(img, zoom=0.0074 * veh_l_2, alpha=1)
ab = AnnotationBbox(im, xy=(data['V2_x'][-1], data['V2_y'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['V1_x'], data['V1_y'], color=color[4], linestyle='--', linewidth=1.1, alpha=0.8)
plt.plot(data['V2_x'], data['V2_y'], color=color[3], linestyle='--', linewidth=1.1, alpha=0.8)
# Show.
plt.show()
# plt.savefig('Fig6b_2.png', dpi=600)
plt.close()
''' Plot Fig6b_3. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(1.1 / 5.6 * 14, 1.1))
plt.axis('equal')
plt.xlim((16 + 0.5, 30 + 0.5))
plt.ylim((1.4, 1.4 + 1.1))
plt.xticks([], [], family='Times New Roman', fontsize=16)
plt.yticks([], [], family='Times New Roman', fontsize=16)
plt.subplots_adjust(left=0.05, bottom=0.05, top=0.95, right=0.95)
# Load data.
data = np.load('data/Fig6b_3.npz')
# Plot vehicle information.
img = resize_rotate(img_ini_4, np.rad2deg(data['V1_t'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.0074 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['V1_x'][-1], data['V1_y'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_2, np.rad2deg(data['V3_t'][-1]), veh_l_3, veh_w_3)
im = OffsetImage(img, zoom=0.0074 * veh_l_3, alpha=1)
ab = AnnotationBbox(im, xy=(data['V3_x'][-1], data['V3_y'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['V1_x'], data['V1_y'], color=color[4], linestyle='--', linewidth=1.1, alpha=0.8)
plt.plot(data['V3_x'], data['V3_y'], color=color[2], linestyle='--', linewidth=1.1, alpha=0.8)
# Show.
plt.show()
# plt.savefig('Fig6b_3.png', dpi=600)
plt.close()
''' Plot Fig6b_4. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(1.1 / 5.6 * 14, 1.1))
plt.axis('equal')
plt.xlim((16 - 0.9, 30 - 0.9))
plt.ylim((5 - 0.5, 11. - 0.4 - 0.5))
plt.xticks([], [], family='Times New Roman', fontsize=16)
plt.yticks([], [], family='Times New Roman', fontsize=16)
plt.subplots_adjust(left=0.05, bottom=0.05, top=0.95, right=0.95)
# Load data.
data = np.load('data/Fig6b_4.npz')
# Plot vehicle information.
img = resize_rotate(img_ini_4, np.rad2deg(data['V1_t'][-1]), veh_l_1, veh_w_1)
im = OffsetImage(img, zoom=0.0074 * veh_l_1, alpha=1)
ab = AnnotationBbox(im, xy=(data['V1_x'][-1], data['V1_y'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
img = resize_rotate(img_ini_1, np.rad2deg(data['V4_t'][-1]), veh_l_4, veh_w_4)
im = OffsetImage(img, zoom=0.0074 * veh_l_4, alpha=1)
ab = AnnotationBbox(im, xy=(data['V4_x'][-1], data['V4_y'][-1]), xycoords='data', pad=0, frameon=False)
ax.add_artist(ab)
# Plot trajectory information.
plt.plot(data['V1_x'], data['V1_y'], color=color[4], linestyle='--', linewidth=1.1, alpha=0.8)
plt.plot(data['V4_x'], data['V4_y'], color=color[1], linestyle='--', linewidth=1.1, alpha=0.8)
# Show.
plt.show()
# plt.savefig('Fig6b_4.png', dpi=600)
plt.close()
''' Plot Fig6b_5. '''
# Basic setup.
fig, ax = plt.subplots(figsize=(3.5 / 21 * 45, 3.5))
plt.axis('equal')
plt.xlim((-2.3, 42.7))
plt.ylim((-5, 16))
plt.xticks([], [], family='Times New Roman', fontsize=16)
plt.yticks([], [], family='Times New Roman', fontsize=16)
plt.subplots_adjust(left=0.05, bottom=0.05, top=0.95, right=0.95)
# Load data.
data = np.load('data/Fig6b_5.npz')
# Plot road information.
ax.plot([26, 26], [6.9, 14.3], color='gray', linewidth=1.8, alpha=0.35)
ax.plot([10.6, 10.6], [6.9, -0.5], color='gray', linewidth=1.8, alpha=0.35)
ax.plot([26, 60], [7, 7], color='orange', linewidth=1, alpha=0.5)
ax.plot([26, 60], [6.8, 6.8], color='orange', linewidth=1, alpha=0.5)
ax.plot([26, 60], [10.6, 10.6], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([26, 60], [3.2, 3.2], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([26, 60], [14.3, 14.3], color='gray', linewidth=2, alpha=0.7)
ax.plot([26, 60], [-0.5, -0.5], color='gray', linewidth=2, alpha=0.7)
ax.plot([10.6, -10], [7, 7], color='orange', linewidth=1, alpha=0.5)
ax.plot([10.6, -10], [6.8, 6.8], color='orange', linewidth=1, alpha=0.5)
ax.plot([10.6, -10], [10.6, 10.6], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([10.6, -10], [3.2, 3.2], color='gray', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.35)
ax.plot([10.6, -10], [14.3, 14.3], color='gray', linewidth=2, alpha=0.7)
ax.plot([10.6, -10], [-0.5, -0.5], color='gray', linewidth=2, alpha=0.7)
ax.plot([22.5, 22.5], [-4, -10], color='gray', linewidth=2, alpha=0.7)
ax.plot([18.3, 18.3], [-4, -10], color='orange', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.5)
ax.plot([14.1, 14.1], [-4, -10], color='#a6a6a6', linewidth=2, )
ax.plot(26 + 3.5 * np.cos(np.deg2rad(180 - np.arange(101) * 0.9)),
-4 + 3.5 * np.sin(np.deg2rad(180 - np.arange(101) * 0.9)), color='#a6a6a6', linewidth=2, )
ax.plot(10.6 + 3.5 * np.cos(np.deg2rad(90 - np.arange(101) * 0.9)),
-4 + 3.5 * np.sin(np.deg2rad(90 - np.arange(101) * 0.9)), color='#a6a6a6', linewidth=2, )
ax.plot([22.5, 22.5], [17.8, 20], color='gray', linewidth=2, alpha=0.7)
ax.plot([18.3, 18.3], [17.8, 20], color='orange', linestyle=(0, (10, 8)), linewidth=1.8, alpha=0.5)
ax.plot([14.1, 36.6], [17.8, 20], color='#a6a6a6', linewidth=2, )
ax.plot(26 + 3.5 * np.cos(np.deg2rad(180 + np.arange(101) * 0.9)),
17.8 + 3.5 * np.sin(np.deg2rad(180 +
|
np.arange(101)
|
numpy.arange
|
import math
import cv2
import numpy as np
import torch
import torchvision.transforms.functional as F
from plasticorigins.detection.centernet.models import (
create_model as create_base,
)
class ResizeForCenterNet:
def __init__(self, fix_res=False):
self.fix_res = fix_res
def __call__(self, image):
if self.fix_res:
new_h = 512
new_w = 512
else:
w, h = image.size
new_h = (h | 31) + 1
new_w = (w | 31) + 1
image = F.resize(image, (new_h, new_w))
return image
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = height + width
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
m, n = ((ss - 1.0) / 2.0 for ss in shape)
y, x = np.ogrid[-m : m + 1, -n : n + 1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top : y + bottom, x - left : x + right]
masked_gaussian = gaussian[
radius - top : radius + bottom, radius - left : radius + right
]
if (
min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0
): # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def blob_for_bbox(bbox, heatmap, downsampling_factor=None):
if downsampling_factor is not None:
left, top, w, h = (
bbox_coord // downsampling_factor for bbox_coord in bbox
)
else:
left, top, w, h = (bbox_coord for bbox_coord in bbox)
right, bottom = left + w, top + h
ct_int = None
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(left + right) / 2, (top + bottom) / 2], dtype=np.float32
)
ct_int = ct.astype(np.int32)
heatmap = draw_umich_gaussian(heatmap, ct_int, radius)
return heatmap, ct_int
def pre_process_centernet(image, meta=None, fix_res=True):
scale = 1.0
mean = [0.408, 0.447, 0.47]
std = [0.289, 0.274, 0.278]
height, width = image.shape[0:2]
new_height = int(height * scale)
new_width = int(width * scale)
if fix_res:
inp_height, inp_width = 512, 512
c = np.array([new_width / 2.0, new_height / 2.0], dtype=np.float32)
s = max(height, width) * 1.0
else:
inp_height = (new_height | 31) + 1
inp_width = (new_width | 31) + 1
c =
|
np.array([new_width // 2, new_height // 2], dtype=np.float32)
|
numpy.array
|
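# Hedged sketch (not part of the library above): draw one Gaussian blob for a
# hypothetical 10x20 box at the centre of a 128x128 heatmap using the helpers defined
# above, mirroring how blob_for_bbox places one peak per object.
import math
import numpy as np

def demo_heatmap_sketch():
    heatmap = np.zeros((128, 128), dtype=np.float32)
    radius = max(0, int(gaussian_radius((math.ceil(10), math.ceil(20)))))
    return draw_umich_gaussian(heatmap, center=(64, 64), radius=radius)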
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
"""Tests for frustum clipping functions."""
import numpy as np
from argoverse.utils.frustum_clipping import (
clip_segment_v3_plane_n,
cuboid_to_2d_frustum_bbox,
fit_plane_to_point_cloud,
form_left_clipping_plane,
form_low_clipping_plane,
form_near_clipping_plane,
form_right_clipping_plane,
form_top_clipping_plane,
generate_frustum_planes,
plane_point_side_v3,
)
def test_plane_point_side_v3_behind_plane() -> None:
"""Check if a point is in direction of plane normal or on other side."""
p = np.array([1, 1, 1, 0])
v = np.array([-1, -1, -1])
sign = plane_point_side_v3(p, v)
assert sign < 0
def test_plane_point_side_v3_on_plane() -> None:
"""Check if point is in direction of plane normal or on other side."""
p = np.array([1, 1, 1, 0])
v = np.array([0, 0, 0])
sign = plane_point_side_v3(p, v)
assert sign == 0
def test_plane_point_side_v3_point_in_front_of_plane() -> None:
"""Check if point is in direction of plane normal or on other side."""
p = np.array([1, 1, 1, 0])
v = np.array([2, 2, 2])
sign = plane_point_side_v3(p, v)
assert sign > 0
def test_fit_plane_to_point_cloud() -> None:
"""Given a plane with slope +2/1 for +y/+z, find slowly tilting normal away from the plane.
+y
/|
/ |
/ |
------ + z
"""
pc = np.array([[0, 2, 1], [1, 2, 1], [0, 0, 0]])
a, b, c, d = fit_plane_to_point_cloud(pc)
assert np.isclose(d, 0) # touching origin
normal = np.array([a, b, c])
normal /= np.linalg.norm(normal)
gt_normal = np.array([0.0, -1.0, 2.0])
gt_normal /= np.linalg.norm(gt_normal)
# correct y sign if needed, the slope is all that matters
# (ratio between +y/+z should be -1/2)
if normal[1] > 0:
normal *= -1
assert np.allclose(gt_normal, normal)
def test_clip_segment_v3_plane_n_all_outside() -> None:
r"""Test clipping line segments when all points are outside the view.
normals point into this frustum
\\ // \\ /
\\ //
\\//
o---------o line segment
"""
# p1, p2: pair of 3d vectors defining a line segment.
# planes: a sequence of (4 floats): `(x, y, z, d)`.
p1 = np.array([-1, -0.5, 4.0])
p2 = np.array([1.0, -0.5, 3.0])
planes = [np.array([-1.0, 2.0, 0.0, 0.0]), np.array([1.0, 2.0, 0.0, 0.0])]
# Returns 2 vector triplets (representing the clipped segment)
# or (None, None) meaning the segment is entirely outside the frustum.
p1_clip, p2_clip = clip_segment_v3_plane_n(p1, p2, planes)
assert p1_clip == None # noqa (ignore pycodestyle E711)
assert p2_clip == None # noqa (ignore pycodestyle E711)
print(p1_clip, p2_clip)
def test_clip_segment_v3_plane_n_clip_twice() -> None:
r"""Test clipping line segments twice.
The normals point into this frustum
\\ / \\ /
o-\\ --//--o line segment
\\//
"""
# p1, p2: pair of 3d vectors defining a line segment.
# planes: a sequence of (4 floats): `(x, y, z, d)`.
p1 = np.array([2, 0.5, 0.0])
p2 = np.array([-2, 0.5, 0.0])
planes = [np.array([-1.0, 2.0, 0.0, 0.0]), np.array([1.0, 2.0, 0.0, 0.0])]
# Returns 2 vector triplets (representing the clipped segment)
# or (None, None) meaning the segment is entirely outside the frustum.
p1_clip, p2_clip = clip_segment_v3_plane_n(p1, p2, planes)
assert np.allclose(p1_clip, np.array([1, 0.5, 0.0]))
assert np.allclose(p2_clip, np.array([-1, 0.5, 0.0]))
def test_clip_segment_v3_plane_n_subsumed_in_frustum() -> None:
r"""Test clipping line segments that are subsumed in the frustum.
The normals point into this frustum
\\ // o---o \\ /
\\ //
\\//
Line segment is entirely inside the frustum this time, so stays intact.
"""
# p1, p2: pair of 3d vectors defining a line segment.
# planes: a sequence of (4 floats): `(x, y, z, d)`.
p1 = np.array([1.0, 2, 0.0])
p2 = np.array([-1.0, 2.0, 0.0])
planes = [np.array([-1.0, 2.0, 0.0, 0.0]), np.array([1.0, 2.0, 0.0, 0.0])]
# Returns 2 vector triplets (representing the clipped segment)
# or (None, None) meaning the segment is entirely outside the frustum.
p1_clip, p2_clip = clip_segment_v3_plane_n(p1.copy(), p2.copy(), planes)
assert np.allclose(p1_clip, p1)
assert np.allclose(p2_clip, p2)
def test_clip_segment_v3_plane_n() -> None:
r"""Test clipping line segment. Expect that the bottom point will be clipped to the origin.
The normals point into this frustum
\\ // o \\ /
\\ | //
\\//
|
o line segment half in, half out
"""
# p1, p2: pair of 3d vectors defining a line segment.
# planes: a sequence of (4 floats): `(x, y, z, d)`.
p1 = np.array([0.0, 1.0, 0.0])
p2 = np.array([0.0, -1.0, 0.0])
planes = [np.array([-1.0, 2.0, 0.0, 0.0]), np.array([1.0, 2.0, 0.0, 0.0])]
# Returns 2 vector triplets (representing the clipped segment)
# or (None, None) meaning the segment is entirely outside the frustum.
p1_clip, p2_clip = clip_segment_v3_plane_n(p1.copy(), p2.copy(), planes)
assert np.allclose(p1_clip, p1)
assert np.allclose(p2_clip, np.zeros(3))
def test_form_right_clipping_plane() -> None:
"""Test form_right_clipping_plane(). Use 4 points to fit the right clipping plane."""
fx = 10.0
img_width = 30
right_plane = form_right_clipping_plane(fx, img_width)
Y_OFFSET = 10 # arbitrary extent down the imager
right = np.array(
[
[0, 0, 0],
[img_width / 2.0, 0, fx],
[0, Y_OFFSET, 0],
[img_width / 2.0, Y_OFFSET, fx],
]
)
a, b, c, d = fit_plane_to_point_cloud(right)
right_plane_gt = np.array([a, b, c, d])
# enforce that plane normal points into the frustum
# x-component of normal should point in negative direction.
if right_plane_gt[0] > 0:
right_plane_gt *= -1
assert np.allclose(right_plane, right_plane_gt)
def test_form_left_clipping_plane() -> None:
"""Test form_left_clipping_plane(). Use 4 points to fit the left clipping plane."""
fx = 10.0
img_width = 30
left_plane = form_left_clipping_plane(fx, img_width)
Y_OFFSET = 10
left = np.array(
[
[0, 0, 0],
[-img_width / 2.0, 0, fx],
[0, Y_OFFSET, 0],
[-img_width / 2.0, Y_OFFSET, fx],
]
)
a, b, c, d = fit_plane_to_point_cloud(left)
left_plane_gt = -1 * np.array([a, b, c, d])
# enforce that plane normal points into the frustum
if left_plane_gt[0] < 0:
left_plane_gt *= -1
assert np.allclose(left_plane, left_plane_gt)
def test_form_top_clipping_plane() -> None:
"""Test form_top_clipping_plane(). Use 3 points to fit the TOP clipping plane."""
fx = 10.0
img_height = 45
top_plane = form_top_clipping_plane(fx, img_height)
img_width = 1000.0
top_pts = np.array(
[
[0, 0, 0],
[-img_width / 2, -img_height / 2, fx],
[img_width / 2, -img_height / 2, fx],
]
)
a, b, c, d = fit_plane_to_point_cloud(top_pts)
top_plane_gt = np.array([a, b, c, d])
# enforce that plane normal points into the frustum
if top_plane_gt[1] < 0:
# y-coord of normal should point in pos y-axis dir(down) on top-clipping plane
top_plane_gt *= -1
assert top_plane_gt[1] > 0 and top_plane_gt[2] > 0
assert np.allclose(top_plane, top_plane_gt)
def test_form_low_clipping_plane() -> None:
"""Test form_low_clipping_plane()."""
fx = 12.0
img_height = 35
low_plane = form_low_clipping_plane(fx, img_height)
img_width = 10000
low_pts = np.array(
[
[0, 0, 0],
[-img_width / 2, img_height / 2, fx],
[img_width / 2, img_height / 2, fx],
]
)
a, b, c, d = fit_plane_to_point_cloud(low_pts)
low_plane_gt = np.array([a, b, c, d])
# enforce that plane normal points into the frustum
# y-coord of normal should point in neg y-axis dir(up) on low-clipping plane
# z-coord should point in positive z-axis direction (away from camera)
if low_plane_gt[1] > 0:
low_plane_gt *= -1
assert low_plane_gt[1] < 0 and low_plane_gt[2] > 0
assert np.allclose(low_plane, low_plane_gt)
def test_form_near_clipping_plane() -> None:
"""Test form_near_clipping_plane(). Use 4 points to fit the near clipping plane."""
img_width = 10
img_height = 15
near_clip_dist = 30.0
near_plane = form_near_clipping_plane(near_clip_dist)
near = np.array(
[
[img_width / 2, 0, near_clip_dist],
[-img_width / 2, 0, near_clip_dist],
[img_width / 2, -img_height / 2.0, near_clip_dist],
[img_width / 2, img_height / 2.0, near_clip_dist],
]
)
a, b, c, d = fit_plane_to_point_cloud(near)
near_plane_gt = np.array([a, b, c, d])
assert np.allclose(near_plane, near_plane_gt)
def test_generate_frustum_planes_ring_cam() -> None:
"""Test generate_frustum_planes() for a ring camera.
Skew is 0.0.
"""
near_clip_dist = 6.89 # arbitrary value
K = np.eye(3)
# Set "focal_length_x_px_"
K[0, 0] = 1402.4993697398709
# Set "focal_length_y_px_"
K[1, 1] = 1405.1207294310225
# Set "focal_center_x_px_"
K[0, 2] = 957.8471720086527
# Set "focal_center_y_px_"
K[1, 2] = 600.442948946496
camera_name = "ring_front_right"
img_height = 1200
img_width = 1920
planes = generate_frustum_planes(K, camera_name, near_clip_dist=near_clip_dist)
if planes is None:
assert False
left_plane, right_plane, near_plane, low_plane, top_plane = planes
fx = K[0, 0]
left_plane_gt = np.array([fx, 0.0, img_width / 2.0, 0.0])
right_plane_gt = np.array([-fx, 0.0, img_width / 2.0, 0.0])
near_plane_gt = np.array([0.0, 0.0, 1.0, -near_clip_dist])
low_plane_gt = np.array([0.0, -fx, img_height / 2.0, 0.0])
top_plane_gt = np.array([0.0, fx, img_height / 2.0, 0.0])
assert np.allclose(left_plane, left_plane_gt / np.linalg.norm(left_plane_gt))
assert np.allclose(right_plane, right_plane_gt / np.linalg.norm(right_plane_gt))
assert np.allclose(low_plane, low_plane_gt / np.linalg.norm(low_plane_gt))
assert np.allclose(top_plane, top_plane_gt / np.linalg.norm(top_plane_gt))
assert np.allclose(near_plane, near_plane_gt)
def test_generate_frustum_planes_stereo() -> None:
"""Test generate_frustum_planes() for a stereo camera.
Skew is 0.0.
"""
near_clip_dist = 3.56 # arbitrary value
K = np.eye(3)
# Set "focal_length_x_px_"
K[0, 0] = 3666.534329132812
# Set "focal_length_y_px_"
K[1, 1] = 3673.5030423482513
# Set "focal_center_x_px_"
K[0, 2] = 1235.0158218941356
# Set "focal_center_y_px_"
K[1, 2] = 1008.4536901420888
camera_name = "stereo_front_left"
img_height = 2056
img_width = 2464
planes = generate_frustum_planes(K, camera_name, near_clip_dist=near_clip_dist)
if planes is None:
assert False
left_plane, right_plane, near_plane, low_plane, top_plane = planes
fx = K[0, 0]
left_plane_gt = np.array([fx, 0.0, img_width / 2.0, 0.0])
    right_plane_gt = np.array([-fx, 0.0, img_width / 2.0, 0.0])  # api: numpy.array
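# --- illustrative aside (not from the original test file) ---------------------
# A minimal sketch of why the analytic ground-truth planes used above look like
# [fx, 0, img_width/2, 0]: each side face of the camera frustum passes through
# the optical center and the corresponding image edge back-projected to depth
# z = fx, so its normal is the cross product of two rays lying on that face.
# The fx and img_width values below are arbitrary illustrations, not library defaults.
import numpy as np

fx, img_width = 10.0, 30.0
ray_up = np.array([-img_width / 2.0, -1.0, fx])    # on the left face, slightly up
ray_down = np.array([-img_width / 2.0, 1.0, fx])   # on the left face, slightly down
normal = np.cross(ray_down, ray_up)                # points toward +x, into the frustum
normal = normal / np.linalg.norm(normal)
expected = np.array([fx, 0.0, img_width / 2.0])
assert np.allclose(normal, expected / np.linalg.norm(expected))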
import warnings
import matplotlib.pyplot as plt
import numpy as np
class RandomWalk2D:
def __init__(self, grid_size=3, end_states=[(0,0)], rewards=[1], exploration=.1, move_cost=0):
self.n = grid_size
self.end_states = end_states
self.move_cost = move_cost
self.rewards = rewards
self.e = exploration
self.n_actions = 4
self.actions = [(-1,0), (1,0), (0,-1), (0,1)]
        # invalid move penalty for the first time
self.q = np.ones((self.n, self.n, self.n_actions)) * -99999
self.c = np.zeros((self.n, self.n, self.n_actions), dtype=int)
self.policy = np.zeros((self.n,self.n), dtype=int)
        for state, value in np.ndenumerate(self.policy)  # api: numpy.ndenumerate
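# --- illustrative aside (assumed usage, not part of the class above) ----------
# np.ndenumerate walks an array and yields ((row, col), value) pairs, which is
# what the truncated loop above relies on. The greedy-policy update shown here
# is only a plausible example of what such a loop body might do.
import numpy as np

policy = np.zeros((3, 3), dtype=int)
q = np.random.rand(3, 3, 4)              # toy action-value table
for state, value in np.ndenumerate(policy):
    policy[state] = np.argmax(q[state])  # pick the greedy action for each cell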
from __future__ import absolute_import
import xarray as xr
import h5py
import numpy as np
import pandas as pd
import datetime
import scipy
import scipy.interpolate
import os
#turn off warnings so I can use the progress bar
import warnings
warnings.filterwarnings('ignore')
class GPMDPR():
"""
Author: <NAME>. This class is intended to help with the efficient processing of GPM-DPR radar files.
Currently, xarray cannot read NASA's HDF files directly (2A.GPM.DPR*). So here is an attempt to do so.
    Once in xarray format, the efficient search functions can be used.
    **NOTE 1: Currently, I do not have this function pass all variables through (there is quite a list of them).
    Maybe in the future I will generalize it to do so, but right now it's a bit tedious to code up all the units and such.
**NOTE 2: Outerswath code not ready yet. Do not turn the flag on
Feel free to reach out to me on twitter (@dopplerchase) or email <EMAIL>
For your reference, please check out the ATBD: https://pps.gsfc.nasa.gov/GPMprelimdocs.html
"""
def __init__(self,filename=[],boundingbox=None,outer_swath=False,auto_run=True):
""" Initializes things.
filename: str, path to GPM-DPR file
boundingbox: list of floats, if you would like to cut the gpm to a lat lon box
            send in a list of [lon_min,lon_max,lat_min,lat_max]
"""
self.filename = filename
self.xrds = None
self.datestr=None
self.height= None
self.corners = boundingbox
self.retrieval_flag = 0
self.interp_flag = 0
self.outer_swath = outer_swath
        #determine whether this file uses the newer (non-legacy) variable names
if (filename.find('X') >= 0):
self.legacy = False
self.v07 = False
elif (filename.find('V9') >= 0):
self.legacy = False
self.v07 = True
else:
self.legacy = True
if auto_run:
#this reads the hdf5 file
self.read()
#this calculates the range height for the 2D cross-sections
self.calc_heights()
#this will convert the hdf to an xarray dataset
self.toxr()
def read(self):
"""
This method simply reads the HDF file and gives it to the class.
"""
self.hdf = h5py.File(self.filename,'r')
if self.legacy:
###set some global parameters
#whats the common shape of the DPR files
if self.outer_swath:
shape = self.hdf['NS']['PRE']['zFactorMeasured'][:,:,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
else:
shape = self.hdf['NS']['PRE']['zFactorMeasured'][:,12:37,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
else:
shape = self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:].shape
self.along_track = np.arange(0,shape[0])
self.cross_track = np.arange(0,shape[1])
self.range = np.arange(0,shape[2])
def calc_heights(self):
""" Here we calculate the atitude above mean sea level. Surprisingly this was not
provided in version 6, but is included in the new version. Please not there is a
difference between this method and the supplied heights in the new version. It
seems to be less than 200 m error. Just keep that in mind!"""
        x2 = 2. * 17 #total swath is 34 degrees (from -17 to +17)
re = 6378. #radius of the earth km
theta = -1 *(x2/2.) + (x2/48.)*np.arange(0,49) #break the -17 to 17 into equal degrees
theta2 = np.zeros(theta.shape[0]+1)
        theta = theta - 0.70833333/2. #shift to get the left edge for pcolor plotting
theta2[:-1] = theta
theta2[-1] = theta[-1] + 0.70833333
theta = theta2 * (np.pi/180.) #convert to radians
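        # geometry note: 'a' below is the earth-central angle between the satellite
        # nadir point and the gate, obtained from the law of sines on the
        # (earth center, satellite, gate) triangle with orbit height 407 km; the
        # beam's local zenith angle is then (theta + a), so each 0.125 km gate is
        # projected onto the vertical with cos(theta + a).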
prh = np.zeros([49,176]) #set up matrix
for i in np.arange(0,176): #loop over num range gates
for j in np.arange(0,49): #loop over scans
a = np.arcsin(((re+407)/re)*np.sin(theta[j]))-theta[j] #407 km is the orbit height, re radius of earth,
prh[j,i] = (176-(i))*0.125*np.cos(theta[j]+a) #more geometry
da = xr.DataArray(prh[:,:], dims=['cross_track','range'])
da.to_netcdf('./HEIGHTS_full.nc')
da = xr.DataArray(prh[12:37,:], dims=['cross_track','range'])
da.to_netcdf('./HEIGHTS.nc')
def toxr(self,ptype=None,clutter=False,echotop=False,precipflag=10):
"""
This is the main method of the package. It directly creates the xarray dataset from the HDF file.
        To save computational time, it first checks whether you set a box of interest.
        Then it uses xarray's efficient searching to make sure there are some profiles in that box.
"""
#set the precip type of interest. If none, give back all data...
self.ptype= ptype
self.snow = False
self.precip = False
if (self.ptype=='precip') or (self.ptype=='Precip') or \
(self.ptype=='PRECIP') or (self.ptype=='snow') or \
(self.ptype=='Snow') or (self.ptype=='SNOW'):
self.precip=True
if (self.ptype=='snow') or (self.ptype=='Snow') or (self.ptype=='SNOW'):
self.snow=True
#set the killflag to false. If this is True at the end, it means no points in the box were found.
self.killflag = False
        #first things first, check to make sure there are points in the bounding box.
        #cut points to make sure there are points in your box. This should save you time.
if self.corners is not None:
#load data out of hdf
if self.outer_swath:
if self.legacy:
lons = self.hdf['NS']['Longitude'][:,:]
lats = self.hdf['NS']['Latitude'][:,:]
else:
lons = self.hdf['FS']['Longitude'][:,:]
lats = self.hdf['FS']['Latitude'][:,:]
else:
lons = self.hdf['NS']['Longitude'][:,12:37]
lats = self.hdf['NS']['Latitude'][:,12:37]
#shove it into a dataarray
da = xr.DataArray(np.zeros(lons.shape), dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats)})
            #cut to the edges of the box
da = da.where((da.lons >= self.corners[0]) & \
(da.lons <= self.corners[1]) & \
(da.lats >= self.corners[2]) & \
(da.lats <= self.corners[3]),drop=False)
#okay, now drop nans
da = da.dropna(dim='along_track',how='all')
#if there are no profiles, the len is 0, and we will set the kill flag
if da.along_track.shape[0]==0:
self.killflag = True
#if there were no points it will not waste time with processing or io stuff
if self.killflag:
pass
else:
if self.datestr is None:
self.parse_dtime()
if self.height is None:
if self.legacy:
if self.outer_swath:
height = xr.open_dataarray('./HEIGHTS_full.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['NS']['Longitude'].shape[0],1,1))
self.height = height
else:
height = xr.open_dataarray('./HEIGHTS.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['NS']['Longitude'].shape[0],1,1))
self.height = height
else:
height = xr.open_dataarray('./HEIGHTS_full.nc')
height = height.values[np.newaxis,:,:]
height = np.tile(height,(self.hdf['FS']['Longitude'].shape[0],1,1))
self.height = height
if self.corners is None:
if self.legacy:
if self.outer_swath:
lons = self.hdf['NS']['Longitude'][:,:]
lats = self.hdf['NS']['Latitude'][:,:]
else:
lons = self.hdf['NS']['Longitude'][:,12:37]
lats = self.hdf['NS']['Latitude'][:,12:37]
else:
lons = self.hdf['FS']['Longitude'][:,:]
lats = self.hdf['FS']['Latitude'][:,:]
if self.legacy:
if self.outer_swath:
#need to fill the outerswath with nans
flagSurfaceSnowfall = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*255
flagSurfaceSnowfall[:,12:37] = self.hdf['MS']['Experimental']['flagSurfaceSnowfall'][:,:]
da = xr.DataArray(flagSurfaceSnowfall,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['NS']['CSF']['binBBTop'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['NS']['CSF']['binBBBottom'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                    da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
flagPrecip = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
flagPrecip[:,12:37] = self.hdf['MS']['PRE']['flagPrecip'][:,:]
da = xr.DataArray(flagPrecip,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                    da.attrs['standard_name'] = 'flag to diagnose precip at surface. ' + \
                        '11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
typePrecip[:,12:37] = self.hdf['MS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
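                    # typePrecip is an 8-digit classification code; dividing by 1e7 and
                    # truncating keeps only its leading digit (1 = stratiform,
                    # 2 = convective, 3 = other), matching the standard_name set below.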
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                    #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain, 2.55 is missing)
phaseNearSurface = self.hdf['NS']['SLV']['phaseNearSurface'][:,:]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['NS']['SLV']['precipRateNearSurface'][:,:]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrectedNearSurface'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
kanearsurf = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
kanearsurf[:,12:37] = self.hdf['MS']['SLV']['zFactorCorrectedNearSurface'][:,:]
da = xr.DataArray(kanearsurf,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrected'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['epsilon'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
MSKa_c = np.ones([len(self.along_track),len(self.cross_track),len(self.range)],dtype=float)*-9999
MSKa_c[:,12:37,:] = self.hdf['MS']['SLV']['zFactorCorrected'][:,:,:]
da = xr.DataArray(MSKa_c,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['NS']['PRE']['zFactorMeasured'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
MSKa = np.ones([len(self.along_track),len(self.cross_track),len(self.range)],dtype=float)*-9999
MSKa[:,12:37,:] = self.hdf['MS']['PRE']['zFactorMeasured'][:,:,:]
da = xr.DataArray(MSKa,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['precipRate'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                    #change this to 10 if you want to relax the conditions, because the Ka band has bad sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
if self.corners is not None:
self.setboxcoords()
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
else:
da = xr.DataArray(self.hdf['MS']['Experimental']['flagSurfaceSnowfall'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['NS']['CSF']['binBBTop'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['NS']['CSF']['binBBBottom'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                    da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
da = xr.DataArray(self.hdf['MS']['PRE']['flagPrecip'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                    da.attrs['standard_name'] = 'flag to diagnose precip at surface. ' + \
                        '11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = self.hdf['MS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                    #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain, 2.55 is missing)
phaseNearSurface = self.hdf['NS']['SLV']['phaseNearSurface'][:,12:37]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['NS']['SLV']['precipRateNearSurface'][:,12:37]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrectedNearSurface'][:,12:37],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
da = xr.DataArray(self.hdf['MS']['SLV']['zFactorCorrectedNearSurface'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['zFactorCorrected'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['epsilon'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
da = xr.DataArray(self.hdf['MS']['SLV']['zFactorCorrected'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['NS']['PRE']['zFactorMeasured'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
da = xr.DataArray(self.hdf['MS']['PRE']['zFactorMeasured'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['precipRate'][:,12:37,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,12:37,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['NS']['SLV']['paramDSD'][:,12:37,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                    #change this to 10 if you want to relax the conditions, because the Ka band has bad sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
# if self.snow:
# self.xrds = self.xrds.where(self.xrds.flagSurfaceSnow==1)
if self.corners is not None:
self.setboxcoords()
#to reduce size of data, drop empty cross-track sections
# self.xrds = self.xrds.dropna(dim='along_track',how='all')
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
else:
da = xr.DataArray(self.hdf['FS']['Experimental']['flagSurfaceSnowfall'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=255)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'experimental flag to diagnose snow at surface'
#make xr dataset
self.xrds = da.to_dataset(name = 'flagSurfaceSnow')
#
#ADD BBtop and Bottom
da = xr.DataArray(self.hdf['FS']['CSF']['binBBTop'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'ind of BBtop'
self.xrds['binBBTop'] = da
da = xr.DataArray(self.hdf['FS']['CSF']['binBBBottom'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'ind of BBbottom'
self.xrds['binBBBottom'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['flagPrecip'][:,:],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
                da.attrs['standard_name'] = 'flag to diagnose precip at surface. ' + \
                    '11 is precip from both, 10 is precip from just Ku-band'
#fill dataset
self.xrds['flagPrecip'] = da
#
typePrecip = self.hdf['FS']['CSF']['typePrecip'][:]
typePrecip = np.asarray(typePrecip,dtype=float)
ind = np.where(typePrecip == -1111)
typePrecip[ind] = np.nan
ind = np.where(typePrecip == -9999)
typePrecip[ind] = np.nan
typePrecip = np.trunc(typePrecip/10000000)
typePrecip = np.asarray(typePrecip,dtype=int)
da = xr.DataArray(typePrecip, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose raintype. If 1: Strat. If 2: Conv. If 3:other '
self.xrds['typePrecip'] = da
                #Get the phaseNearSurface (0 is snow, 1 is mixed, 2 is rain, 2.55 is missing)
phaseNearSurface = self.hdf['FS']['SLV']['phaseNearSurface'][:,:]/100
phaseNearSurface[phaseNearSurface == 2.55] = -9999
phaseNearSurface =np.asarray(np.trunc(phaseNearSurface),dtype=int)
da = xr.DataArray(phaseNearSurface, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to diagnose near surface phase.'+ \
'0 is snow, 1 is mixed, 2 is rain. This is included to compare to Skofronick-Jackson 2019'
self.xrds['phaseNearSurface'] = da
#Get the precipRateNearSurf (needed for skofronick-jackson 2019 comparison)
precipRateNearSurface = self.hdf['FS']['SLV']['precipRateNearSurface'][:,:]
da = xr.DataArray(precipRateNearSurface,
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.fillna(value=-9999)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Near surface R from the GPM-DPR algo.'
self.xrds['precipRateNearSurface'] = da
if clutter:
self.get_highest_clutter_bin()
da = xr.DataArray(self.dummy,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove ground clutter'
self.xrds['clutter'] = da
#note, the v07 files use zFactorFinalNearSurf... have to adjust the key here
if self.v07:
temp_key = 'zFactorFinalNearSurface'
else:
temp_key = 'zFactorCorrectedNearSurface'
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,0],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ku'
da = da.where(da >= 12)
self.xrds['nearsurfaceKu'] = da
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,1],
dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'near surface Ka'
da = da.where(da >= 15)
self.xrds['nearsurfaceKa'] = da
#note, the v07 files use zFactorFinal.. have to adjust the key here
if self.v07:
temp_key = 'zFactorFinal'
else:
temp_key = 'zFactorCorrected'
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 12)
self.xrds['NSKu_c'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['epsilon'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.fillna(value=-9999.9)
da = da.where(da >= 0)
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'epsilon value for retrieval'
self.xrds['epsilon'] = da
da = xr.DataArray(self.hdf['FS']['SLV'][temp_key][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'corrected KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
da = da.where(da >= 15)
self.xrds['MSKa_c'] = da
if echotop:
self.echotop()
da = xr.DataArray(self.dummy2,
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'flag to remove noise outside cloud/precip top'
self.xrds['echotop'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KuPR'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['NSKu'] = da
da = xr.DataArray(self.hdf['FS']['PRE']['zFactorMeasured'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'measured KaPR, MS scan'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['MSKa'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['precipRate'][:,:,:],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm hr^-1'
da.attrs['standard_name'] = 'retrieved R, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
self.xrds['R'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['paramDSD'][:,:,:,1],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Dm, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Dm_dpr'] = da
da = xr.DataArray(self.hdf['FS']['SLV']['paramDSD'][:,:,:,0],
dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],lons),
'lats': (['along_track','cross_track'],lats),
'time': (['along_track','cross_track'],self.datestr),
'alt':(['along_track', 'cross_track','range'],self.height)})
da.attrs['units'] = 'dBNw'
da.attrs['standard_name'] = 'retrieved Nw, from DPR algo'
if clutter:
da = da.where(self.xrds.clutter==0)
if echotop:
da = da.where(self.xrds.echotop==0)
da = da.where(da >= 0)
self.xrds['Nw_dpr'] = da
if self.precip:
                #change this to 10 if you want to relax the conditions, because the Ka band has bad sensitivity
self.xrds = self.xrds.where(self.xrds.flagPrecip>=precipflag)
# if self.snow:
# self.xrds = self.xrds.where(self.xrds.flagSurfaceSnow==1)
if self.corners is not None:
self.setboxcoords()
#to reduce size of data, drop empty cross-track sections
# self.xrds = self.xrds.dropna(dim='along_track',how='all')
#as before, makes sure there is data...
if self.xrds.along_track.shape[0]==0:
self.killflag = True
def get_highest_clutter_bin(self):
"""
        This method makes us ground-clutter conservative by supplying a clutter mask to apply to the fields.
        It is based on the algorithm output of 'binClutterFreeBottom', which can be a bit conservative (~ 1 km).
"""
if self.legacy:
if self.outer_swath:
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,:]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,:]
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
else:
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,12:37]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['NS']['PRE']['binClutterFreeBottom'][:,12:37]
ka = self.hdf['MS']['PRE']['binClutterFreeBottom'][:]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
else:
ku = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,:]
ku = np.reshape(ku,[1,ku.shape[0],ku.shape[1]])
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,12:37]
ka = np.reshape(ka,[1,ka.shape[0],ka.shape[1]])
both = np.vstack([ku,ka])
pick_max = np.argmin(both,axis=0)
ku = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,:]
ka = np.ones([len(self.along_track),len(self.cross_track)],dtype=int)*-9999
ka[:,12:37] = self.hdf['FS']['PRE']['binClutterFreeBottom'][:,12:37]
inds_to_pick = np.zeros(ku.shape,dtype=int)
ind = np.where(pick_max == 0)
inds_to_pick[ind] = ku[ind]
ind = np.where(pick_max == 1)
inds_to_pick[ind] = ka[ind]
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]:] = 1
self.dummy = np.ma.asarray(dummy_matrix,dtype=int)
def echotop(self):
"""
This method takes the already clutter filtered data for the corrected reflectivity and cuts the
noisy uncorrected reflectivity to the same height. Again, the method is a bit conservative, but is
a good place to start.
"""
if self.legacy:
if self.outer_swath:
#HEADS UP, will default to using Ku in the outerswath because there is no Ka
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
else:
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.MSKa_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.MSKa_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
else:
#HEADS UP, will default to using Ku in the outerswath because there is no Ka
keeper = self.range
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu_c.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu_c)] = 9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,:inds_to_pick[i,j]] = 1
self.dummy2 = np.ma.asarray(dummy_matrix,dtype=int)
def setboxcoords(self):
"""
This method sets all points outside the box to nan.
"""
if len(self.corners) > 0:
self.ll_lon = self.corners[0]
self.ur_lon = self.corners[1]
self.ll_lat = self.corners[2]
self.ur_lat = self.corners[3]
self.xrds = self.xrds.where((self.xrds.lons >= self.ll_lon) & (self.xrds.lons <= self.ur_lon) & (self.xrds.lats >= self.ll_lat) & (self.xrds.lats <= self.ur_lat),drop=False)
else:
            print('ERROR, no box coords set... did you mean to do this?')
def parse_dtime(self):
"""
        This method creates datetime objects from the hdf file in a timely manner.
Typically run this after you already filtered for precip/snow to save additional time.
"""
if self.legacy:
if self.outer_swath:
year = self.hdf['NS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['NS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['NS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['NS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['NS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['NS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,49))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
else:
year = self.hdf['MS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['MS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['MS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['MS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['MS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['MS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,25))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
else:
year = self.hdf['FS']['ScanTime']['Year'][:]
ind = np.where(year == -9999)[0]
year = np.asarray(year,dtype=str)
year = list(year)
month = self.hdf['FS']['ScanTime']['Month'][:]
month = np.asarray(month,dtype=str)
month = np.char.rjust(month, 2, fillchar='0')
month = list(month)
day = self.hdf['FS']['ScanTime']['DayOfMonth'][:]
day = np.asarray(day,dtype=str)
day = np.char.rjust(day, 2, fillchar='0')
day = list(day)
hour = self.hdf['FS']['ScanTime']['Hour'][:]
hour = np.asarray(hour,dtype=str)
hour = np.char.rjust(hour, 2, fillchar='0')
hour = list(hour)
minute = self.hdf['FS']['ScanTime']['Minute'][:]
minute = np.asarray(minute,dtype=str)
minute = np.char.rjust(minute, 2, fillchar='0')
minute = list(minute)
second = self.hdf['FS']['ScanTime']['Second'][:]
second = np.asarray(second,dtype=str)
second = np.char.rjust(second, 2, fillchar='0')
second = list(second)
datestr = [year[i] +"-"+ month[i]+ "-" + day[i] + \
' ' + hour[i] + ':' + minute[i] + ':' + second[i] for i in range(len(year))]
datestr = np.asarray(datestr,dtype=str)
datestr[ind] = '1970-01-01 00:00:00'
datestr = np.reshape(datestr,[len(datestr),1])
datestr = np.tile(datestr,(1,49))
self.datestr = np.asarray(datestr,dtype=np.datetime64)
def run_retrieval(self,path_to_models=None,old=False,notebook=False):
"""
        This method is a way to run our neural network trained retrieval to get Dm in snowfall.
Please see this AMS presentation until the paper comes out: *LINK HERE*.
This method requires the use of tensorflow. So go install that.
"""
#load scalers
from pickle import load
import tensorflow as tf
from tensorflow.python.keras import losses
        #set number of threads = 1; multithreading was crashing my parallel code (skipped when notebook=True)
if notebook:
pass
else:
tf.config.threading.set_inter_op_parallelism_threads(1)
# tf.config.threading.set_intra_op_parallelism_threads(1)
# print('Number of threads set to {}'.format(tf.config.threading.get_inter_op_parallelism_threads()))
if old:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y.pkl', 'rb'))
else:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X_V2.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y_V2.pkl', 'rb'))
        #suppress warnings; they screw up my progress bar when running in parallel
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
if path_to_models is None:
print('Please insert path to NN models')
else:
if old:
model = tf.keras.models.load_model(path_to_models + 'NN_4by8.h5',custom_objects=None,compile=True)
else:
model = tf.keras.models.load_model(path_to_models + 'NN_6by8.h5',custom_objects=None,compile=True)
#now we have to reshape things to make sure they are in the right shape for the NN model [n_samples,n_features]
Ku = self.xrds.NSKu.values
shape_step1 = Ku.shape
Ku = Ku.reshape([Ku.shape[0],Ku.shape[1]*Ku.shape[2]])
shape_step2 = Ku.shape
Ku = Ku.reshape([Ku.shape[0]*Ku.shape[1]])
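            # flatten [along_track, cross_track, range] into 1-D feature vectors for the
            # NN; shape_step1/shape_step2 are kept so the predictions can be folded back
            # into the original 3-D swath shape after inference.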
Ka = self.xrds.MSKa.values
Ka = Ka.reshape([Ka.shape[0],Ka.shape[1]*Ka.shape[2]])
Ka = Ka.reshape([Ka.shape[0]*Ka.shape[1]])
T = self.xrds['T'].values - 273.15 #expects in degC
T = T.reshape([T.shape[0],T.shape[1]*T.shape[2]])
T = T.reshape([T.shape[0]*T.shape[1]])
            #Make sure we only run it on non-nan values.
ind_masked = np.isnan(Ku)
ind_masked2 = np.isnan(Ka)
Ku_nomask = np.zeros(Ku.shape)
Ka_nomask = np.zeros(Ka.shape)
T_nomask = np.zeros(T.shape)
Ku_nomask[~ind_masked] = Ku[~ind_masked]
Ka_nomask[~ind_masked] = Ka[~ind_masked]
T_nomask[~ind_masked] = T[~ind_masked]
ind = np.where(Ku_nomask!=0)[0]
#scale the input vectors by the mean that it was trained with
X = np.zeros([Ku_nomask.shape[0],3])
X[:,0] = (Ku_nomask - scaler_X.mean_[0])/scaler_X.scale_[0] #ku
X[:,1] = ((Ku_nomask - Ka_nomask)- scaler_X.mean_[1])/scaler_X.scale_[1] #dfr
X[:,2] = (T_nomask - scaler_X.mean_[2])/scaler_X.scale_[2] #T
#
yhat = model.predict(X[ind,0:3],batch_size=len(X[ind,0]))
yhat = scaler_y.inverse_transform(yhat)
yhat[:,1] = 10**yhat[:,1] #unlog Dm liquid
yhat[:,2] = 10**yhat[:,2] #unlog Dm solid
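            # after the inverse transform, the output columns are: 0 = log10(Nw)
            # (left in log units, see the attrs below), 1 = liquid-equivalent Dm in mm,
            # 2 = frozen Dm in mm.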
ind = np.where(Ku_nomask!=0)[0]
Nw = np.zeros(Ku_nomask.shape)
Nw[ind] = np.squeeze(yhat[:,0])
Nw = Nw.reshape([shape_step2[0],shape_step2[1]])
Nw = Nw.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Nw = np.ma.masked_where(Nw==0.0,Nw)
da = xr.DataArray(Nw, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                                   'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'log(m^-4)'
da.attrs['standard_name'] = 'retrieved Nw from the NN (Chase et al. 2020)'
da = da.where(da > 0.)
self.xrds['Nw'] = da
Dm = np.zeros(Ku_nomask.shape)
Dm[ind] = np.squeeze(yhat[:,1])
Dm = Dm.reshape([shape_step2[0],shape_step2[1]])
Dm = Dm.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Dm = np.ma.masked_where(Dm==0.0,Dm)
da = xr.DataArray(Dm, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                                   'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Liquid Eq. Dm from the NN (Chase et al. 2020)'
self.xrds['Dm'] = da
Dm_frozen = np.zeros(Ku_nomask.shape)
Dm_frozen[ind] = np.squeeze(yhat[:,2])
Dm_frozen = Dm_frozen.reshape([shape_step2[0],shape_step2[1]])
Dm_frozen = Dm_frozen.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Dm_frozen = np.ma.masked_where(Dm_frozen==0.0,Dm_frozen)
da = xr.DataArray(Dm_frozen, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                                   'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'retrieved Frozen Dm from the NN (Chase et al. 2020)'
self.xrds['Dm_frozen'] = da
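            # ice water content from the retrieved pair:
            # IWC = (pi * rho_w / 4**4) * Nw * Dm**4 with rho_w = 1000 kg m^-3,
            # Dm converted from mm to m, and a final factor of 1000 to go from
            # kg m^-3 to g m^-3.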
Nw = 10**Nw #undo log, should be in m^-4
            Dm = Dm/1000. # convert mm to m
IWC = (Nw*(Dm)**4*1000*np.pi)/4**(4) # the 1000 is density of water (kg/m^3)
IWC = IWC*1000 #convert to g/m^3
da = xr.DataArray(IWC, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                                   'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^{-3}'
da.attrs['standard_name'] = 'Calc IWC from retrieved Nw and Dm from the NN (Chase et al. 2020)'
self.xrds['IWC'] = da
self.retrieval_flag = 1
def get_ENV(self,ENVFILENAME=None):
hdf_env = h5py.File(ENVFILENAME)
temperature = hdf_env['NS']['VERENV']['airTemperature'][:,12:37,:]
da = xr.DataArray(temperature, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
                                   'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
        da = da.where(da > 0)
da.attrs['units'] = 'K'
da.attrs['standard_name'] = 'GPM-DPR ENV data'
self.xrds["T"] = da
def get_merra(self,interp1=True,interp2=False,getsurf=True):
"""
This method matches up the *closest* MERRA-2 profiles.
To do so it uses the xarray.sel command.
        Please note this is not generalized. The file structure of my MERRA-2 files is a bit particular.
In theory you could point this into your own directory where those files are. Or even use a different
reanalysis (e.g., ERA)
"""
time = self.xrds.time.values
orig_shape = time.shape
time = np.reshape(time,[orig_shape[0]*orig_shape[1]])
dates = pd.to_datetime(time,infer_datetime_format=True)
dates = dates.to_pydatetime()
dates = np.reshape(dates,[orig_shape[0],orig_shape[1]])
year = dates[0,0].year
month = dates[0,0].month
day = dates[0,0].day
if month < 10:
month = '0'+ str(month)
else:
month = str(month)
if day <10:
day = '0' + str(day)
else:
day = str(day)
ds_url = '/data/accp/a/snesbitt/merra-2/PROFILE/'+ str(year) + '/' + 'MERRA2_400.inst6_3d_ana_Np.'+ str(year) + month + day+ '.nc4'
###load file
merra = xr.open_dataset(ds_url,chunks={'lat': 361, 'lon': 576})
###
#select the closest profile to the lat, lon, time
sounding = merra.sel(lon=self.xrds.lons,lat=self.xrds.lats,time=self.xrds.time,method='nearest')
sounding.load()
self.sounding = sounding
if interp1:
self.interp_MERRA(keyname='T')
self.interp_MERRA(keyname='U')
self.interp_MERRA(keyname='V')
self.interp_MERRA(keyname='QV')
self.interp_flag = 1
elif interp2:
self.interp_MERRA_V2(keyname='T')
self.interp_MERRA_V2(keyname='U')
self.interp_MERRA_V2(keyname='V')
self.interp_MERRA_V2(keyname='QV')
self.interp_flag = 1
if getsurf:
ds_url ='/data/accp/a/snesbitt/merra-2/SURFACE/'+ str(year) + '/' + 'MERRA2_400.tavg1_2d_slv_Nx.'+str(year) + month + day +'.nc4'
###load file
merra = xr.open_dataset(ds_url)
###
#select the closest profile to the lat, lon, time
gpmcoords = merra.sel(lon=self.xrds.lons,lat=self.xrds.lats,time=self.xrds.time,method='nearest')
da = xr.DataArray(gpmcoords.T2M.values, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = gpmcoords.T2M.units
da.attrs['standard_name'] = gpmcoords.T2M.standard_name
self.xrds['T2M'] = da
da = xr.DataArray(gpmcoords.T2MWET.values, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = gpmcoords.T2MWET.units
da.attrs['standard_name'] = gpmcoords.T2MWET.standard_name
self.xrds['T2MWET'] = da
da = xr.DataArray(gpmcoords.T2MDEW.values, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = gpmcoords.T2MDEW.units
da.attrs['standard_name'] = gpmcoords.T2MDEW.standard_name
self.xrds['T2MDEW'] = da
if self.snow:
self.xrds = self.xrds.where(self.xrds.T2MWET-273.15 <= 0)
#to reduce size of data, drop empty cross-track sections
self.xrds = self.xrds.dropna(dim='along_track',how='all')
def interp_MERRA(self,keyname=None):
"""
This interpolates the MERRA data from the self.get_merra method to the same vertical levels as the GPM-DPR data.
NOTE: this is not optimized and is not very fast, but you can turn it off if you want.
"""
H_Merra = self.sounding.H.values
H_gpm = self.xrds.alt.values
new_variable = np.zeros(H_gpm.shape)
for i in self.sounding.along_track.values:
for j in self.sounding.cross_track.values:
#fit func
da = xr.DataArray(self.sounding[keyname].values[i,j,:], [('height', H_Merra[i,j,:]/1000)])
da = da.interp(height=H_gpm[i,j,:])
new_variable[i,j,:] = da.values
da = xr.DataArray(new_variable, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.attrs['units'] = self.sounding[keyname].units
da.attrs['standard_name'] = 'Interpolated ' + self.sounding[keyname].standard_name + ' to GPM height coord'
self.xrds[keyname] = da
return da
def interp_MERRA_V2(self,keyname=None):
"""This is an effcient way of doing linear interpolation of the MERRA soundings """
x = self.sounding['H'].values
y = self.sounding[keyname].values
z = self.xrds.alt.values*1000 #convert to m
interped = np.zeros(self.xrds.alt.values.shape)
for i in np.arange(0,len(self.cross_track)):
interped[:,i,:] = interp_2(x[:,i,:],y[:,i,:],z[0,i,:])
da = xr.DataArray(interped, dims=['along_track', 'cross_track','range'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time),
'alt':(['along_track', 'cross_track','range'],self.xrds.alt)})
da.attrs['units'] = self.sounding[keyname].units
da.attrs['standard_name'] = 'Interpolated ' + self.sounding[keyname].standard_name + ' to GPM height coord'
self.xrds[keyname] = da
return da
def extract_nearsurf(self):
"""
Since we are often concerned with what's happening at the surface, this will extract the variables just above
the clutter.
"""
if self.legacy:
if self.outer_swath:
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = -9999
inds_to_pick = np.argmax(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note, for all-NaN columns it will return 0 (the top of the GPM column), which should always be NaN anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.lowest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=True)
self.grab_variable(keyname='NSKu_c',nearsurf=True)
self.grab_variable(keyname='MSKa',nearsurf=True)
self.grab_variable(keyname='MSKa_c',nearsurf=True)
self.grab_variable(keyname='R',nearsurf=True)
self.grab_variable(keyname='Dm_dpr',nearsurf=True)
self.grab_variable(keyname='alt',nearsurf=True)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=True)
self.grab_variable(keyname='IWC',nearsurf=True)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=True)
self.grab_variable(keyname='U',nearsurf=True)
self.grab_variable(keyname='V',nearsurf=True)
self.grab_variable(keyname='QV',nearsurf=True)
else:
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = -9999
inds_to_pick = np.argmax(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note, for all-NaN columns it will return 0 (the top of the GPM column), which should always be NaN anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.lowest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=True)
self.grab_variable(keyname='NSKu_c',nearsurf=True)
self.grab_variable(keyname='MSKa',nearsurf=True)
self.grab_variable(keyname='MSKa_c',nearsurf=True)
self.grab_variable(keyname='R',nearsurf=True)
self.grab_variable(keyname='Dm_dpr',nearsurf=True)
self.grab_variable(keyname='alt',nearsurf=True)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=True)
self.grab_variable(keyname='IWC',nearsurf=True)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=True)
self.grab_variable(keyname='U',nearsurf=True)
self.grab_variable(keyname='V',nearsurf=True)
self.grab_variable(keyname='QV',nearsurf=True)
else:
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(49,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = -9999
inds_to_pick = np.argmax(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note, for all-NaN columns it will return 0 (the top of the GPM column), which should always be NaN anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.lowest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=True)
self.grab_variable(keyname='NSKu_c',nearsurf=True)
self.grab_variable(keyname='MSKa',nearsurf=True)
self.grab_variable(keyname='MSKa_c',nearsurf=True)
self.grab_variable(keyname='R',nearsurf=True)
self.grab_variable(keyname='Dm_dpr',nearsurf=True)
self.grab_variable(keyname='alt',nearsurf=True)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=True)
self.grab_variable(keyname='IWC',nearsurf=True)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=True)
self.grab_variable(keyname='U',nearsurf=True)
self.grab_variable(keyname='V',nearsurf=True)
self.grab_variable(keyname='QV',nearsurf=True)
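# Hedged sketch (illustration only): the "keeper"/argmax trick above finds, for each column,
# the gate closest to the surface (largest range index) that still has valid data, by setting
# NaN gates to -9999 before taking argmax. A minimal 1-D example with hypothetical values:
import numpy as np
column = np.array([np.nan, 12.0, 15.0, 18.0, np.nan, np.nan])   # reflectivity vs. range gate
gate_index = np.arange(column.shape[0], dtype=float)
gate_index[np.isnan(column)] = -9999                            # remove NaN gates from contention
lowest_valid_gate = np.argmax(gate_index)
print(lowest_valid_gate, column[lowest_valid_gate])             # 3 18.0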
def extract_echotop(self):
"""
Extract the various parameters found at the echo top. Make sure you ran the echo-top step first.
"""
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
#i'm using the Ka band as the echotop to ensure we have a retrieved param at echotop, not nan
keeper = np.tile(keeper,(self.xrds.MSKa.values.shape[0],1,1))
keeper[np.isnan(self.xrds.MSKa.values)] = +9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note, for all-NaN columns it will return 0 (the top of the GPM column), which should always be NaN anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
self.highest_gate_index = np.ma.asarray(dummy_matrix,dtype=int)
self.grab_variable(keyname='NSKu',nearsurf=False)
self.grab_variable(keyname='NSKu_c',nearsurf=False)
self.grab_variable(keyname='MSKa',nearsurf=False)
self.grab_variable(keyname='MSKa_c',nearsurf=False)
self.grab_variable(keyname='R',nearsurf=False)
self.grab_variable(keyname='Dm_dpr',nearsurf=False)
self.grab_variable(keyname='alt',nearsurf=False)
if self.retrieval_flag == 1:
self.grab_variable(keyname='Dm',nearsurf=False)
self.grab_variable(keyname='IWC',nearsurf=False)
if self.interp_flag == 1:
self.grab_variable(keyname='T',nearsurf=False)
self.grab_variable(keyname='U',nearsurf=False)
self.grab_variable(keyname='V',nearsurf=False)
self.grab_variable(keyname='QV',nearsurf=False)
def extract_echotopheight_ku(self):
"""
Extract the echo-top height and reflectivity for the KuPR. Make sure you ran the echo-top step first.
"""
keeper = self.xrds.range.values
keeper = np.reshape(keeper,[1,keeper.shape[0]])
keeper = np.tile(keeper,(25,1))
keeper = np.reshape(keeper,[1,keeper.shape[0],keeper.shape[1]])
keeper = np.tile(keeper,(self.xrds.NSKu.values.shape[0],1,1))
keeper[np.isnan(self.xrds.NSKu.values)] = +9999
inds_to_pick = np.argmin(keeper,axis=2)
dummy_matrix = np.ma.zeros([inds_to_pick.shape[0],inds_to_pick.shape[1],176])
#note, for all-NaN columns it will return 0 (the top of the GPM column), which should always be NaN anyway
for i in np.arange(0,dummy_matrix.shape[0]):
for j in np.arange(0,dummy_matrix.shape[1]):
dummy_matrix[i,j,inds_to_pick[i,j]] = 1
ind = np.where(dummy_matrix == 0)
variable = np.zeros([self.xrds.along_track.shape[0],self.xrds.cross_track.values.shape[0]])
variable2 = np.zeros([self.xrds.along_track.shape[0],self.xrds.cross_track.values.shape[0]])
variable[ind[0],ind[1]] = np.nan
variable2[ind[0],ind[1]] = np.nan
ind = np.where(dummy_matrix == 1)
variable[ind[0],ind[1]] = self.xrds['alt'].values[ind[0],ind[1],ind[2]]
variable2[ind[0],ind[1]] = self.xrds['NSKu'].values[ind[0],ind[1],ind[2]]
da = xr.DataArray(variable, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'altitude of the KuPR echo top'
self.xrds['alt_echoTopKuPR'] = da
da = xr.DataArray(variable2, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'KuPR reflectivity at its echo top'
self.xrds['NSKu_echoTopKuPR'] = da
def grab_variable(self,keyname=None,nearsurf=True,):
"""
This goes along with the self.extract_nearsurf() or self.extract_echotop()
"""
if keyname is None:
print('please supply keyname')
else:
variable = np.zeros([self.xrds.along_track.shape[0],self.xrds.cross_track.values.shape[0]])
if nearsurf:
ind = np.where(self.lowest_gate_index == 0)
else:
ind = np.where(self.highest_gate_index == 0)
variable[ind[0],ind[1]] = np.nan
if nearsurf:
ind = np.where(self.lowest_gate_index == 1)
else:
ind = np.where(self.highest_gate_index == 1)
variable[ind[0],ind[1]] = self.xrds[keyname].values[ind[0],ind[1],ind[2]]
da = xr.DataArray(variable, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
if nearsurf:
if keyname=='alt':
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'altitude of the near-surface bin'
self.xrds[keyname+'_nearSurf'] = da
else:
da.attrs['units'] = self.xrds[keyname].units
da.attrs['standard_name'] = 'near-surface ' + self.xrds[keyname].standard_name
self.xrds[keyname+'_nearSurf'] = da
else:
if keyname=='alt':
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'altitude of the echoTop'
self.xrds[keyname+'_echoTop'] = da
else:
da.attrs['units'] = self.xrds[keyname].units
da.attrs['standard_name'] = 'echo-top ' + self.xrds[keyname].standard_name
self.xrds[keyname+'_echoTop'] = da
def get_physcial_distance(self,reference_point = None):
"""
This method uses pyproj to calculate distances between lats and lons.
reference_point is a list or array consisting of two entries, [Longitude, Latitude].
Please note that this intentionally uses an older version of pyproj (< version 2.0; I used 1.9.5.1)
because it preserves how the function is called.
"""
if reference_point is None and self.reference_point is None:
print('Error, no reference point found...please enter one')
else:
#this invokes the pyproj package. Please note this must be an old version (< 2.0)
from pyproj import Proj
p = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=reference_point[1], lon_0=reference_point[0])
#double check to make sure this returns 0 meters
x,y = p(reference_point[0],reference_point[1])
if np.sqrt(x**2 + y**2) != 0:
print('something is not right with the projection, investigate')
else:
ind = np.isnan(self.xrds.NSKu_nearSurf.values)
x = np.zeros(self.xrds.lons.values.shape)
y = np.zeros(self.xrds.lats.values.shape)
x[~ind],y[~ind] = p(self.xrds.lons.values[~ind],self.xrds.lats.values[~ind])
x[ind] = np.nan
y[ind] = np.nan
da = xr.DataArray(np.sqrt(x**2 + y**2)/1000, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'distance, as the crow flies (i.e., direct), to the reference point'
self.xrds['distance'] = da
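# Hedged sketch (standalone, hypothetical coordinates): the same azimuthal equidistant
# projection trick as above, used to get straight-line distances to a reference point.
import numpy as np
from pyproj import Proj
ref_lon, ref_lat = -75.0, 45.0
p = Proj(proj='aeqd', ellps='WGS84', datum='WGS84', lat_0=ref_lat, lon_0=ref_lon)
x, y = p(np.array([-74.5, -76.0]), np.array([45.2, 44.8]))   # forward transform, meters
print(np.sqrt(x**2 + y**2) / 1000.0)                         # distance from the reference, km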
def get_TMAX(self):
#get the column max temperature in each profile
ind_tmax = (self.xrds['T'].argmax(axis=2)).values
#make the index the right shape
shp = np.array(self.xrds['T'].shape)
dim_idx = list(np.ix_(*[np.arange(i) for i in shp[:-1]]))
dim_idx.append(ind_tmax)
#grab the temperature
tmax = self.xrds['T'].values[tuple(dim_idx)]
#grab its altitude
alt = self.xrds['T'].alt.values
alt_tmax = alt[tuple(dim_idx)]
#determine the lapse rate across the echo
lapse = np.zeros(self.xrds.NSKu.shape)
#no lapse rate for the first gate (there is nothing above it)
lapse[:,:,0] = np.nan
#grab the temps
T_nan = np.copy(self.xrds['T'].values)
#nan out values outside the echo
T_nan[np.isnan(self.xrds.NSKu.values)] = np.nan
#calc the lapse rate across the echo, convert to per km
lapse[:,:,1:] = np.diff(T_nan,axis=2)/0.125
#change sign
lapse = lapse*-1
#take mean
lapse2d = np.nanmean(lapse,axis=2)
#okay make stability flag
orig_shape = lapse2d.shape
lapse2d = np.reshape(lapse2d,[orig_shape[0]*orig_shape[1]])
stability_flag = np.ones(lapse2d.shape,dtype=int)*-9999
abs_unstable = np.where(lapse2d <= -10)
stability_flag[abs_unstable] = 0
ind_cond = np.where(lapse2d > -10)
ind_cond2 = np.where(lapse2d <= -6)
cond_unstable = np.intersect1d(ind_cond,ind_cond2)
stability_flag[cond_unstable] = 1
abs_stable = np.where(lapse2d > -6)
stability_flag[abs_stable] = 2
#make them 2d to put back into the dataset
stability_flag = np.reshape(stability_flag,[orig_shape[0],orig_shape[1]])
lapse2d = np.reshape(lapse2d,[orig_shape[0],orig_shape[1]])
da = xr.DataArray(tmax, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'K'
da.attrs['standard_name'] = 'Max temperature in the column'
self.xrds['TMAX'] = da
da = xr.DataArray(alt_tmax, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'km'
da.attrs['standard_name'] = 'Alt of max temperature'
self.xrds['TMAX_alt'] = da
da = xr.DataArray(lapse2d, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'deg C per km'
da.attrs['standard_name'] = 'Mean lapse rate across the NSKu echo'
self.xrds['lapse2d'] = da
da = xr.DataArray(stability_flag, dims=['along_track', 'cross_track'],
coords={'lons': (['along_track','cross_track'],self.xrds.lons),
'lats': (['along_track','cross_track'],self.xrds.lats),
'time': (['along_track','cross_track'],self.xrds.time)})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'Stability flag, 0 is absolutely unstable, 1 is cond. unstable, 2 is stable,-9999 means no precip echo'
self.xrds['stability_flag'] = da
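# Hedged sketch (illustration only): the lapse-rate/stability classification above for a single
# hypothetical profile, using the 0.125 km GPM-DPR gate spacing. The range index increases
# toward the surface, so np.diff gives (T_below - T_above) and the sign flip yields dT/dz.
import numpy as np
T_profile = np.array([258.0, 259.0, 260.1, 261.0])   # K, echo top -> near surface
dT_dz = -np.diff(T_profile) / 0.125                  # deg C per km (negative: T falls with height)
mean_lapse = np.nanmean(dT_dz)                       # -8.0 C/km for these numbers
if mean_lapse <= -10:
    flag = 0        # absolutely unstable
elif mean_lapse <= -6:
    flag = 1        # conditionally unstable
else:
    flag = 2        # absolutely stable
print(mean_lapse, flag)                              # -8.0 1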
def interp_2(x, y, z):
""" This is from this discussion: https://stackoverflow.com/questions/14559687/scipy-fast-1-d-interpolation-without-any-loop"""
rows, cols = x.shape
row_idx = np.arange(rows).reshape((rows,) + (1,) * z.ndim)
col_idx = np.argmax(x.reshape(x.shape + (1,) * z.ndim) > z, axis=1) - 1
ret = y[row_idx, col_idx + 1] - y[row_idx, col_idx]
ret /= x[row_idx, col_idx + 1] - x[row_idx, col_idx]
ret *= z - x[row_idx, col_idx]
ret += y[row_idx, col_idx]
return ret
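# Hedged sketch (illustration only): calling interp_2 (defined above) for row-wise linear
# interpolation without a Python loop. x must increase along its last axis; z holds the target
# abscissae shared by every row, mirroring how interp_MERRA_V2 passes the GPM altitudes.
import numpy as np
x = np.array([[0.0, 1.0, 2.0, 3.0],
              [0.0, 2.0, 4.0, 6.0]])      # per-row abscissae (e.g., MERRA heights, km)
y = np.array([[10.0, 11.0, 12.0, 13.0],
              [20.0, 22.0, 24.0, 26.0]])  # per-row values to interpolate
z = np.array([0.5, 2.5])                  # target heights, interior to every row's range
print(interp_2(x, y, z))                  # [[10.5 12.5] [20.5 22.5]]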
###############################################################################################################################
###############################################################################################################################
###############################################################################################################################
class APR():
"""
Author: <NAME>. This class is intended to help with APR-2/APR-3 files.
Currently supported campaigns: gcpex, olympex, camp2ex
Feel free to reach out to me on twitter (@dopplerchase) or email <EMAIL>
"""
def __init__(self):
self.initialized = True
self.T3d = False
def read(self,filename,campaign='gcpex'):
"""
===========
This reads in APR-2/APR-3 HDF files (gcpex, olympex, or camp2ex) and stores the fields in an xarray dataset (self.xrds)
===========
filename = filename of the apr3 file
"""
if campaign=='gcpex':
self.campaign = campaign
from pyhdf.SD import SD, SDC
apr = {}
flag = 0
##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'zhh95' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
hdf = SD(filename, SDC.READ)
#A search for W-band used to be here; since there is no W-band in GCPEX, just autofill it.
alt = hdf.select('alt3D')
lat = hdf.select('lat')
lon = hdf.select('lon')
roll = hdf.select('roll').get()
time = hdf.select('scantime').get()
surf = hdf.select('surface_index').get()
isurf = hdf.select('isurf').get()
plane = hdf.select('alt_nav').get()
radar = hdf.select(radar_freq) #ku
radar2 = hdf.select(radar_freq2) #ka
radar4 = hdf.select(radar_freq4) #ldr
vel = hdf.select(vel_str)
lon3d = hdf.select('lon3D')
lat3d = hdf.select('lat3D')
alt3d = hdf.select('alt3D')
lat3d_scale = hdf.select('lat3D_scale').get()[0][0]
lon3d_scale = hdf.select('lon3D_scale').get()[0][0]
alt3d_scale = hdf.select('alt3D_scale').get()[0][0]
lat3d_offset = hdf.select('lat3D_offset').get()[0][0]
lon3d_offset = hdf.select('lon3D_offset').get()[0][0]
alt3d_offset = hdf.select('alt3D_offset').get()[0][0]
alt = alt.get()
ngates = alt.shape[0]
lat = lat.get()
lon = lon.get()
lat3d = lat3d.get()
lat3d = (lat3d/lat3d_scale) + lat3d_offset
lon3d = lon3d.get()
lon3d = (lon3d/lon3d_scale) + lon3d_offset
alt3d = alt3d.get()
alt3d = (alt3d/alt3d_scale) + alt3d_offset
radar_n = radar.get()
radar_n = radar_n/100.
radar_n2 = radar2.get()
radar_n2 = radar_n2/100.
radar_n4 = radar4.get()
radar_n4 = radar_n4/100.
vel_n = vel.get()
vel_n = vel_n/100.
#Quality control (mask invalid values)
radar_n = np.ma.masked_where(radar_n<=-99,radar_n)
radar_n2 = np.ma.masked_where(radar_n2<=-99,radar_n2)
radar_n4 = np.ma.masked_where(radar_n4<=-99,radar_n4)
vel = np.ma.masked_where(vel_n <= -99,vel_n)
#Get rid of NaNs (the new HDF files have them built in)
radar_n = np.ma.masked_where(np.isnan(radar_n),radar_n)
radar_n2 = np.ma.masked_where(np.isnan(radar_n2),radar_n2)
radar_n4 = np.ma.masked_where(np.isnan(radar_n4),radar_n4)
vel = np.ma.masked_where(np.isnan(vel),vel)
radar_n3 = np.ma.zeros(radar_n.shape)
radar_n3 = np.ma.masked_where(radar_n3 == 0,radar_n3)
if campaign == 'olympex':
self.campaign = campaign
##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'z95s' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
import h5py
hdf = h5py.File(filename,"r")
listofkeys = hdf['lores'].keys()
alt = hdf['lores']['alt3D'][:]
lat = hdf['lores']['lat'][:]
lon = hdf['lores']['lon'][:]
time = hdf['lores']['scantime'][:]
surf = hdf['lores']['surface_index'][:]
isurf = hdf['lores']['isurf'][:]
plane = hdf['lores']['alt_nav'][:]
radar = hdf['lores'][radar_freq][:]
radar2 = hdf['lores'][radar_freq2][:]
radar4 = hdf['lores'][radar_freq4][:]
vel = hdf['lores']['vel14c'][:]
lon3d = hdf['lores']['lon3D'][:]
lat3d = hdf['lores']['lat3D'][:]
alt3d = hdf['lores']['alt3D'][:]
roll = hdf['lores']['roll'][:]
#see if there is W band
if 'z95s' in listofkeys:
if 'z95n' in listofkeys:
radar_nadir = hdf['lores']['z95n'][:]
radar_scanning = hdf['lores']['z95s'][:]
radar3 = radar_scanning
w_flag = 1
##uncomment if you want the high-sensitivity beam as the nadir scan (WARNING: CALIBRATION)
#radar3[:,12,:] = radar_nadir[:,12,:]
else:
radar3 = hdf['lores']['z95s'][:]
print('No vv, using hh')
else:
radar3 = np.ma.zeros(radar.shape)
radar3 = np.ma.masked_where(radar3==0,radar3)
w_flag = 1
print('No W band')
#Quality control (masked where invalid)
radar_n = np.ma.masked_where(radar <= -99,radar)
radar_n2 = np.ma.masked_where(radar2 <= -99,radar2)
radar_n3 = np.ma.masked_where(radar3 <= -99,radar3)
radar_n4 = np.ma.masked_where(radar4 <= -99,radar4)
vel = np.ma.masked_where(vel <= -99,vel)
#Get rid of NaNs (the new HDF files have them built in)
radar_n = np.ma.masked_where(np.isnan(radar_n),radar_n)
radar_n2 = np.ma.masked_where(np.isnan(radar_n2),radar_n2)
radar_n3 = np.ma.masked_where(np.isnan(radar_n3),radar_n3)
radar_n4 = np.ma.masked_where(np.isnan(radar_n4),radar_n4)
vel = np.ma.masked_where(np.isnan(vel),vel)
if campaign == 'camp2ex':
##Radar variables in hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'zhh95' #W
radar_freq4 = 'ldrhh14' #LDR
vel_str = 'vel14' #Doppler
##
import h5py
hdf = h5py.File(filename,"r")
listofkeys = hdf['lores'].keys()
alt = hdf['lores']['alt3D'][:]
lat = hdf['lores']['lat'][:]
lon = hdf['lores']['lon'][:]
time = hdf['lores']['scantime'][:]
surf = hdf['lores']['surface_index'][:]
isurf = hdf['lores']['isurf'][:]
plane = hdf['lores']['alt_nav'][:]
radar = hdf['lores'][radar_freq][:]
radar2 = hdf['lores'][radar_freq2][:]
radar4 = hdf['lores'][radar_freq4][:]
vel = hdf['lores'][vel_str][:]
lon3d = hdf['lores']['lon3D'][:]
lat3d = hdf['lores']['lat3D'][:]
alt3d = hdf['lores']['alt3D'][:]
roll = hdf['lores']['roll'][:]
#see if there is W band
if 'z95s' in listofkeys:
if 'z95n' in listofkeys:
radar_nadir = hdf['lores']['z95n'][:]
radar_scanning = hdf['lores']['z95s'][:]
radar3 = radar_scanning
w_flag = 1
##uncomment if you want the high-sensitivity beam as the nadir scan (WARNING: CALIBRATION)
#radar3[:,12,:] = radar_nadir[:,12,:]
else:
radar3 = hdf['lores']['z95s'][:]
print('No vv, using hh')
else:
radar3 = np.ma.zeros(radar.shape)
radar3 = np.ma.masked_where(radar3==0,radar3)
w_flag = 1
print('No W band')
#Quality control (masked where invalid)
radar_n = np.ma.masked_where(radar <= -99,radar)
radar_n2 = np.ma.masked_where(radar2 <= -99,radar2)
radar_n3 = np.ma.masked_where(radar3 <= -99,radar3)
radar_n4 = np.ma.masked_where(radar4 <= -99,radar4)
vel = np.ma.masked_where(vel <= -99,vel)
#Get rid of NaNs (the new HDF files have them built in)
radar_n = np.ma.masked_where(np.isnan(radar_n),radar_n)
radar_n2 = np.ma.masked_where(np.isnan(radar_n2),radar_n2)
radar_n3 = np.ma.masked_where(np.isnan(radar_n3),radar_n3)
radar_n4 = np.ma.masked_where(np.isnan(radar_n4),radar_n4)
vel = np.ma.masked_where(np.isnan(vel),vel)
##convert time to datetimes
time_dates = np.empty(time.shape,dtype=object)
for i in np.arange(0,time.shape[0]):
for j in np.arange(0,time.shape[1]):
tmp = datetime.datetime.utcfromtimestamp(time[i,j])
time_dates[i,j] = tmp
#Create a time at each gate (assuming it is the same down each ray, there is a better way to do this)
time_gate = np.empty(lat3d.shape,dtype=object)
for k in np.arange(0,lat3d.shape[0]):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,time_dates.shape[1]):
time_gate[k,i,j] = time_dates[i,j]
time3d = np.copy(time_gate)
da = xr.DataArray(radar_n,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ku-band Reflectivity'
#make xr dataset
self.xrds = da.to_dataset(name = 'Ku')
#
da = xr.DataArray(radar_n2,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ka-band Reflectivity'
#add to xr dataset
self.xrds['Ka'] = da
da = xr.DataArray(vel,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ku-band Doppler Vel'
#add to xr dataset
self.xrds['DopKu'] = da
#
da = xr.DataArray(radar_n3,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'W-band Reflectivity'
#add to xr dataset
self.xrds['W'] = da
#
da = xr.DataArray(radar_n4,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(radar_n.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],lon3d),
'lat3d':(['range','cross_track','along_track'],lat3d),
'time3d': (['range','cross_track','along_track'],time3d),
'alt3d':(['range','cross_track','along_track'],alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dB'
da.attrs['standard_name'] = 'LDR at Ku-band '
#add to xr dataset
self.xrds['LDR'] = da
#
da = xr.DataArray(roll,dims={'cross_track':np.arange(0,24),'along_track':np.arange(radar_n.shape[2])})
da.attrs['units'] = 'degrees'
da.attrs['standard_name'] = 'Left/Right Plane Roll'
#add to xr dataset
self.xrds['Roll'] = da
#
da = xr.DataArray(isurf,dims={'cross_track':np.arange(0,24),'along_track':np.arange(radar_n.shape[2])})
da.attrs['units'] = 'none'
da.attrs['standard_name'] = 'index of best guess for location of surface'
#add to xr dataset
self.xrds['surf'] = da
#
def determine_ground_lon_lat(self,near_surf_Z=True):
mean_alt = self.xrds.alt3d.mean(axis=(1,2))
ind_sealevel = find_nearest(mean_alt,0)
g_lon = self.xrds.lon3d[ind_sealevel,:,:]
g_lat = self.xrds.lat3d[ind_sealevel,:,:]
da = xr.DataArray(g_lon,dims={'cross_track':g_lon.shape[0],'along_track':np.arange(g_lon.shape[1])})
da.attrs['units'] = 'degrees longitude'
da.attrs['standard_name'] = 'Surface Longitude'
self.xrds['g_lon'] = da
da = xr.DataArray(g_lat,dims={'cross_track':g_lat.shape[0],'along_track':np.arange(g_lat.shape[1])})
da.attrs['units'] = 'degrees latitude'
da.attrs['standard_name'] = 'Surface latitude'
self.xrds['g_lat'] = da
if near_surf_Z:
ind_nearsurf = find_nearest(mean_alt,1100)
g_Ku = self.xrds.Ku[ind_nearsurf,:,:]
da = xr.DataArray(g_Ku,dims={'cross_track':g_lon.shape[0],'along_track':np.arange(g_lon.shape[1])})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Near_surf Z'
self.xrds['Ku_nearsurf'] = da
g_Ka = self.xrds.Ka[ind_nearsurf,:,:]
da = xr.DataArray(g_Ka,dims={'cross_track':g_lon.shape[0],'along_track':np.arange(g_lon.shape[1])})
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Near_surf Z'
self.xrds['Ka_nearsurf'] = da
def setboxcoords(self,nanout=True):
"""
This method sets all points outside the box to nan.
"""
if len(self.corners) > 0:
self.ll_lon = self.corners[0]
self.ur_lon = self.corners[1]
self.ll_lat = self.corners[2]
self.ur_lat = self.corners[3]
if nanout:
self.xrds = self.xrds.where((self.xrds.g_lon >= self.ll_lon) & (self.xrds.g_lon <= self.ur_lon) & (self.xrds.g_lat >= self.ll_lat) & (self.xrds.g_lat <= self.ur_lat),drop=False)
else:
print('ERROR, no box coords set... did you mean to do this?')
def cit_temp_to_apr(self,fl,time_inds,insidebox=True):
self.T3d = True
cit_lon = fl['longitude']['data'][time_inds]
cit_lat = fl['latitude']['data'][time_inds]
cit_alt = fl['altitude']['data'][time_inds]
cit_twc = fl['twc']['data'][time_inds]
cit_T = fl['temperature']['data'][time_inds]
if insidebox:
ind_inbox = np.where((cit_lon >= self.ll_lon) & (cit_lon <= self.ur_lon) & (cit_lat >= self.ll_lat) & (cit_lat <= self.ur_lat))
else:
ind_inbox = np.arange(0,len(fl['temperature']['data'][time_inds]))
bins = np.arange(0,6000,500)
binind = np.digitize(cit_alt[ind_inbox],bins=bins)
df = pd.DataFrame({'Temperature':cit_T[ind_inbox],'Alt':cit_alt[ind_inbox],'binind':binind})
df = df.groupby('binind').mean()
f_T = scipy.interpolate.interp1d(df.Alt.values,df.Temperature.values,fill_value='extrapolate',kind='linear')
T3d = f_T(self.xrds.alt3d.values)
da = xr.DataArray(T3d,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'degC'
da.attrs['standard_name'] = 'Temperature, inferred from Citation Spiral'
#add to xr dataset
self.xrds['T3d'] = da
#
def correct_gaseous(self,filepathtogasploutput=None,Wband=False):
""" This is a method to correct for 02 and H20 attenuation at Ku and Ka band, it requires you to run the gaspl package in matlab. If you wish to learn about this, please email me randyjc2 at illinois.edu """
if filepathtogasploutput is None:
print('Please supply filepath to gaspl output')
return
import scipy.io
import scipy.interpolate
d = scipy.io.loadmat(filepathtogasploutput)
#create interp funcs so we can plug in the apr gate structure
ka_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L'].ravel(),kind='cubic',bounds_error=False)
ku_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L2'].ravel(),kind='cubic',bounds_error=False)
if Wband:
w_func = scipy.interpolate.interp1d(d['alt'].ravel(),d['L3'].ravel(),kind='cubic',bounds_error=False)
k_ku = ku_func(self.xrds.alt3d.values)
k_ka = ka_func(self.xrds.alt3d.values)
k_ku = k_ku*0.03 #convert to dB/gate
k_ka = k_ka*0.03 #convert to dB/gate
k_ku[np.isnan(k_ku)] = 0
k_ka[np.isnan(k_ka)] = 0
k_ku = 2*np.cumsum(k_ku,axis=(0))
k_ka = 2*np.cumsum(k_ka,axis=(0))
ku_new = self.xrds.Ku.values + k_ku
da = xr.DataArray(ku_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ku-band Reflectivity'
self.xrds['Ku'] = da
ka_new = self.xrds.Ka.values + k_ka
da = xr.DataArray(ka_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ka-band Reflectivity'
self.xrds['Ka'] = da
if Wband:
k_w = w_func(self.xrds.alt3d.values)
k_w = k_w*0.03
k_w[np.isnan(k_w)] = 0
k_w = 2*np.cumsum(k_w,axis=(0))
w_new = self.xrds.W.values + k_w
da = xr.DataArray(w_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'W-band Reflectivity'
self.xrds['W'] = da
return
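# Hedged sketch (standalone): the two-way gaseous-attenuation correction used above, for a
# single hypothetical profile. k is assumed to be a one-way specific attenuation in dB/km from
# gaspl; 0.03 converts dB/km to dB per 30 m gate, and the factor of 2 gives the two-way path.
import numpy as np
k_dB_per_km = np.array([0.02, 0.02, 0.03, 0.04])   # one-way specific attenuation per gate
z_measured = np.array([15.0, 16.0, 18.0, 20.0])    # measured reflectivity, dBZ
pia = 2.0 * np.cumsum(k_dB_per_km * 0.03)          # cumulative two-way attenuation, dB
print(z_measured + pia)                            # gas-corrected reflectivity profile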
def run_retrieval(self,old=True):
from pickle import load
if old:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y.pkl', 'rb'))
else:
scaler_X = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_X_V2.pkl', 'rb'))
scaler_y = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/scaler_y_V2.pkl', 'rb'))
#now we have to reshape things to make sure they are in the right shape for the NN model [n_samples,n_features]
Ku = self.xrds.Ku.values
shape_step1 = Ku.shape
Ku = Ku.reshape([Ku.shape[0],Ku.shape[1]*Ku.shape[2]])
shape_step2 = Ku.shape
Ku = Ku.reshape([Ku.shape[0]*Ku.shape[1]])
Ka = self.xrds.Ka.values
Ka = Ka.reshape([Ka.shape[0],Ka.shape[1]*Ka.shape[2]])
Ka = Ka.reshape([Ka.shape[0]*Ka.shape[1]])
T = self.xrds.T3d.values
T = T.reshape([T.shape[0],T.shape[1]*T.shape[2]])
T = T.reshape([T.shape[0]*T.shape[1]])
ind_masked = np.isnan(Ku)
Ku_nomask = np.zeros(Ku.shape)
Ka_nomask = np.zeros(Ka.shape)
T_nomask = np.zeros(T.shape)
Ku_nomask[~ind_masked] = Ku[~ind_masked]
Ka_nomask[~ind_masked] = Ka[~ind_masked]
T_nomask[~ind_masked] = T[~ind_masked]
ind = np.where(Ku_nomask!=0)[0]
#scale the input vectors by the mean that it was trained with
X = np.zeros([Ku_nomask.shape[0],3])
X[:,0] = (Ku_nomask - scaler_X.mean_[0])/scaler_X.scale_[0] #ku
X[:,1] = ((Ku_nomask - Ka_nomask)- scaler_X.mean_[1])/scaler_X.scale_[1] #dfr
X[:,2] = (T_nomask - scaler_X.mean_[2])/scaler_X.scale_[2] #T
#
import tensorflow as tf
from tensorflow.python.keras import losses
if old:
model=tf.keras.models.load_model('/data/gpm/a/randyjc2/DRpy/drpy/models/NN_4by8.h5',
custom_objects=None,compile=True)
else:
model=tf.keras.models.load_model('/data/gpm/a/randyjc2/DRpy/drpy/models/NN_6by8.h5',
custom_objects=None,compile=True)
if self.T3d:
yhat = model.predict(X[ind,0:3],batch_size=len(X[ind,0]))
yhat = scaler_y.inverse_transform(yhat)
yhat[:,1] = 10**yhat[:,1]
yhat[:,2] = 10**yhat[:,2]
else:
print('ERROR, no temperature data')
ind = np.where(Ku_nomask!=0)[0]
Nw = np.zeros(Ku_nomask.shape)
Nw[ind] = np.squeeze(yhat[:,0])
Nw = Nw.reshape([shape_step2[0],shape_step2[1]])
Nw = Nw.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Nw = np.ma.masked_where(Nw==0.0,Nw)
da = xr.DataArray(Nw,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'log(m^-4)'
da.attrs['standard_name'] = 'Retrieved Liquid Eq. Nw'
self.xrds['Nw'] = da
Dm = np.zeros(Ku_nomask.shape)
Dm[ind] = np.squeeze(yhat[:,1])
Dm = Dm.reshape([shape_step2[0],shape_step2[1]])
Dm = Dm.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Dm = np.ma.masked_where(Dm==0.0,Dm)
da = xr.DataArray(Dm,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'Retrieved Liquid Eq. Dm'
self.xrds['Dm'] = da
Dm_frozen = np.zeros(Ku_nomask.shape)
Dm_frozen[ind] = np.squeeze(yhat[:,2])
Dm_frozen = Dm_frozen.reshape([shape_step2[0],shape_step2[1]])
Dm_frozen = Dm_frozen.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
Dm_frozen = np.ma.masked_where(Dm_frozen==0.0,Dm_frozen)
da = xr.DataArray(Dm_frozen,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'mm'
da.attrs['standard_name'] = 'Retrieved Frozen Dm'
self.xrds['Dm_frozen'] = da
Nw = 10**Nw #undo log, should be in m^-4
Dm = Dm/1000. # convert Dm from mm to m
IWC = (Nw*(Dm)**4*1000*np.pi)/4**(4) # the 1000 is density of water (kg/m^3)
IWC = IWC*1000 #convert to g/m^3
da = xr.DataArray(IWC,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^-3'
da.attrs['standard_name'] = 'Retrieved IWC, calculated from Nw and Dm'
self.xrds['IWC'] = da
def get_legacy(self):
from pickle import load
#load GPM-DPR v06 average relations
reg_kupr = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/LinearRegression_KUPR.pkl', 'rb'))
reg_kapr = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/LinearRegression_KAPR.pkl', 'rb'))
#load newly trained exp relations
reg_ku = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/LinearRegression_KU.pkl', 'rb'))
reg_ka = load(open('/data/gpm/a/randyjc2/DRpy/drpy/models/LinearRegression_KA.pkl', 'rb'))
#now we have to reshape things to make sure they are in the right shape for the NN model [n_samples,n_features]
Ku = 10**(self.xrds.Ku.values/10)
shape_step1 = Ku.shape
Ku = Ku.reshape([Ku.shape[0],Ku.shape[1]*Ku.shape[2]])
shape_step2 = Ku.shape
Ku = Ku.reshape([Ku.shape[0]*Ku.shape[1]])
Ka = 10**(self.xrds.Ka.values/10)
Ka = Ka.reshape([Ka.shape[0],Ka.shape[1]*Ka.shape[2]])
Ka = Ka.reshape([Ka.shape[0]*Ka.shape[1]])
ind_masked = np.isnan(Ku)
Ku_nomask = np.zeros(Ku.shape)
Ka_nomask = np.zeros(Ka.shape)
Ku_nomask[~ind_masked] = Ku[~ind_masked]
Ka_nomask[~ind_masked] = Ka[~ind_masked]
ind = np.where(Ku_nomask!=0)[0]
#Do Ku-band relations
X = np.zeros([Ku_nomask.shape[0],1])
X[:,0] = Ku_nomask
#kuexp
yhat = 10**reg_ku.predict(np.log10(X[ind,:]))
IWC = np.zeros(Ku_nomask.shape)
IWC[ind] = np.squeeze(yhat)
IWC = IWC.reshape([shape_step2[0],shape_step2[1]])
IWC = IWC.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
IWC = np.ma.masked_where(IWC==0.0,IWC)
da = xr.DataArray(IWC,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^-3'
da.attrs['standard_name'] = 'Retrieved IWC from new exp (ku-band)'
self.xrds['IWC_kuexp'] = da
#kuprexp
yhat = 10**reg_kupr.predict(np.log10(X[ind,:]))
IWC = np.zeros(Ku_nomask.shape)
IWC[ind] = np.squeeze(yhat)
IWC = IWC.reshape([shape_step2[0],shape_step2[1]])
IWC = IWC.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
IWC = np.ma.masked_where(IWC==0.0,IWC)
da = xr.DataArray(IWC,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^-3'
da.attrs['standard_name'] = 'Retrieved IWC from kupr exp'
self.xrds['IWC_kupr'] = da
#Do Ka-band relations
X = np.zeros([Ku_nomask.shape[0],1])
X[:,0] = Ka_nomask
#kaexp
yhat = 10**reg_ka.predict(np.log10(X[ind,:]))
IWC = np.zeros(Ku_nomask.shape)
IWC[ind] = np.squeeze(yhat)
IWC = IWC.reshape([shape_step2[0],shape_step2[1]])
IWC = IWC.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
IWC = np.ma.masked_where(IWC==0.0,IWC)
da = xr.DataArray(IWC,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^-3'
da.attrs['standard_name'] = 'Retrieved IWC from new exp (ka-band)'
self.xrds['IWC_kaexp'] = da
#kaprexp
yhat = 10**reg_kapr.predict(np.log10(X[ind,:]))
IWC = np.zeros(Ku_nomask.shape)
IWC[ind] = np.squeeze(yhat)
IWC = IWC.reshape([shape_step2[0],shape_step2[1]])
IWC = IWC.reshape([shape_step1[0],shape_step1[1],shape_step1[2]])
IWC = np.ma.masked_where(IWC==0.0,IWC)
da = xr.DataArray(IWC,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'g m^-3'
da.attrs['standard_name'] = 'Retrieved IWC from kapr exp'
self.xrds['IWC_kapr'] = da
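# Hedged sketch (standalone, made-up coefficients): get_legacy applies linear regressions in
# log-log space, i.e. a power law IWC = a * Z^b on linear reflectivity. The pickled models are
# assumed to be scikit-learn LinearRegression objects; the values below are placeholders.
import numpy as np
from sklearn.linear_model import LinearRegression
rng = np.random.default_rng(0)
Z_lin = 10 ** (rng.uniform(0, 35, 200) / 10)             # linear reflectivity, mm^6 m^-3
IWC = 0.05 * Z_lin ** 0.6                                # synthetic power-law "truth", g m^-3
reg = LinearRegression().fit(np.log10(Z_lin)[:, None], np.log10(IWC))
print(reg.coef_[0], 10 ** reg.intercept_)                # recovers ~0.6 and ~0.05
iwc_hat = 10 ** reg.predict(np.log10(Z_lin)[:, None])    # same call pattern as in get_legacy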
def mask_lobe(self):
""" MASK SIDELOBES. This is a problem in GCPEX. NEED TO MAKE SURE THEY ARE GONE """
ku_new = np.ma.masked_where(np.ma.abs(self.xrds.DopKu.values) >= 10, self.xrds.Ku.values)
da = xr.DataArray(ku_new,
dims={'range':np.arange(0,550),'cross_track':np.arange(0,24),
'along_track':np.arange(self.xrds.Ku.shape[2])},
coords={'lon3d': (['range','cross_track','along_track'],self.xrds.lon3d),
'lat3d': (['range','cross_track','along_track'],self.xrds.lat3d),
'time3d': (['range','cross_track','along_track'],self.xrds.time3d),
'alt3d':(['range','cross_track','along_track'],self.xrds.alt3d)})
da.fillna(value=-9999)
da.attrs['units'] = 'dBZ'
da.attrs['standard_name'] = 'Ku-band Reflectivity'
self.xrds['Ku'] = da
def cloudtopmask(self,sigma=1,mindBZ = 10,mask_others=True,freq='Ku'):
import scipy.ndimage
if freq =='Ku':
ku_temp = self.xrds.Ku.values
elif freq =='Ka':
ku_temp = self.xrds.Ka.values
elif freq == 'W':
ku_temp = self.xrds.W.values
ku_temp[np.isnan(ku_temp)] = -99.99
#Masks cloud-top noise for the selected frequency
ku_new = np.zeros(ku_temp.shape)
ku_new = np.ma.masked_where(ku_new == 0.0,ku_new)
for i in np.arange(0,24):
temp = np.copy(ku_temp[:,i,:])
a = scipy.ndimage.filters.gaussian_filter(temp,sigma)
a = np.ma.masked_where(a<mindBZ,a)
temp = np.ma.masked_where(a.mask,temp)
ku_new[:,i,:] =
|
np.ma.copy(temp)
|
numpy.ma.copy
|
from flask import Flask, render_template,request
import numpy as np
import re
import base64
import os
import tensorflow as tf
from PIL import Image
app = Flask(__name__)
app.config['UPLOADED_PHOTOS_DEST'] = '/Upload'
sess= tf.Session()
saver= tf.train.import_meta_graph('Model/saved_model.meta')
saver.restore(sess,tf.train.latest_checkpoint('Model/'))
graph = tf.get_default_graph()
x= graph.get_tensor_by_name("Input:0")
#keep_prob = graph.get_tensor_by_name("keep_prob:0")
prediction = graph.get_tensor_by_name("Prediction:0")
prediction = (tf.nn.softmax(prediction))
def ImageConversion(raw_image):
raw_image = raw_image.decode('utf-8')
img_string = re.search(r'base64,(.*)',raw_image).group(1)
img_string = bytes(img_string,'utf-8')
with open('Upload/Untitled.png','wb') as output:
output.write(base64.decodebytes(img_string))  # decodestring was removed in Python 3.9
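# Hedged sketch (illustration only): ImageConversion expects a data URL like the one a canvas'
# toDataURL() produces. Round-tripping a tiny hypothetical payload shows the parsing; re and
# base64 are already imported at the top of this module.
example_raw = b"data:image/png;base64," + base64.b64encode(b"fake png bytes")
payload = re.search(r'base64,(.*)', example_raw.decode('utf-8')).group(1)
print(base64.b64decode(payload))   # b'fake png bytes'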
def ImagePreprocessing():
image = Image.open('Upload/Untitled.png').convert('RGB').convert('L')
image = image.resize((75,75))
arr = np.array(image)
arr = 255-arr
while np.sum(arr[0])==0:
arr = arr[1:]
while np.sum(arr[:,0])==0:
arr = arr[:,1:]
while np.sum(arr[-1])==0:
arr = arr[:-1]
while np.sum(arr[:,-1])==0:
arr = arr[:,:-1]
height = arr.shape[0]
width = arr.shape[1]
if height > width:
ratio = height/20
height = int(height/ratio)
width = int(width/ratio)
else:
ratio = width/20
height = int(height/ratio)
width = int(width/ratio)
image = Image.fromarray(arr)
image = image.resize((int(width), int(height)))
arr = np.array(image)
upr_arr = np.zeros(shape=(14-int(height/2), width))
height = height + (14-int(height/2))
lwr_arr =
|
np.zeros(shape=(28-height, width))
|
numpy.zeros
|
import datetime
import pandas as pd
import numpy as np
from pathlib import Path
formatted_text = """
SMILES: \\verb|{internal_smi}|
Nearest TMC-1 molecule (distance): \\verb|{anchor}| ({distance:.2f})
Is DFT optimized?: {not_opt}
| Property | Value |
|---|---|
| Formula | {formula} |
| Molecular weight | {mass:.3f} |
| IUPAC name | {iupac_name} |
| $\mu_{{a,b,c}}$ | {mu_a}, {mu_b}, {mu_c} |
| $A, B, C$ | {A}, {B}, {C} |
| $A_s, B_s, C_s$ | {A_s}, {B_s}, {C_s} |
| Charge, Multiplicity | {charge}, {multiplicity} |
| Predicted log column density | {log_column:.3f}\pm{log_column_unc:.3f} |
| Electronic energy | {e} |
"""
header = """---
title: {title}
author: {author}
date: {date}
titlepage: {titlepage}
header-includes:
- '\\usepackage{{graphicx}}'
---
"""
figure = """
\\begin{{figure}}
\\centering
\\includegraphics[width=0.4\\textwidth]{{{fig_path}}}
\\end{{figure}}
"""
foreword = """
# Foreword
The following molecules were identified to be of astrochemical interest towards the dark
molecular cloud TMC-1 using the unsupervised machine learning methodology described in:
Lee _et al._, "Unsupervised Machine Learning of Interstellar Chemical Inventories" (2021)
This PDF presents preliminary data for 1510 molecules that are of interest to
the chemical inventory of TMC-1, as identified with machine learning. The
molecules are identified based on a Euclidean distance cutoff, providing up to
100 of the closest molecules to those already seen in TMC-1. Structures were generated
from their SMILES strings using `OpenBabel` and `rdkit`, and geometry optimization carried out
using the geomeTRIC program:
> <NAME>.; <NAME>. (2016), _J. Chem, Phys._ 144, 214108. http://dx.doi.org/10.1063/1.4952956
Electronic structure calculations were performed using `psi4`, with both
geometry optimization and dipole moments calculated at the
$\omega$B97X-D/6-31+G(d) level of theory. Equilibrium dipole moments and
rotational constants are reported in unsigned debye and MHz respectively; for
the latter, we provide effective scaled parameters as well that empirically
correct for vibration-rotation interactions. Please refer to "<NAME>. and
<NAME>. 2020, _J Phys Chem A_, 5, 898" for information regarding their
uncertainties. For molecules where SCF/geometry optimizations failed to
converge, we provide their dipole moments based on the molecular mechanics
structures. These molecules will be indicated by "Is DFT optimized?: False".
The predicted column densities and uncertainties are given with a simple
Gaussian Process with rational quadratic and white noise kernels. Simply put,
the predicted column densities of unseen molecules are given as functions of
distance in chemical space that decays naturally to zero for infinite distance
from other data points. The reader is encouraged to look at the distances
between recommendations and TMC-1 molecules to develop an intuition for how the
predicted column density behaves roughly with distance, and interpret them with
the uncertainties accordingly: as a guide but not to rule out molecules
specifically. Molecules with particularly large uncertainties are likely to be
impactful in constraining the chemistry of the source, even if we provide just
an upper limit.
Finally, there is no real ordering to how the molecules are given. This is
quasi-random, although there are pockets of similar molecules based on how
similar the TMC-1 molecules are between searches.
\\newpage
"""
def format_row(row) -> str:
cols = ["formula", "mass", "iupac_name", "internal_smi", "mu_a", "mu_b", "mu_c",
"A", "B", "C", "charge", "multiplicity", "e", "anchor", "distance", "log_column", "log_column_unc", "not_opt"]
data = {key: row[key] for key in cols}
data["not_opt"] = not data["not_opt"]
for key in ["A", "B", "C"]:
if data["not_opt"]:
# do scaling
value = data[key] * 0.9971
if not np.isnan(value):
data[f"{key}_s"] = f"{value:.4f}"
else:
data[f"{key}_s"] = "$\\infty$"
else:
data[f"{key}_s"] = " - "
if np.isnan(data[key]):
data[key] = "$\\infty$"
else:
data[key] = f"{data[key]:.4f}"
dipole = data[f"mu_{key.lower()}"]
if np.isnan(dipole):
dipole = " - "
else:
dipole = f"{dipole:.1f}"
data[f"mu_{key.lower()}"] = dipole
e = data["e"]
if np.isnan(e):
data["e"] = " - "
else:
data["e"] = f"{e:.5f}"
# escape special character
#data["internal_smi"] = f"""\\begin{{verbatim}} {data["internal_smi"]} \\end{{verbatim}}"""
#data["anchor"] = f"""\\begin{{verbatim}} {data["anchor"]} \\end{{verbatim}}"""
return formatted_text.format_map(data)
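# Hedged sketch (illustration only): format_row expects one row of summary.csv with the columns
# listed in cols. The values below are made up purely to show the template filling.
_example_row = {
    "formula": "C4H3N", "mass": 65.027, "iupac_name": "but-3-ynenitrile",
    "internal_smi": "C#CCC#N", "mu_a": 3.2, "mu_b": 0.4, "mu_c": np.nan,
    "A": 4500.0, "B": 2300.0, "C": 1500.0, "charge": 0, "multiplicity": 1,
    "e": -208.12345, "anchor": "HC3N", "distance": 0.85,
    "log_column": 11.2, "log_column_unc": 0.6, "not_opt": False,
}
# print(format_row(_example_row))   # renders one block of the report defined in formatted_text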
def main():
data = pd.read_csv("summary.csv", index_col=0)
data.loc[data["iupac_name"].isna(), "iupac_name"] = ""
with open("report.md", "w+") as write_file:
time = datetime.datetime.now().strftime("%Y-%m-%d")
format_dict = {
"title": "Molecule recommendations for TMC-1",
"author": ["Lee _et al._",],
"date": time,
"titlepage": "true",
}
write_file.write(header.format_map(format_dict))
write_file.write(foreword)
for key in ["mu_a", "mu_b", "mu_c"]:
data.loc[:,key] =
|
np.abs(data.loc[:,key])
|
numpy.abs
|
# -*- coding: utf-8 -*-
"""
PySEBAL_dev_v3.8
@author: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
UNESCO-IHE
September 2017
@author <NAME>
February 2020
"""
#import pdb
import platform
import sys
import os
import re
import shutil
import numpy as np
import datetime
from osgeo import osr
from osgeo import gdal
from math import sin, cos, pi, tan
import time
import subprocess
import numpy.polynomial.polynomial as poly
from openpyxl import load_workbook
from pyproj import Proj, transform
import warnings
def SEBALcode(number,inputExcel):
# Do not show warnings
warnings.filterwarnings('ignore')
# Open Excel workbook
wb = load_workbook(inputExcel)
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the input and output folder, and Image type from the excel file
input_folder = r"%s" %str(ws['B%d' %number].value)
output_folder = r"%s" %str(ws['C%d' %number].value)
Image_Type = int(ws['D%d' %number].value) # Type of Image (1 = Landsat, 2 = VIIRS & PROBA-V, 3 = MODIS)
# Create or empty output folder
if os.path.isdir(output_folder):
shutil.rmtree(output_folder)
os.makedirs(output_folder)
# Start log file
filename_logfile = os.path.join(output_folder, 'log.txt')
sys.stdout = open(filename_logfile, 'w')
# Extract the Path to the DEM map from the excel file
DEM_fileName = r"%s" %str(ws['E%d' %number].value) #'DEM_HydroShed_m'
# Print data used from sheet General_Input
print ('.................................................................. ')
print ('......................SEBAL Model running ........................ ')
print ('.................................................................. ')
print ('pySEBAL version 3.3.7.1 Github')
print ('General Input:')
print(('Path to DEM file = %s' %str(DEM_fileName)))
print(('input_folder = %s' %str(input_folder)))
print(('output_folder = %s' %str(output_folder)))
print(('Image_Type = %s' %int(Image_Type)))
print ('................ Input Maps LS or PROBA-V and VIIRS............... ')
if Image_Type == 3:
# Open the VIIRS_PROBAV_Input sheet
ws = wb['MODIS_Input']
# Extract the name of the thermal and quality VIIRS image from the excel file
Name_MODIS_Image_Ref = str(ws['B%d' %number].value) #reflectance
Name_MODIS_Image_NDVI = str(ws['D%d' %number].value) #ndvi
Name_MODIS_Image_LST = str(ws['C%d' %number].value) #land surface temperature
# Create complete path to data
src_FileName_LST = os.path.join(input_folder, '%s.hdf' %Name_MODIS_Image_LST)
src_FileName_NDVI = os.path.join(input_folder, '%s.hdf' %Name_MODIS_Image_NDVI)
src_FileName_Ref = os.path.join(input_folder, '%s.hdf' %Name_MODIS_Image_Ref)
# Calibration constants Hot Pixels extracted from the excel file
# Old constants now put as zero
Hot_Pixel_Constant = 0 # Hot Pixel Value = Mean_Hot_Pixel + Hot_Pixel_Constant * Std_Hot_Pixel (only for VIIRS images)
# Calibration constants Cold Pixels from the excel file
Cold_Pixel_Constant = 0 # Cold Pixel Value = Mean_Cold_Pixel + Cold_Pixel_Constant * Std_Cold_Pixel (only for VIIRS images)
# Pixel size of the model
pixel_spacing = int(250)
# UTM Zone of the end results
UTM_Zone = float(ws['G%d' %number].value)
# Print data used from sheet General_Input
print('MODIS Input:')
print('Path to MODIS LST image = %s' %str(Name_MODIS_Image_LST))
print('Path to MODIS NDVI image = %s' %str(Name_MODIS_Image_NDVI))
print('Path to MODIS Reflectance image = %s' %str(Name_MODIS_Image_Ref))
print('Hot Pixel Constant MODIS = %s' %(Hot_Pixel_Constant))
print('Cold Pixel Constant MODIS = %s' %(Cold_Pixel_Constant))
print('UTM Zone = %s' %(UTM_Zone))
print('Pixel size model = %s (Meters)' %(pixel_spacing))
if Image_Type == 2:
# Open the VIIRS_PROBAV_Input sheet
ws = wb['VIIRS_PROBAV_Input']
# Extract the name of the thermal and quality VIIRS image from the excel file
Name_VIIRS_Image_TB = '%s' %str(ws['B%d' %number].value)
Name_VIIRS_Image_QC = '%s' %str(ws['C%d' %number].value)
# Extract the name to the PROBA-V image from the excel file
Name_PROBAV_Image = '%s' %str(ws['D%d' %number].value) # Must be a tiff file
# Calibration constants Hot Pixels extracted from the excel file
Hot_Pixel_Constant = 0 # Hot Pixel Value = Mean_Hot_Pixel + Hot_Pixel_Constant * Std_Hot_Pixel (only for VIIRS images)
# Calibration constants Cold Pixels from the excel file
Cold_Pixel_Constant = 0 # Cold Pixel Value = Mean_Cold_Pixel + Cold_Pixel_Constant * Std_Cold_Pixel (only for VIIRS images)
# Pixel size of the model
pixel_spacing = int(100)
# UTM Zone of the end results
UTM_Zone = float(ws['G%d' %number].value)
# Print data used from sheet VIIRS_PROBAV_Input
print('VIIRS PROBA-V Input:')
print('Path to Thermal VIIRS image = %s' %str(Name_VIIRS_Image_TB))
print('Path to Quality VIIRS image = %s' %str(Name_VIIRS_Image_QC))
print('Hot Pixel Constant VIIRS = %s' %(Hot_Pixel_Constant))
print('Cold Pixel Constant VIIRS = %s' %(Cold_Pixel_Constant))
print('UTM Zone = %s' %(UTM_Zone))
print('Pixel size model = %s (Meters)' %(pixel_spacing))
if Image_Type == 1:
# Open the Landsat_Input sheet
ws = wb['Landsat_Input']
# Extract Landsat name, number and amount of thermal bands from excel file
Name_Landsat_Image = str(ws['B%d' %number].value) # From glovis.usgs.gov
Landsat_nr = int(ws['C%d' %number].value) # Type of Landsat (LS) image used (LS5, LS7, or LS8)
Bands_thermal = int(ws['D%d' %number].value) # Number of LS bands to use to retrieve land surface
# temperature: 1 = Band 6 for LS_5 & 7, Band 10 for LS_8 (optional)
# Calibration constants for hot pixels from the excel file
# Old constants, replaced by other inputs in the excel sheet
Hot_Pixel_Constant = 0 # Hot Pixel Value = Mean_Hot_Pixel + Hot_Pixel_Constant * Std_Hot_Pixel (only for Landsat images)
# Calibration constants for cold pixels from the excel file
Cold_Pixel_Constant = 0 # Cold Pixel Value = Mean_Cold_Pixel + Cold_Pixel_Constant * Std_Cold_Pixel (only for Landsat images)
# NEW variables from excel sheet (Landsat_Input)
NDVIhot_low1 = float(ws['G%d' %number].value) # Lower NDVI threshold for hot pixels
NDVIhot_high1 = float(ws['H%d' %number].value) # Upper NDVI threshold for hot pixels
tcoldmin1 = float(ws['E%d' %number].value)
tcoldmax1 = float(ws['F%d' %number].value)
temp_lapse = float(ws['I%d' %number].value)
# Pixel size of the model
pixel_spacing = int(30)
# Print data used from sheet Landsat_Input
print('Landsat Input:')
print('Name of the Landsat Image = %s' %str(Name_Landsat_Image))
print('Landsat number = %s' %str(Landsat_nr))
print('Thermal Bands that will be used = %s' %(Bands_thermal))
print('Hot Pixel Constant Landsat = %s' %(Hot_Pixel_Constant))
print('Cold Pixel Constant Landsat = %s' %(Cold_Pixel_Constant))
print('Pixel size model = %s' %(pixel_spacing))
print('......................... Meteo Data ............................. ')
# Open the Meteo_Input sheet
ws = wb['Meteo_Input']
# ---------------------------- Instantaneous Air Temperature ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Temp_inst = float(ws['B%d' %number].value) # Instantaneous Air Temperature (°C)
# If the data is a value than call this variable 0
Temp_inst_kind_of_data = 0
print('Instantaneous Temperature constant value of = %s (degrees Celsius)' %(Temp_inst))
# if the data is not a value, than open as a string
except:
Temp_inst_name = '%s' %str(ws['B%d' %number].value)
# If the data is a string than call this variable 1
Temp_inst_kind_of_data = 1
print('Map to the Instantaneous Temperature = %s' %(Temp_inst_name))
# ---------------------------- Daily Average Air Temperature ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Temp_24 = float(ws['C%d' %number].value) # daily average Air Temperature (°C)
# If the data is a value than call this variable 0
Temp_24_kind_of_data = 0
print('Daily average Temperature constant value of = %s (degrees Celsius)' %(Temp_24))
# if the data is not a value, than open as a string
except:
Temp_24_name = '%s' %str(ws['C%d' %number].value)
# If the data is a string than call this variable 1
Temp_24_kind_of_data = 1
print('Map to the Daily average Temperature = %s' %(Temp_24_name))
# ---------------------------- Instantaneous Relative humidity ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
RH_inst = float(ws['D%d' %number].value) # Instantaneous Relative humidity (%)
# If the data is a value than call this variable 0
RH_inst_kind_of_data = 0
print('Instantaneous Relative humidity constant value of = %s (percentage)' %(RH_inst))
# if the data is not a value, than open as a string
except:
RH_inst_name = '%s' %str(ws['D%d' %number].value)
# If the data is a string than call this variable 1
RH_inst_kind_of_data = 1
print('Map to the Instantaneous Relative humidity = %s' %(RH_inst_name))
# ---------------------------- daily average Relative humidity ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
RH_24 = float(ws['E%d' %number].value) # daily average Relative humidity (%)
# If the data is a value than call this variable 0
RH_24_kind_of_data = 0
print('Daily average Relative humidity constant value of = %s (percentage)' %(RH_24))
# if the data is not a value, than open as a string
except:
RH_24_name = '%s' %str(ws['E%d' %number].value)
# If the data is a string than call this variable 1
RH_24_kind_of_data = 1
print('Map to the Daily average Relative humidity = %s' %(RH_24_name))
# ---------------------------- instantaneous Wind Speed ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Wind_inst = float(ws['G%d' %number].value) # instantaneous Wind Speed (m/s)
# If the data is a value than call this variable 0
Wind_inst_kind_of_data = 0
print('Instantaneous Wind Speed constant value of = %s (m/s)' %(Wind_inst))
# if the data is not a value, than open as a string
except:
Wind_inst_name = '%s' %str(ws['G%d' %number].value)
# If the data is a string than call this variable 1
Wind_inst_kind_of_data = 1
print('Map to the Instantaneous Wind Speed = %s' %(Wind_inst_name))
# ---------------------------- daily Wind Speed ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Wind_24 = float(ws['H%d' %number].value) # daily Wind Speed (m/s)
# If the data is a value than call this variable 0
Wind_24_kind_of_data = 0
print('Daily Wind Speed constant value of = %s (m/s)' %(Wind_24))
# if the data is not a value, than open as a string
except:
Wind_24_name = '%s' %str(ws['H%d' %number].value)
# If the data is a string than call this variable 1
Wind_24_kind_of_data = 1
print('Map to the Daily Wind Speed = %s' %(Wind_24_name))
# Height of the wind speed measurement
zx = float(ws['F%d' %number].value) # Height at which wind speed is measured
print('Height at which wind speed is measured = %s (m)' %(zx))
# Define the method of radiation (1 or 2)
Method_Radiation_24=int(ws['I%d' %number].value) # 1=Transm_24 will be calculated Rs_24 must be given
# 2=Rs_24 will be determined Transm_24 must be given
print('Method for daily radiation (1=Rs_24, 2=Transm_24) = %s' %(Method_Radiation_24))
# if method radiation == 1
# ---------------------------- daily Surface Solar Radiation ------------
# Open meteo data, first try to open as value, otherwise as string (path)
if Method_Radiation_24 == 1:
try:
Rs_24 = float(ws['J%d' %number].value) # daily Surface Solar Radiation (W/m2) only required when Method_Radiation_24 = 1
# If the data is a value than call this variable 0
Rs_24_kind_of_data = 0
print('Daily Surface Solar Radiation constant value of = %s (W/m2)' %(Rs_24))
# if the data is not a value, than open as a string
except:
Rs_24_name = '%s' %str(ws['J%d' %number].value)
# If the data is a string than call this variable 1
Rs_24_kind_of_data = 1
print('Map to the Daily Surface Solar Radiation = %s' %(Rs_24_name))
# if method radiation == 2
# ---------------------------- daily transmissivity ------------
# Open meteo data, first try to open as value, otherwise as string (path)
if Method_Radiation_24 == 2:
try:
Transm_24 = float(ws['K%d' %number].value) # daily transmissivity, Typical values between 0.65 and 0.8 only required when Method_Radiation_24 = 2
# If the data is a value than call this variable 0
Transm_24_kind_of_data = 0
print('Daily transmissivity constant value of = %s' %(Transm_24))
# if the data is not a value, than open as a string
except:
Transm_24_name = '%s' %str(ws['K%d' %number].value)
# If the data is a string than call this variable 1
Transm_24_kind_of_data = 1
print('Map to the Daily transmissivity = %s' %(Transm_24_name))
# Define the method of instantaneous radiation (1 or 2)
Method_Radiation_inst = int(ws['L%d' %number].value) # 1=Transm_inst will be calculated, Rs_inst must be given
print('Method for instantaneous radiation (1=Rs_inst, 2=Transm_inst) = %s' %(Method_Radiation_inst)) # 2=Rs_inst will be determined, Transm_inst must be given
# if method instantaneous radiation == 1
# ---------------------------- Instantaneous Surface Solar Radiation ------------
# Open meteo data, first try to open as value, otherwise as string (path)
if Method_Radiation_inst == 1:
try:
Rs_in_inst = float(ws['M%d' %number].value) # Instantaneous Surface Solar Radiation (W/m2) only required when Method_Radiation_inst = 1
# If the data is a value than call this variable 0
Rs_in_inst_kind_of_data = 0
print('Instantaneous Surface Solar Radiation constant value of = %s (W/m2)' %(Rs_in_inst))
# if the data is not a value, than open as a string
except:
Rs_in_inst_name = '%s' %str(ws['M%d' %number].value)
# If the data is a string than call this variable 1
Rs_in_inst_kind_of_data = 1
print('Map to the Instantaneous Surface Solar Radiation = %s' %(Rs_in_inst_name))
# if method instantaneous radiation == 2
# ---------------------------- Instantaneous transmissivity------------
# Open meteo data, first try to open as value, otherwise as string (path)
if Method_Radiation_inst == 2:
try:
Transm_inst = float(ws['N%d' %number].value) # Instantaneous transmissivity, Typical values between 0.70 and 0.85 only required when Method_Radiation_inst = 2
# If the data is a value than call this variable 0
Transm_inst_kind_of_data=0
print('Instantaneous transmissivity constant value of = %s' %(Transm_inst))
# if the data is not a value, than open as a string
except:
Transm_inst_name = '%s' %str(ws['N%d' %number].value)
# If the data is a string than call this variable 1
Transm_inst_kind_of_data = 1
print('Map to the Instantaneous transmissivity = %s' %(Transm_inst_name))
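# --------------------------------------------------------------------------
# Editorial note: every meteo input above repeats the same "constant value or
# raster path" pattern (try float, fall back to a string path plus a
# *_kind_of_data flag). A minimal sketch of that pattern is given below as a
# comment; it is illustrative only and not part of pySEBAL itself (the name
# value_or_path is hypothetical):
#
#     def value_or_path(cell):
#         """Return (float(cell), 0) for a constant, (str(cell), 1) for a map path."""
#         try:
#             return float(cell), 0
#         except (TypeError, ValueError):
#             return str(cell), 1
#
#     # e.g. Temp_inst, Temp_inst_kind_of_data = value_or_path(ws['B%d' % number].value)
# --------------------------------------------------------------------------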
# ------------------------------------------------------------------------
# General constants that could be changed by the user:
print('...................... General Constants ......................... ')
# Data for Module 2 - Spectral and Thermal bands
L_SAVI = 0.5 # Constant for SAVI
print('General constants for Module 2:')
print('Constant for SAVI (L) = %s' %(L_SAVI))
# Data for Module 3 - Vegetation properties
Apparent_atmosf_transm = 0.89 # Used for the atmospheric correction of broad-band albedo; a fixed value for now, ideally replaced by tsw.
path_radiance = 0.03 # Recommended, Range: [0.025 - 0.04], based on Bastiaanssen (2000).
print('General constants for Module 3:')
print('Atmospheric correction of broad band albedo = %s' %(Apparent_atmosf_transm))
print('Path Radiance = %s' %(path_radiance))
# Data for Module 4 - Surface temperature, Cloud, Water, and Snow mask
Rp = 0.91 # Path radiance in the 10.4-12.5 µm band (W/m2/sr/µm)
tau_sky = 0.866 # Narrow-band transmissivity of air in the 10.4-12.5 µm band
surf_temp_offset = 3 # Surface temperature offset for water
Temperature_offset_shadow = -1 # Temperature offset for detecting shadow
Maximum_shadow_albedo = 0.1 # Maximum albedo value for shadow
Temperature_offset_clouds = -3 # Temperature offset for detecting clouds
Minimum_cloud_albedo = 0.4 # Minimum albedo value for clouds
print('General constants for Module 4:')
print('Narrow band transmissivity of air = %s' %(tau_sky))
print('Surface temperature offset for water = %s (Kelvin)' %(surf_temp_offset))
print('Temperature offset for detecting shadow = %s (Kelvin)' %(Temperature_offset_shadow))
print('Maximum albedo value for shadow = %s' %(Maximum_shadow_albedo))
print('Temperature offset for detecting clouds = %s (Kelvin)' %(Temperature_offset_clouds))
print('Minimum albedo value for clouds = %s' %(Minimum_cloud_albedo))
# Data for Module 6 - Turbulence
print('General constants for Module 6:')
surf_roughness_equation_used = 1 # NDVI model = 1, Raupach model = 2
print('NDVI model(1), Raupach model(2) = %s' %(surf_roughness_equation_used))
try:
h_obst = float(ws['O%d' %number].value) # Obstacle height
h_obst_kind_of_data = 0
print('Obstacle height constant value of = %s (Meter)' %(h_obst))
except:
h_obst_name = '%s' %str(ws['O%d' %number].value)
h_obst_kind_of_data=1 # Obstacle height (m) -Replace for map based on Land use?
print('Map to the Obstacle height = %s' %(h_obst_name))
#NDVIhot_low = 0.03 # Lower NDVI threshold for hot pixels
#NDVIhot_high = 0.20 # Upper NDVI threshold for hot pixels
print('Lower NDVI threshold for hot pixels = %s' %(NDVIhot_low1))
print('Upper NDVI threshold for hot pixels = %s' %(NDVIhot_high1))
# Data for Module 12 - Soil moisture
# Open soil input sheet
ws = wb['Soil_Input']
print('General constants for Module 12:')
# ---------------------------- Saturated soil moisture content topsoil ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
Theta_sat_top = float(ws['B%d' %number].value) # Saturated soil moisture content topsoil
# If the data is a value than call this variable 0
Theta_sat_top_kind_of_data = 0
print('Saturated soil moisture content topsoil constant value of = %s (cm3/cm3)' %(Theta_sat_top))
# if the data is not a value, than open as a string
except:
Theta_sat_top_name = '%s' %str(ws['B%d' %number].value)
# If the data is a string then set this variable to 1
Theta_sat_top_kind_of_data = 1
print('Map to the Saturated soil moisture content topsoil = %s' %(Theta_sat_top_name))
# ---------------------------- Saturated soil moisture content subsoil ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
Theta_sat_sub = float(ws['C%d' %number].value) # Saturated soil moisture content subsoil
# If the data is a value than call this variable 0
Theta_sat_sub_kind_of_data = 0
print('Saturated soil moisture content subsoil constant value of = %s (cm3/cm3)' %(Theta_sat_sub))
# if the data is not a value, than open as a string
except:
Theta_sat_sub_name = '%s' %str(ws['C%d' %number].value)
# If the data is a string then set this variable to 1
Theta_sat_sub_kind_of_data = 1
print('Map to the Saturated soil moisture content subsoil = %s' %(Theta_sat_sub_name))
# ---------------------------- Residual soil moisture content topsoil ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
Theta_res_top = float(ws['D%d' %number].value) # Residual soil moisture content
# If the data is a value than call this variable 0
Theta_res_top_kind_of_data = 0
print('Residual soil moisture content topsoil constant value of = %s (cm3/cm3)' %(Theta_res_top))
# if the data is not a value, than open as a string
except:
Theta_res_top_name = '%s' %str(ws['D%d' %number].value)
# If the data is a string then set this variable to 1
Theta_res_top_kind_of_data = 1
print('Map to the Residual soil moisture content topsoil = %s' %(Theta_res_top_name))
# ---------------------------- Residual soil moisture content subsoil ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
Theta_res_sub = float(ws['E%d' %number].value) # Residual soil moisture content subsoil
# If the data is a value than call this variable 0
Theta_res_sub_kind_of_data = 0
print('Residual soil moisture content subsoil constant value of = %s (cm3/cm3)' %(Theta_res_sub))
# if the data is not a value, than open as a string
except:
Theta_res_sub_name = '%s' %str(ws['E%d' %number].value)
# If the data is a string then set this variable to 1
Theta_res_sub_kind_of_data = 1
print('Map to the residual soil moisture content subsoil = %s' %(Theta_res_sub_name))
# ---------------------------- Wilting point ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
Soil_moisture_wilting_point = float(ws['G%d' %number].value) # Wilting point
# If the data is a value than call this variable 0
Soil_moisture_wilting_point_kind_of_data = 0
print('Soil moisture wilting point constant value of = %s (cm3/cm3)' %(Soil_moisture_wilting_point))
# if the data is not a value, than open as a string
except:
Soil_moisture_wilting_point_name = '%s' %str(ws['G%d' %number].value)
# If the data is a string then set this variable to 1
Soil_moisture_wilting_point_kind_of_data = 1
print('Map to the soil moisture wilting point = %s' %(Soil_moisture_wilting_point_name))
# ---------------------------- Depletion factor ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
depl_factor = float(ws['H%d' %number].value) # Depletion factor
# If the data is a value than call this variable 0
depl_factor_kind_of_data = 0
print('Depletion factor constant value of = %s' %(depl_factor))
# if the data is not a value, than open as a string
except:
depl_factor_name = '%s' %str(ws['H%d' %number].value)
# If the data is a string then set this variable to 1
depl_factor_kind_of_data = 1
print('Map to the Depletion factor = %s' %(depl_factor_name))
Light_use_extinction_factor = 0.5 # Light use extinction factor for Beer's Law
print("Light use extinction factor for Beer's Law = %s" %(Light_use_extinction_factor))
# ---------------------------- Fraction field capacity ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
Field_Capacity = float(ws['F%d' %number].value) # Field capacity divided by saturation capacity
# If the data is a value than call this variable 0
Field_Capacity_kind_of_data = 0
print('Fraction field capacity and saturation constant value= %s' %(Field_Capacity))
# if the data is not a value, than open as a string
except:
Field_Capacity_name = '%s' %str(ws['F%d' %number].value)
# If the data is a string then set this variable to 1
Field_Capacity_kind_of_data = 1
print('Map to the Fraction field capacity and saturation = %s' %(Field_Capacity_name))
print('General constants for Module 13:')
# ---------------------------- Light Use Efficiency ------------
# Open soil data, first try to open as value, otherwise as string (path)
try:
LUEmax = float(ws['I%d' %number].value) # Maximum light use efficiency
# If the data is a value than call this variable 0
LUEmax_kind_of_data = 0
print('Light use efficiency constant value = %s' %(LUEmax))
# if the data is not a value, than open as a string
except:
LUEmax_name = '%s' %str(ws['I%d' %number].value)
# If the data is a string then set this variable to 1
LUEmax_kind_of_data = 1
print('Map to the Light use efficiency = %s' %(LUEmax_name))
# Data for Module 13 - Biomass production
Th = 35.0 # Upper limit of stomatal activity
Kt = 23.0 # Optimum conductance temperature (°C), range: [17 - 19]
Tl = 0.0 # Lower limit of stomatal activity
rl = 130 # Bulk stomatal resistance of the well-illuminated leaf (s/m)
print('Upper limit of stomatal activity = %s' %(Th))
print('Optimum conductance temperature = %s (degrees Celsius)' %(Kt))
print('Lower limit of stomatal activity= %s' %(Tl))
print('Bulk stomatal resistance of the well-illuminated leaf = %s (s/m)' %(rl))
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# --- Extract general info from Landsat or VIIRS metadata: DOY, hour, minutes
if Image_Type == 1:
# the path to the MTL file of landsat
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
# read the general info from the MTL file
year, DOY, mon, day, hour, minutes, UTM_Zone, Sun_elevation = info_general_metadata(Landsat_meta_fileName) # call definition info_general_metadata
# define the kind of sensor and resolution of the sensor
sensor1 = 'L%d' % Landsat_nr
sensor2 = 'L%d' % Landsat_nr
sensor3 = 'L%d' % Landsat_nr
res1 = '30m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
if Image_Type == 2:
#Get time from the VIIRS dataset name (IMPORTANT TO KEEP THE TEMPLATE OF THE VIIRS NAME CORRECT example: VIIRS_SVIO5_npp_d20160601_t1103128_e1108532_b23808_c20160601170854581426_noaa_ops.tif npp_viirs_i05_20150701_124752_wgs84_fit.tif)
Total_Day_VIIRS = Name_VIIRS_Image_TB.split('_')[3]
Total_Time_VIIRS = Name_VIIRS_Image_TB.split('_')[4]
# Get the information out of the VIIRS name
year = int(Total_Day_VIIRS[1:5])
month = int(Total_Day_VIIRS[5:7])
day = int(Total_Day_VIIRS[7:9])
Startdate = '%d-%02d-%02d' % (year,month,day)
DOY = datetime.datetime.strptime(Startdate,'%Y-%m-%d').timetuple().tm_yday
hour = int(Total_Time_VIIRS[1:3])
minutes = int(Total_Time_VIIRS[3:5])
# define the kind of sensor and resolution of the sensor
sensor1 = 'PROBAV'
sensor2 = 'VIIRS'
res1 = '375m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
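# Editorial note: for the example file name given above, 'd20160601' parses to
# year 2016, month 06, day 01, which strptime('%Y-%m-%d').timetuple().tm_yday
# converts to DOY = 153 (2016 is a leap year), and 't1103128' parses to
# hour = 11, minutes = 03 (UTC).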
if Image_Type == 3:
#Get time from the MODIS dataset name (IMPORTANT TO KEEP THE TEMPLATE OF THE MODIS NAME CORRECT example: MOD13Q1.A2008129.h18v05.006.2015175090913.hdf)
Total_Day_MODIS = Name_MODIS_Image_LST.split('.')[-4][1:]
# Get the information out of the MODIS name
year = int(Total_Day_MODIS[0:4])
DOY = int(Total_Day_MODIS[4:7])
# Derive the calendar month and day from year and DOY (both are used in the output file names below)
date_MODIS = datetime.datetime.strptime('%d%03d' % (year, DOY), '%Y%j')
mon = date_MODIS.month
day = date_MODIS.day
# define the kind of sensor and resolution of the sensor
sensor1 = 'MODIS'
sensor2 = 'MODIS'
res1 = '1000m'
res2 = '250m'
res3 = '500m'
#pdb.set_trace()
# ------------------------------------------------------------------------
# Define the output maps names
proyDEM_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'proy_DEM_%s.tif' %res2)
slope_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'slope_%s.tif' %res2)
aspect_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'aspect_%s.tif' %res2)
radiation_inst_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Ra_inst_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
phi_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'phi_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
radiation_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Ra24_mountain_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
cos_zn_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'cos_zn_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#Atmos_pressure_fileName = os.path.join(output_folder, 'Output_meteo', 'atmos_pressure_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#Psychro_c_fileName = os.path.join(output_folder, 'Output_meteo', 'psychro_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
water_mask_temp_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_Water_mask_temporary_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
veg_cover_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_vegt_cover_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
lai_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_lai_average_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#nitrogen_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_nitrogen_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#tir_emissivity_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_tir_emissivity_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#fpar_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_fpar_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#b10_emissivity_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_b10_emissivity_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#cloud_mask_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_cloud_mask_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
surf_temp_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_surface_temp_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
temp_surface_sharpened_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_surface_temp_sharpened_%s_%s_%s_%s_%s.tif' %(sensor1, res1, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
snow_mask_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_snow_mask_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
water_mask_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_water_mask_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#shadow_mask_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_shadow_mask_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Rn_24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_Rn_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#rn_inst_fileName = os.path.join(output_folder, 'Output_energy_balance', '%s_Rn_inst_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#g_inst_fileName = os.path.join(output_folder, 'Output_energy_balance', '%s_G_inst_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
temp_corr_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_temp_corr_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
ts_dem_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_ts_dem_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#surf_rough_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_surface_roughness_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
hot_pixels_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_hot_pixels_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
cold_pixels_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_cold_pixels_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#h_inst_fileName = os.path.join(output_folder, 'Output_energy_balance', '%s_h_inst_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#min_bulk_surf_res_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_%s_min_bulk_surf_resis_24_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
EF_inst_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_EFinst_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#LE_inst_fileName = os.path.join(output_folder, 'Output_energy_balance', '%s_LEinst_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
ETref_24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_ETref_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
ETA_24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_ETact_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
ETP_24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_ETpot_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
ET_24_deficit_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_ET_24_deficit_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
AF_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_Advection_Factor_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
kc_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_kc_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
kc_max_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_kc_max_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#bulk_surf_res_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_bulk_surf_resis_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#total_soil_moisture_fileName = os.path.join(output_folder, 'Output_soil_moisture', '%s_Total_soil_moisture_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#top_soil_moisture_fileName = os.path.join(output_folder, 'Output_soil_moisture', '%s_Top_soil_moisture_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#RZ_SM_fileName = os.path.join(output_folder, 'Output_soil_moisture', '%s_Root_zone_moisture_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#SM_stress_trigger_fileName = os.path.join(output_folder, 'Output_soil_moisture', '%s_Moisture_stress_trigger_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#moisture_stress_biomass_fileName = os.path.join(output_folder, 'Output_biomass_production', '%s_Moisture_stress_biomass_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#irrigation_needs_fileName = os.path.join(output_folder, 'Output_soil_moisture', '%s_irrigation_needs_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Tact24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_Tact_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Eact24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_Eact_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Tpot24_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_Tpot_24_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
T24_deficit_fileName = os.path.join(output_folder, 'Output_evapotranspiration', '%s_T_24_deficit_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#LUE_fileName = os.path.join(output_folder, 'Output_biomass_production', '%s_LUE_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Biomass_prod_fileName = os.path.join(output_folder, 'Output_biomass_production', '%s_Biomass_production_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Biomass_wp_fileName = os.path.join(output_folder, 'Output_biomass_production', '%s_Biomass_wp_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Biomass_deficit_fileName = os.path.join(output_folder, 'Output_biomass_production', '%s_Biomass_deficit_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
dst_FileName_DEM = os.path.join(output_folder, 'Output_radiation_balance', 'proyDEM_%s.tif' %res1)
dst_FileName_Ra_inst = os.path.join(output_folder, 'Output_radiation_balance', 'Ra_inst_%s_%s_%s.tif' %(res1, year, DOY))
dst_FileName_phi = os.path.join(output_folder, 'Output_radiation_balance', 'phi_%s_%s_%s.tif' %(res1, year, DOY))
# Define name that is only needed in Image type 1 (Landsat)
if Image_Type == 1:
ndvi_fileName2 = os.path.join(output_folder, 'Output_vegetation', '%s_NDVI_%s_%s_%s_%s_%s.tif' %(sensor3, res3, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#QC_Map_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_quality_mask_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
proyDEM_fileName_90 = os.path.join(output_folder, 'Output_temporary', 'proy_DEM_90.tif')
# Names for PROBA-V and VIIRS option
if Image_Type == 2:
proyVIIRS_QC_fileName = os.path.join(output_folder, 'Output_VIIRS', '%s_QC_proy_%s_%s_%s_%s_%s.tif' %(sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#proyPROBAV_Cloud_Mask_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_cloud_mask_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#proyVIIRS_Cloud_Mask_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_cloud_mask_%s_%s_%s_%s_%s.tif' %(sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
proyDEM_fileName_375 = os.path.join(output_folder, 'Output_temporary', 'proy_DEM_375.tif')
proyDEM_fileName_400 = os.path.join(output_folder, 'Output_temporary', 'proy_DEM_400.tif')
# Names for MODIS option
if Image_Type == 3:
proyMODIS_QC_fileName = os.path.join(output_folder, 'Output_MODIS', '%s_QC_proy_%s_%s_%s_%s_%s.tif' %(sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
proyDEM_fileName_1000 = os.path.join(output_folder, 'Output_temporary', 'proy_DEM_1000.tif')
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# --- Empty folder with output maps before starting -
# -------------- Calculation Landsat ----------------------------------------
print('---------------------------------------------------------')
print('------------------ General info -------------------------')
print('---------------------------------------------------------')
print('General info: ')
print(' DOY: ', DOY)
if not Image_Type == 3:
print(' Hour: ', hour)
print(' Minutes: ', '%0.3f' % minutes)
print(' UTM_Zone: ', UTM_Zone)
print('---------------------------------------------------------')
print('-------------------- Open DEM ---------------------------')
print('---------------------------------------------------------')
# Open DEM and create Latitude and longitude files
lat, lon, lat_fileName, lon_fileName = DEM_lat_lon(DEM_fileName, output_folder)
# Reproject from Geog Coord Syst to UTM -
# 1) DEM - Original DEM coordinates is Geographic: lat, lon
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset(
DEM_fileName, pixel_spacing, UTM_Zone = UTM_Zone)
band = dest.GetRasterBand(1) # Get the reprojected dem band
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape = [ncol, nrow]
# Read out the DEM band and print the DEM properties
data_DEM = band.ReadAsArray(0, 0, ncol, nrow)
#data_DEM[data_DEM<0] = 1
print('Projected DEM - ')
print(' Size: ', ncol, nrow)
print(' Upper Left corner x, y: ', ulx_dem, ',', uly_dem)
print(' Lower right corner x, y: ', lrx_dem, ',', lry_dem)
# 2) Latitude File - reprojection
# Define output name of the latitude file
lat_fileName_rep = os.path.join(output_folder, 'Output_radiation_balance',
'latitude_proj_%s_%s_%s_%s_%s.tif' %(res1, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# reproject latitude to the landsat projection and save as tiff file
lat_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset(
lat_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected latitude data
lat_proy = lat_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# 3) Longitude file - reprojection
# Define output name of the longitude file
lon_fileName_rep = os.path.join(output_folder, 'Output_radiation_balance',
'longitude_proj_%s_%s_%s_%s_%s.tif' %(res1, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# reproject longitude to the landsat projection and save as tiff file
lon_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset(lon_fileName, pixel_spacing, UTM_Zone)
# Get the reprojected longitude data
lon_proy = lon_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# Calculate slope and aspect from the reprojected DEM
deg2rad, rad2deg, slope, aspect = Calc_Gradient(data_DEM, pixel_spacing)
# Saving the reprojected maps
save_GeoTiff_proy(dest, data_DEM, proyDEM_fileName, shape, nband = 1)
save_GeoTiff_proy(dest, slope, slope_fileName, shape, nband = 1)
save_GeoTiff_proy(dest, aspect, aspect_fileName, shape, nband = 1)
save_GeoTiff_proy(lon_rep, lon_proy, lon_fileName_rep, shape, nband = 1)
save_GeoTiff_proy(lat_rep, lat_proy, lat_fileName_rep, shape, nband = 1)
print('---------------------------------------------------------')
print('-------------------- Radiation --------------------------')
print('---------------------------------------------------------')
#pdb.set_trace()
if Image_Type == 2:
# Rounded difference of the local time from Greenwich (GMT) (hours):
delta_GTM = round(np.sign(lon[nrow//2, ncol//2]) * lon[nrow//2, ncol//2] * 24 / 360)
if np.isnan(delta_GTM):
delta_GTM = round(np.sign(np.nanmean(lon)) * np.nanmean(lon) * 24 / 360)
hour += delta_GTM
if hour < 0.0:
day -= 1
hour += 24
if hour >= 24:
day += 1
hour -= 24
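# Editorial note: the offset above presumably converts the UTC acquisition time
# from the VIIRS file name to local solar time using the scene-centre longitude,
# e.g. lon = 75.5 (degrees E) gives delta_GTM = round(75.5 * 24 / 360) = +5 h,
# so 11:03 UTC becomes 16:03 local time.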
if Image_Type == 3:
hour, minutes = Modis_Time(src_FileName_LST, epsg_to, proyDEM_fileName)
# Calculation of extraterrestrial solar radiation for slope and aspect
Ra_mountain_24, Ra_inst, cos_zn, dr, phi, delta = Calc_Ra_Mountain(lon, DOY, hour, minutes, lon_proy, lat_proy, slope, aspect)
if Image_Type == 2 or Image_Type == 3:
Sun_elevation = 90 - (np.nanmean(cos_zn) * 180/np.pi)
# Save files created in module 1
save_GeoTiff_proy(dest, cos_zn, cos_zn_fileName, shape, nband = 1)
save_GeoTiff_proy(dest, Ra_mountain_24, radiation_fileName, shape, nband = 1)
save_GeoTiff_proy(dest, Ra_inst, radiation_inst_fileName, shape, nband = 1 )
save_GeoTiff_proy(dest, phi, phi_fileName, shape, nband = 1 )
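# Editorial note: for a horizontal surface the instantaneous extraterrestrial
# radiation reduces to Ra_inst = Gsc * dr * cos(theta_z), with Gsc ~ 1367 W/m2
# and dr the inverse squared relative Earth-Sun distance; Calc_Ra_Mountain
# presumably generalises this to sloping terrain with the slope and aspect grids.
# e.g. dr = 1 and cos(theta_z) = 0.8 give Ra_inst ~ 1094 W/m2.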
# Resample DEM related maps to resolution of clipped Landsat images - 30 m
# 2) DEM
DEM_resh = Reshape_Reproject_Input_data(proyDEM_fileName, dst_FileName_DEM, proyDEM_fileName)
lsc = gdal.Open(proyDEM_fileName)
# Get the extent of the remaining landsat file after clipping based on the DEM file
y_size_lsc = lsc.RasterYSize
x_size_lsc = lsc.RasterXSize
shape_lsc = [x_size_lsc, y_size_lsc]
# 4) Reshaped instantaneous radiation
Ra_inst = Reshape_Reproject_Input_data(radiation_inst_fileName, dst_FileName_Ra_inst, proyDEM_fileName)
# 5) Reshaped phi
phi = Reshape_Reproject_Input_data(phi_fileName, dst_FileName_phi, proyDEM_fileName)
# 6) Reshape meteo data if needed (when path instead of number is input)
# 6a) Instantaneous Temperature
if Temp_inst_kind_of_data == 1:
try:
Temp_inst_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Temp_inst_input.tif')
Temp_inst = Reshape_Reproject_Input_data(Temp_inst_name, Temp_inst_fileName, proyDEM_fileName)
except:
print('ERROR: Check the instantaneous Temperature input path in the meteo excel tab')
# 6b) Daily Temperature
if Temp_24_kind_of_data == 1:
try:
Temp_24_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Temp_24_input.tif')
Temp_24 = Reshape_Reproject_Input_data(Temp_24_name, Temp_24_fileName, proyDEM_fileName)
except:
print('ERROR: Check the daily Temperature input path in the meteo excel tab')
# 6c) Daily Relative Humidity
if RH_24_kind_of_data == 1:
try:
RH_24_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'RH_24_input.tif')
RH_24 = Reshape_Reproject_Input_data(RH_24_name, RH_24_fileName, proyDEM_fileName)
except:
print('ERROR: Check the daily Relative Humidity input path in the meteo excel tab')
# 6d) Instantaneous Relative Humidity
if RH_inst_kind_of_data == 1:
try:
RH_inst_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'RH_inst_input.tif')
RH_inst = Reshape_Reproject_Input_data(RH_inst_name, RH_inst_fileName, proyDEM_fileName)
except:
print('ERROR: Check the instantaneous Relative Humidity input path in the meteo excel tab')
# 6e) Daily wind speed
if Wind_24_kind_of_data == 1:
try:
Wind_24_fileName = os.path.join(output_folder, 'Output_radiation_balance','Wind_24_input.tif')
Wind_24 = Reshape_Reproject_Input_data(Wind_24_name, Wind_24_fileName, proyDEM_fileName)
Wind_24[Wind_24 < 1.5] = 1.5
except:
print('ERROR: Check the daily wind input path in the meteo excel tab')
# 6f) Instantaneous wind speed
if Wind_inst_kind_of_data == 1:
try:
Wind_inst_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Wind_inst_input.tif')
Wind_inst = Reshape_Reproject_Input_data(Wind_inst_name, Wind_inst_fileName, proyDEM_fileName)
Wind_inst[Wind_inst < 1.5] = 1.5
except:
print('ERROR: Check the instantaneous wind input path in the meteo excel tab')
# 6g) Daily incoming Radiation
if Method_Radiation_24 == 1:
if Rs_24_kind_of_data == 1:
try:
Net_radiation_daily_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Ra_24_input.tif')
Rs_24 = Reshape_Reproject_Input_data(Rs_24_name, Net_radiation_daily_fileName, proyDEM_fileName)
except:
print('ERROR: Check the daily net radiation input path in the meteo excel tab')
# 6h) Instantaneous incoming Radiation
if Method_Radiation_inst == 1:
if Rs_in_inst_kind_of_data == 1:
try:
Net_radiation_inst_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Ra_in_inst_input.tif')
Rs_in_inst = Reshape_Reproject_Input_data(Rs_in_inst_name, Net_radiation_inst_fileName, proyDEM_fileName)
except:
print('ERROR: Check the instantaneous net radiation input path in the meteo excel tab')
# 6i) Daily Transmissivity
if Method_Radiation_24 == 2:
if Transm_24_kind_of_data == 1:
try:
Transm_24_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Transm_24_input.tif')
Transm_24 = Reshape_Reproject_Input_data(Transm_24_name, Transm_24_fileName, proyDEM_fileName)
except:
print('ERROR: Check the daily transmissivity input path in the meteo excel tab')
# 6j) Instantaneous Transmissivity
if Method_Radiation_inst == 2:
if Transm_inst_kind_of_data == 1:
try:
Transm_inst_fileName = os.path.join(output_folder, 'Output_radiation_balance', 'Transm_inst_input.tif')
Transm_inst = Reshape_Reproject_Input_data(Transm_inst_name, Transm_inst_fileName, proyDEM_fileName)
except:
print('ERROR: Check the instantaneous transmissivity input path in the meteo excel tab')
# 6k) Theta saturated topsoil
if Theta_sat_top_kind_of_data == 1:
try:
Theta_sat_top_fileName = os.path.join(output_folder, 'Output_temporary','Theta_sat_top_input.tif')
Theta_sat_top = Reshape_Reproject_Input_data(Theta_sat_top_name, Theta_sat_top_fileName, proyDEM_fileName)
except:
print('ERROR: Check the saturated top soil input path in the soil excel tab')
# 6l) Theta saturated subsoil
if Theta_sat_sub_kind_of_data == 1:
try:
Theta_sat_sub_fileName = os.path.join(output_folder, 'Output_temporary','Theta_sat_sub_input.tif')
Theta_sat_sub =Reshape_Reproject_Input_data(Theta_sat_sub_name,Theta_sat_sub_fileName,proyDEM_fileName)
except:
print('ERROR: Check the saturated sub soil input path in the soil excel tab')
# 6m) Theta residual topsoil
if Theta_res_top_kind_of_data == 1:
try:
Theta_res_top_fileName = os.path.join(output_folder, 'Output_temporary','Theta_res_top_input.tif')
Theta_res_top=Reshape_Reproject_Input_data(Theta_res_top_name,Theta_res_top_fileName,proyDEM_fileName)
except:
print('ERROR: Check the residual top soil input path in the soil excel tab')
# 6n) Theta residual subsoil
if Theta_res_sub_kind_of_data == 1:
try:
Theta_res_sub_fileName = os.path.join(output_folder, 'Output_temporary','Theta_res_sub_input.tif')
Theta_res_sub=Reshape_Reproject_Input_data(Theta_res_sub_name,Theta_res_sub_fileName,proyDEM_fileName)
except:
print('ERROR: Check the residual sub soil input path in the soil excel tab')
# 6o) Wilting point
if Soil_moisture_wilting_point_kind_of_data == 1:
try:
Soil_moisture_wilting_point_fileName = os.path.join(output_folder, 'Output_temporary','Soil_moisture_wilting_point_input.tif')
Soil_moisture_wilting_point=Reshape_Reproject_Input_data(Soil_moisture_wilting_point_name,Soil_moisture_wilting_point_fileName,proyDEM_fileName)
except:
print('ERROR: Check the wilting point input path in the soil excel tab')
# 6p) Fraction field capacity
if Field_Capacity_kind_of_data == 1:
try:
Field_Capacity_fileName = os.path.join(output_folder, 'Output_temporary','Fraction_Field_Capacity_and_Saturation_input.tif')
Field_Capacity=Reshape_Reproject_Input_data(Field_Capacity_name,Field_Capacity_fileName,proyDEM_fileName)
except:
print('ERROR: Check the field capacity input path in the soil excel tab')
# 6q) Light Use Efficiency
if LUEmax_kind_of_data == 1:
try:
LUEmax_fileName = os.path.join(output_folder, 'Output_temporary','LUEmax_input.tif')
LUEmax=Reshape_Reproject_Input_data(LUEmax_name,LUEmax_fileName,proyDEM_fileName)
except:
print('ERROR: Check the LUE input path in the soil excel tab')
# 6r) Obstacle height
if h_obst_kind_of_data == 1:
try:
h_obst_fileName = os.path.join(output_folder, 'Output_temporary','h_obst_input.tif')
h_obst=Reshape_Reproject_Input_data(h_obst_name,h_obst_fileName,proyDEM_fileName)
except:
print('ERROR: Check the obstacle height input path in the soil excel tab')
# 6s) Depletion factor
if depl_factor_kind_of_data == 1:
try:
depl_factor_fileName = os.path.join(output_folder, 'Output_temporary','depl_factor_input.tif')
depl_factor=Reshape_Reproject_Input_data(depl_factor_name,depl_factor_fileName,proyDEM_fileName)
except:
print('ERROR: Check the depletion factor input path in the soil excel tab')
print('---------------------------------------------------------')
print('-------------------- Meteo part 1 -----------------------')
print('---------------------------------------------------------')
# Computation of some vegetation properties
# 1)
#constants:
Temp_lapse_rate = temp_lapse # Temperature lapse rate (K/m) - 0.0065 (original default)
Gsc = 1367 # Solar constant (W/m2)
SB_const = 5.6703E-8 # Stefan-Boltzmann constant (W/m2/K4)
# Atmospheric pressure for altitude:
Pair = 101.3 * np.power((293 - Temp_lapse_rate * DEM_resh) / 293, 5.26)
print('Pair = ', '%0.3f (kPa)' % np.nanmean(Pair))
# Psychrometric constant (kPa / °C), FAO 56, eq 8.:
Psychro_c = 0.665E-3 * Pair
# Saturation Vapor Pressure at the air temperature (kPa):
esat_inst = 0.6108 * np.exp(17.27 * Temp_inst / (Temp_inst + 237.3))
esat_24=0.6108 * np.exp(17.27 * Temp_24 / (Temp_24 + 237.3))
# Actual vapour pressure (kPa), FAO 56, eq 19.:
eact_inst = RH_inst * esat_inst / 100
eact_24 = RH_24 * esat_24 / 100
print('Instantaneous Saturation Vapor Pressure = ', '%0.3f (kPa)' % np.nanmean(esat_inst))
print('Instantaneous Actual vapour pressure = ', '%0.3f (kPa)' % np.nanmean(eact_inst))
print('Daily Saturation Vapor Pressure = ', '%0.3f (kPa)' % np.nanmean(esat_24))
print('Daily Actual vapour pressure = ', '%0.3f (kPa)' % np.nanmean(eact_24))
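# Editorial note: the relations above follow FAO-56 (eqs. 7, 8 and 11). As a
# quick sanity check at 1000 m elevation, Temp = 25 degrees C and RH = 60 %:
#   Pair      = 101.3 * ((293 - 0.0065 * 1000) / 293)**5.26  ~ 90.0 kPa
#   Psychro_c = 0.665E-3 * 90.0                              ~ 0.060 kPa/C
#   esat      = 0.6108 * exp(17.27 * 25 / (25 + 237.3))      ~ 3.17 kPa
#   eact      = 60 * 3.17 / 100                              ~ 1.90 kPa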
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#-------------------------Calculations LANDSAT ---------------------------
if Image_Type == 1:
print('---------------------------------------------------------')
print('-------------------- Open Landsat -----------------------')
print('---------------------------------------------------------')
# Define bands used for each Landsat number
if Landsat_nr == 5 or Landsat_nr == 7:
Bands = np.array([1, 2, 3, 4, 5, 7, 6])
elif Landsat_nr == 8:
Bands = np.array([2, 3, 4, 5, 6, 7, 10, 11])
else:
print('Landsat image not supported, use Landsat 5, 7 or 8')
# Open MTL landsat and get the correction parameters
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' %Name_Landsat_Image)
Lmin, Lmax, k1_c, k2_c = info_band_metadata(Landsat_meta_fileName, Bands)
print('Lmin= ', Lmin)
print('Lmax= ', Lmax)
print('k1= ', k1_c)
print('k2= ', k2_c)
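# Editorial note: Lmin/Lmax and k1/k2 feed the standard USGS conversions that
# Landsat_Reflect and Calc_surface_temp presumably apply:
#   radiance:               L = (Lmax - Lmin) / (Qcal_max - Qcal_min) * (DN - Qcal_min) + Lmin
#   brightness temperature: T = k2 / ln(k1 / L + 1)   [Kelvin]
# e.g. for Landsat 8 band 10 (k1 = 774.89, k2 = 1321.08) a radiance of
# 10.5 W/m2/sr/um gives T = 1321.08 / ln(774.89 / 10.5 + 1) ~ 306 K.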
# Calibration parameters - comment
#Lmin_L5 = [-2.8, -1.2, -1.5, -0.37, 1.2378, -0.150] # REVISE LS 5 TM
#Lmax_L5 = [296.8, 204.3, 206.2, 27.19, 15.303, 14.38] # REVISE LS 5 TM
# Mean solar exo-atmospheric irradiance for each band (W/m2/microm)
# for the different Landsat images (L5, L7, or L8)
ESUN_L5 = np.array([1983, 1796, 1536, 1031, 220, 83.44])
ESUN_L7 = np.array([1997, 1812, 1533, 1039, 230.8, 84.9])
ESUN_L8 = np.array([1973.28, 1842.68, 1565.17, 963.69, 245, 82.106])
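# Editorial note: with these ESUN values the top-of-atmosphere reflectance of a
# band is conventionally rho = pi * L * d**2 / (ESUN * cos(theta_s)), where d is
# the Earth-Sun distance in AU and theta_s the solar zenith angle;
# Landsat_Reflect presumably evaluates this with the cos_zn and dr grids from
# module 1. e.g. L = 100 W/m2/sr/um, ESUN = 1997, cos(theta_s) = 0.9, d = 1 AU
# gives rho ~ 0.175.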
# Open one band - To get the metadata of the landsat images only once (to get the extent)
src_FileName = os.path.join(input_folder, '%s_B2.TIF' %Name_Landsat_Image) # before 10!
ls, band_data, ulx, uly, lrx, lry, x_size_ls, y_size_ls = Get_Extend_Landsat(src_FileName)
print(' Upper Left corner x, y: ', ulx, ', ', uly)
print(' Lower right corner x, y: ', lrx, ', ', lry)
# Crop the Landsat images to the DEM extent -
dst_FileName = os.path.join(output_folder, 'Output_temporary', '%s_cropped_LS_b2_%s_%s_%s.tif' %(sensor1, res1, year, DOY)) # Before 10 !!
dir_name = os.path.dirname(dst_FileName) # Directory of the file
# If the directory does not exist, create it.
if not os.path.exists(dir_name):
os.makedirs(dir_name)
lsc, ulx, uly, lrx, lry, epsg_to = reproject_dataset_example(src_FileName, proyDEM_fileName)
# Get the extent of the remaining landsat file after clipping based on the DEM file
y_size_lsc = lsc.RasterYSize
x_size_lsc = lsc.RasterXSize
shape_lsc = [x_size_lsc, y_size_lsc]
print('--- ')
print('Cropped LANDSAT Image - ')
print(' Size :', x_size_lsc, y_size_lsc)
print(' Upper Left corner x, y: ', ulx, ', ', uly)
print(' Lower right corner x, y: ', lrx, ', ', lry)
# output names for resampling
dst_LandsatMask = os.path.join(output_folder, 'Output_temporary', '%s_cropped_LANDSATMASK_%s_%s_%s.tif' %(sensor1, res1, year, DOY))
# Open the Landsat data only if not all additional inputs are defined.
# Open the Additional input excel sheet
ws = wb['Additional_Input']
# If all additional fields are filled in then do not open the Landsat data
if (ws['B%d' % number].value) is None or (ws['C%d' % number].value) is None or (ws['D%d' % number].value) is None or (ws['E%d' % number].value) is None:
# Collect the landsat Thermal and Spectral data
# 1. Create mask for the landsat images
# 2. Save the Thermal data in a 3D array
# 3. Save the Spectral data in a 3D array
# 1.)
# find clipping extent for landsat images: NTIR is larger than VTIR
# Open the original Landsat thermal band (band 6 for LS5/7, bands 10 and 11 for LS8)
# if landsat 5 or 7 is used then first create a mask for removing the no data stripes
if Landsat_nr == 5 or Landsat_nr == 7:
src_FileName = os.path.join(input_folder, '%s_B6.TIF' % (Name_Landsat_Image)) #open smallest band
if not os.path.exists(src_FileName):
src_FileName = os.path.join(input_folder, '%s_B6_VCID_2.TIF' % (Name_Landsat_Image))
src_FileName_2 = os.path.join(input_folder, '%s_B1.TIF' % (Name_Landsat_Image)) #open smallest band
src_FileName_3 = os.path.join(input_folder, '%s_B3.TIF' % (Name_Landsat_Image)) #open smallest band
src_FileName_4 = os.path.join(input_folder, '%s_B4.TIF' % (Name_Landsat_Image)) #open smallest band
src_FileName_5 = os.path.join(input_folder, '%s_B7.TIF' % (Name_Landsat_Image)) #open smallest band
src_FileName_6 = os.path.join(input_folder, '%s_B2.TIF' % (Name_Landsat_Image)) #open smallest band
src_FileName_7 = os.path.join(input_folder, '%s_B5.TIF' % (Name_Landsat_Image)) #open smallest band
ls_data=Open_landsat(src_FileName,proyDEM_fileName)
ls_data_2=Open_landsat(src_FileName_2,proyDEM_fileName)
ls_data_3=Open_landsat(src_FileName_3,proyDEM_fileName)
ls_data_4=Open_landsat(src_FileName_4,proyDEM_fileName)
ls_data_5=Open_landsat(src_FileName_5,proyDEM_fileName)
ls_data_6=Open_landsat(src_FileName_6,proyDEM_fileName)
ls_data_7=Open_landsat(src_FileName_7,proyDEM_fileName)
# create and save the landsat mask for all images based on the smallest band
ClipLandsat=np.ones((shape_lsc[1], shape_lsc[0]))
ClipLandsat=np.where(np.logical_or(np.logical_or(np.logical_or(np.logical_or(np.logical_or(np.logical_or(ls_data==0,ls_data_2==0),ls_data_3==0),ls_data_4==0),ls_data_5==0),ls_data_6==0),ls_data_7==0),0,1)
# If landsat 8 then use landsat band 10 and 11
elif Landsat_nr == 8:
src_FileName_11 = os.path.join(input_folder, '%s_B11.TIF' % (Name_Landsat_Image)) #open smallest band
ls_data_11=Open_landsat(src_FileName_11,proyDEM_fileName)
src_FileName_10 = os.path.join(input_folder, '%s_B10.TIF' % (Name_Landsat_Image)) #open smallest band
ls_data_10=Open_landsat(src_FileName_10, proyDEM_fileName)
# create and save the landsat mask for all images based on band 10 and 11
ClipLandsat=np.ones((shape_lsc[1], shape_lsc[0]))
ClipLandsat=np.where(np.logical_or(ls_data_11==0, ls_data_10==0),0,1)
else:
print('Landsat image not supported, use Landsat 5, 7 or 8')
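# Editorial note: the nested np.logical_or chains above simply flag pixels where
# any of the opened bands equals zero. An equivalent, more readable construction
# (illustrative only, not used by pySEBAL itself) would be:
#
#     bands = [ls_data, ls_data_2, ls_data_3, ls_data_4, ls_data_5, ls_data_6, ls_data_7]
#     ClipLandsat = np.where(np.any(np.stack(bands) == 0, axis=0), 0, 1)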
# Create Cloud mask if a BQA map is available (newer version Landsat images)
BQA_LS_Available = 0
if os.path.exists(os.path.join(input_folder, '%s_BQA.TIF' %Name_Landsat_Image)):
src_FileName_BQA = os.path.join(input_folder, '%s_BQA.TIF' %Name_Landsat_Image)
ls_data_BQA = Open_landsat(src_FileName_BQA,proyDEM_fileName)
if Landsat_nr == 8:
Cloud_Threshold = 3000
if Landsat_nr == 5 or Landsat_nr == 7:
Cloud_Threshold = 700
QC_mask_Cloud = np.copy(ls_data_BQA)
QC_mask_Cloud[ls_data_BQA < Cloud_Threshold] = 0
QC_mask_Cloud[ls_data_BQA >= Cloud_Threshold] = 1
BQA_LS_Available = 1
# Open data of the landsat mask
ls_data=Open_landsat(src_FileName, proyDEM_fileName)
# Save Landsat mask as a tiff file
save_GeoTiff_proy(lsc, ClipLandsat, dst_LandsatMask, shape_lsc, nband=1)
# 2.)
# Create 3D array to store the Thermal band(s) (nr 10 (& 11) for LS8 and nr 6 for LS5/7)
therm_data = Landsat_therm_data(Bands,input_folder,Name_Landsat_Image,output_folder,shape_lsc,ClipLandsat, proyDEM_fileName)
# 3.)
# Create 3D array to store Spectral radiance and Reflectivity for each band
Reflect, Spec_Rad = Landsat_Reflect(Bands, input_folder, Name_Landsat_Image, output_folder, shape_lsc, ClipLandsat, Lmax, Lmin, ESUN_L5, ESUN_L7, ESUN_L8, cos_zn, dr, Landsat_nr, proyDEM_fileName)
# save spectral data
#for i in range(0,6):
#spec_ref_fileName = os.path.join(output_folder, 'Output_radiation_balance','%s_spectral_reflectance_B%s_%s_%s_%s_%s_%s.tif' %(Bands[i], sensor1, res3, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
#save_GeoTiff_proy(lsc, Reflect[:, :, i], spec_ref_fileName, shape_lsc, nband=1)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ---- MODULE 3 - Vegetation properties
print('---------------------------------------------------------')
print('-------------------- Module 3 ---------------------------')
print('---------------------------------------------------------')
# Check NDVI
try:
if (ws['B%d' % number].value) is not None:
# Output folder NDVI
ndvi_fileName2 = os.path.join(output_folder, 'Output_vegetation', 'User_NDVI_%s_%s_%s_%s_%s.tif' %(res3, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
NDVI=Reshape_Reproject_Input_data(r'%s' %str(ws['B%d' % number].value),ndvi_fileName2,proyDEM_fileName)
water_mask_temp = np.zeros((shape_lsc[1], shape_lsc[0]))
water_mask_temp[NDVI < 0.0] = 1.0
else:
# use the Landsat reflectance to calculate the surface albedo, NDVI and SAVI
NDVI = Calc_NDVI(Reflect)
# save landsat NDVI
ndvi_fileName2 = os.path.join(output_folder, 'Output_vegetation', '%s_NDVI_%s_%s_%s_%s_%s.tif' %(sensor3, res3, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, NDVI, ndvi_fileName2, shape_lsc, nband=1)
# Calculate temporary water mask
water_mask_temp=Water_Mask(shape_lsc,Reflect)
except:
assert "Please check the NDVI input path"
# Check SAVI
try:
if (ws['C%d' % number].value) is not None:
# Output folder SAVI
savi_fileName = os.path.join(output_folder, 'Output_vegetation', 'User_SAVI_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
SAVI=Reshape_Reproject_Input_data(r'%s' %str(ws['C%d' % number].value),savi_fileName,proyDEM_fileName)
else:
# use the Landsat reflectance to calculate the SAVI
SAVI = Calc_SAVI(Reflect, L_SAVI)
# save landsat SAVI
savi_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_SAVI_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, SAVI, savi_fileName, shape_lsc, nband=1)
except:
assert "Please check the SAVI input path"
# Check Surface albedo
try:
if (ws['D%d' % number].value) is not None:
# Output folder surface albedo
surface_albedo_fileName = os.path.join(output_folder, 'Output_vegetation','User_surface_albedo_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
Surf_albedo=Reshape_Reproject_Input_data(r'%s' %str(ws['D%d' % number].value),surface_albedo_fileName,proyDEM_fileName)
else:
surface_albedo_fileName = os.path.join(output_folder, 'Output_vegetation','%s_surface_albedo_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# use the Landsat reflectance to calculate the surface albedo, NDVI and SAVI
Surf_albedo = Calc_albedo(Reflect,path_radiance,Apparent_atmosf_transm)
# save landsat surface albedo
save_GeoTiff_proy(lsc, Surf_albedo, surface_albedo_fileName, shape_lsc, nband=1)
except:
assert "Please check the Albedo input path"
# calculate vegetation properties
FPAR,tir_emis,Nitrogen,vegt_cover,LAI,b10_emissivity=Calc_vegt_para(NDVI,SAVI,water_mask_temp,shape_lsc)
# Save output maps that will be used in SEBAL
save_GeoTiff_proy(lsc, water_mask_temp, water_mask_temp_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, FPAR, fpar_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, tir_emis, tir_emissivity_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, Nitrogen, nitrogen_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, vegt_cover, veg_cover_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, LAI, lai_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, b10_emissivity, b10_emissivity_fileName, shape_lsc, nband=1)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ---- MODULE 4 - Surface temperature, Cloud, Water, and Snow mask
print('---------------------------------------------------------')
print('-------------------- Module 4 ---------------------------')
print('---------------------------------------------------------')
# Check if a surface temperature dataset is defined. If so use this one instead of the Landsat, otherwise Landsat
# Check Surface temperature
try:
if (ws['E%d' % number].value) is not None:
# Output folder surface temperature
surf_temp_fileName = os.path.join(output_folder, 'Output_vegetation','User_surface_temp_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
temp_surface_sharpened=Reshape_Reproject_Input_data(r'%s' %str(ws['E%d' % number].value),surf_temp_fileName,proyDEM_fileName)
cloud_mask = np.zeros([int(np.shape(temp_surface_sharpened)[1]),int(np.shape(temp_surface_sharpened)[0])])
Thermal_Sharpening_not_needed = 1
else:
# Calculate surface temperature and create a cloud mask
Surface_temp,cloud_mask = Calc_surface_temp(Temp_inst, Landsat_nr, Lmax, Lmin, therm_data, b10_emissivity, k1_c, k2_c, eact_inst, shape_lsc, water_mask_temp, Bands_thermal, Rp, tau_sky, surf_temp_offset, Image_Type)
Thermal_Sharpening_not_needed = 0
# Replace the cloud mask if a better one is already available (from the BQA band)
if BQA_LS_Available == 1:
cloud_mask = QC_mask_Cloud
Surface_temp[cloud_mask == 1] = np.nan
except:
assert "Please check the surface temperature input path"
# Perform Thermal sharpening for the thermal LANDSAT image
if Thermal_Sharpening_not_needed == 0:
# Upscale DEM to 90m
pixel_spacing_upscale=90
dest_90, ulx_dem_90, lry_dem_90, lrx_dem_90, uly_dem_90, epsg_to = reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
DEM_90 = dest_90.GetRasterBand(1).ReadAsArray()
Y_raster_size_90 = dest_90.RasterYSize
X_raster_size_90 = dest_90.RasterXSize
shape_90=([X_raster_size_90, Y_raster_size_90])
save_GeoTiff_proy(dest_90, DEM_90, proyDEM_fileName_90, shape_90, nband=1)
# save landsat surface temperature
surf_temp_fileName = os.path.join(output_folder, 'Output_vegetation','%s_%s_surface_temp_%s_%s_%s_%s_%s.tif' %(sensor1, sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, Surface_temp, surf_temp_fileName, shape_lsc, nband=1)
# Upscale NDVI data
dest_up, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
ndvi_fileName2, proyDEM_fileName_90)
NDVI_Landsat_up = dest_up.GetRasterBand(1).ReadAsArray()
# Upscale Thermal data
dest_up, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
surf_temp_fileName, proyDEM_fileName_90)
surface_temp_up = dest_up.GetRasterBand(1).ReadAsArray()
# Define the width of the moving window box
Box=7
# Apply thermal sharpening
temp_surface_sharpened = Thermal_Sharpening(surface_temp_up, NDVI_Landsat_up, NDVI, Box, dest_up, output_folder, ndvi_fileName2, shape_lsc, lsc, temp_surface_sharpened_fileName)
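# Illustrative sketch (commented out): the NDVI-based disaggregation idea behind
# Thermal_Sharpening (defined elsewhere in this script): fit a linear relation
# between coarse-resolution temperature and coarse-resolution NDVI and apply it
# to the fine-resolution NDVI. This global fit ignores the moving window of
# width Box used by the real routine.
#def Thermal_sharpen_sketch(temp_coarse, ndvi_coarse, ndvi_fine):
#    ok = np.isfinite(temp_coarse) & np.isfinite(ndvi_coarse)
#    slope, offset = np.polyfit(ndvi_coarse[ok], temp_coarse[ok], 1)
#    return offset + slope * ndvi_fine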
# Divide the temporary water mask into a snow mask and a water mask using the surface temperature
snow_mask, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = CalculateSnowWaterMask(NDVI,shape_lsc,water_mask_temp,temp_surface_sharpened)
# Save the water mask
save_GeoTiff_proy(lsc, water_mask, water_mask_fileName, shape_lsc, nband=1)
if Thermal_Sharpening_not_needed == 1:
temp_surface_sharpened[water_mask == 1] = Surface_temp[water_mask == 1]
# save landsat surface temperature
surf_temp_fileName = os.path.join(output_folder, 'Output_vegetation','%s_%s_surface_temp_sharpened_%s_%s_%s_%s_%s.tif' %(sensor1, sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, temp_surface_sharpened, surf_temp_fileName, shape_lsc, nband=1)
# remove low temperature values
temp_surface_sharpened[temp_surface_sharpened<=253.0]=np.nan
# Calculate the temperature of the water
Temperature_water_std=np.nanstd(temp_surface_sharpened[water_mask != 0])
Temperature_water_mean=np.nanmean(temp_surface_sharpened[water_mask != 0])
print('Mean water Temperature = %0.3f (K)' % Temperature_water_mean)
print('Standard deviation water temperature = %0.3f (K)' % Temperature_water_std)
# Check if Quality dataset is defined. If so use this one instead of using Landsat otherwise landsat
# Check Quality
try:
if (ws['F%d' % number].value) is not None:
# Output folder QC defined by the user
QC_Map_fileName = os.path.join(output_folder, 'Output_temporary', 'User_quality_mask_%s_%s_%s.tif' %(res2, year, DOY))
# Reproject and reshape the users quality map
QC_Map = Reshape_Reproject_Input_data(r'%s' %str(ws['F%d' % number].value),QC_Map_fileName,proyDEM_fileName)
# if the users QC data cannot be reprojected then use the original Landsat data as imported into SEBAL
else:
# if there are no cold water pixels than use cold vegetation pixels
if np.isnan(Temperature_water_mean) == True or Temperature_water_mean < 0.0:
ts_cold_land=ts_moist_veg_min
else:
ts_cold_land=Temperature_water_mean
# Make shadow mask
mask=np.zeros((shape_lsc[1], shape_lsc[0]))
mask[np.logical_and.reduce((temp_surface_sharpened < (ts_cold_land+Temperature_offset_shadow),Surf_albedo < Maximum_shadow_albedo,water_mask!=1))]=1
shadow_mask=np.copy(mask)
shadow_mask = Create_Buffer(shadow_mask)
# Make cloud mask
if BQA_LS_Available != 1:
mask=np.zeros((shape_lsc[1], shape_lsc[0]))
mask[np.logical_and.reduce((temp_surface_sharpened < (ts_cold_land+Temperature_offset_clouds),Surf_albedo > Minimum_cloud_albedo,NDVI<0.7,snow_mask!=1))]=1
cloud_mask=np.copy(mask)
cloud_mask = Create_Buffer(cloud_mask)
# Save output maps
#save_GeoTiff_proy(lsc, cloud_mask, cloud_mask_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, snow_mask, snow_mask_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, shadow_mask, shadow_mask_fileName, shape_lsc, nband=1)
# Total Quality Mask
QC_Map = np.zeros(cloud_mask.shape)  # start from zeros so that unflagged pixels count as valid
ClipLandsat_reverse = np.where(ClipLandsat==1,0,1)
Landsat_Mask = cloud_mask + snow_mask + shadow_mask + ClipLandsat_reverse
QC_Map[Landsat_Mask>0] = 1
# Output folder QC defined by the user
#QC_Map_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_quality_mask_%s_%s_%s.tif.tif' %(sensor1, res2, year, DOY))
# Save the PROBA-V NDVI as tif file
#save_GeoTiff_proy(lsc, QC_Map, QC_Map_fileName, shape, nband=1)
#save_GeoTiff_proy(dest, QC_Map, QC_Map_fileName, shape, nband=1)
except:
assert "Please check the quality path"
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#-------------------------Calculations VIIRS and PROBA-V-----------------------
if Image_Type == 2:
print('---------------------------------------------------------')
print('------------------- Collect PROBA-V data ----------------')
print('---------------------------------------------------------')
# Open the Additional input excel sheet
ws = wb['Additional_Input']
# If all additional fields are filled in then do not open the PROBA-V data
if ((ws['B%d' % number].value) or (ws['C%d' % number].value) or (ws['D%d' % number].value)) is None:
# Define the bands that will be used
bands=['SM', 'B1', 'B2', 'B3', 'B4'] #'SM', 'BLUE', 'RED', 'NIR', 'SWIR'
# Set the index number at 0
index=0
# create a zero array with the shape of the reprojected DEM file
data_PROBAV=np.zeros((shape_lsc[1], shape_lsc[0]))
spectral_reflectance_PROBAV=np.zeros([shape_lsc[1], shape_lsc[0], 5])
# constants
n188_float=248 # Status Map value treated as clear sky; it is not fully documented whether this constant holds for all images
# write the data one by one to the spectral_reflectance_PROBAV
for bandnmr in bands:
# Translate the PROBA-V names to the Landsat band names
Band_number = {'SM':7,'B1':8,'B2':10,'B3':9,'B4':11}
# Open the dataset
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.hdf5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
# Open the .hdf file
name_out = os.path.join(input_folder, '%s_test.tif' % (Name_PROBAV_Image))
name_in = g.GetSubDatasets()[Band_number[bandnmr]][0]
# Get environmental variable for windows and Linux
if platform.system() == 'Windows':
SEBAL_env_paths = os.environ["SEBAL"].split(';')
GDAL_env_path = SEBAL_env_paths[0]
GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe')
else:
SEBAL_env_paths = os.environ["SEBAL"]
GDAL_env_path = SEBAL_env_paths
GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate')
# run gdal translate command
FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, name_out)
os.system(FullCmd)
# Open data
dest_PV = gdal.Open(name_out)
Data = dest_PV.GetRasterBand(1).ReadAsArray()
dest_PV = None
# Remove temporary file
os.remove(name_out)
# Define the x and y spacing
Meta_data = g.GetMetadata()
#Lat_Bottom = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LATITUDE'])
Lat_Top = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LATITUDE'])
Lon_Left = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LONGITUDE'])
#Lon_Right = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LONGITUDE'])
Pixel_size = float((Meta_data['LEVEL3_GEOMETRY_VNIR_VAA_MAPPING']).split(' ')[-3])
# Define the georeference of the PROBA-V data
geo_PROBAV=[Lon_Left-0.5*Pixel_size, Pixel_size, 0, Lat_Top+0.5*Pixel_size, 0, -Pixel_size] #0.000992063492063
# Define the name of the output file
PROBAV_data_name=os.path.join(output_folder, 'Output_PROBAV', '%s_%s.tif' % (Name_PROBAV_Image,bandnmr))
dir_name_PROBAV = os.path.dirname(PROBAV_data_name)
# If the directory does not exist, make it.
if not os.path.exists(dir_name_PROBAV):
os.mkdir(dir_name_PROBAV)
# create gtiff output with the PROBA-V band
fmt = 'GTiff'
driver = gdal.GetDriverByName(fmt)
dir_name = os.path.dirname(PROBAV_data_name)
dst_dataset = driver.Create(PROBAV_data_name, int(Data.shape[1]), int(Data.shape[0]), 1,gdal.GDT_Float32)
dst_dataset.SetGeoTransform(geo_PROBAV)
# set the reference info
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS("WGS84")
dst_dataset.SetProjection(srs.ExportToWkt())
# write the array in the geotiff band
dst_dataset.GetRasterBand(1).WriteArray(Data)
dst_dataset = None
# Open the PROBA-V band in SEBAL
g=gdal.Open(PROBAV_data_name.replace("\\","/"))
# If the data cannot be opened, change the extension
if g is None:
PROBAV_data_name=os.path.join(input_folder, '%s_%s.tiff' % (Name_PROBAV_Image,bandnmr))
# Reproject the PROBA-V band to match DEM's resolution
PROBAV, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
PROBAV_data_name, proyDEM_fileName)
# Open the reprojected PROBA-V band data
data_PROBAV_DN = PROBAV.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# Define the filename to store the cropped Landsat image
dst_FileName = os.path.join(output_folder, 'Output_PROBAV','proy_PROBAV_%s.tif' % bandnmr)
# close the PROBA-V
g=None
# If the band data is not SM change the DN values into PROBA-V values and write into the spectral_reflectance_PROBAV
if bandnmr != 'SM':
data_PROBAV[:, :]=data_PROBAV_DN/2000.  # scale DN to TOA reflectance (floor division would truncate the values)
spectral_reflectance_PROBAV[:, :, index]=data_PROBAV[:, :]
# If the band data is the SM band then write the data into the spectral_reflectance_PROBAV and create cloud mask
else:
data_PROBAV[:, :]=data_PROBAV_DN
Cloud_Mask_PROBAV=np.zeros((shape_lsc[1], shape_lsc[0]))
Cloud_Mask_PROBAV[data_PROBAV[:,:]!=n188_float]=1
spectral_reflectance_PROBAV[:, :, index]=Cloud_Mask_PROBAV
#save_GeoTiff_proy(lsc, Cloud_Mask_PROBAV, proyPROBAV_Cloud_Mask_fileName, shape_lsc, nband=1)
# Change the spectral reflectance to meet certain limits
spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]<=0,np.nan,spectral_reflectance_PROBAV[:, :, index])
spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]>=150,np.nan,spectral_reflectance_PROBAV[:, :, index])
# Save the PROBA-V as a tif file
save_GeoTiff_proy(lsc, spectral_reflectance_PROBAV[:, :, index], dst_FileName, shape_lsc, nband=1)
# Go to the next index
index=index+1
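# Illustrative sketch (commented out): the DN handling in the loop above as a
# standalone helper. The 1/2000 reflectance scale factor and the clear-sky
# status value of 248 are the assumptions used in this script; check the
# PROBA-V product manual of the collection you use before relying on them.
#def PROBAV_band_sketch(dn, is_status_band, clear_value=248):
#    if is_status_band:
#        return np.where(dn != clear_value, 1.0, 0.0)     # 1 = not clear sky
#    refl = dn / 2000.0                                   # DN -> TOA reflectance
#    return np.where((refl <= 0) | (refl >= 150), np.nan, refl)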
else:
Cloud_Mask_PROBAV=np.zeros((shape_lsc[1], shape_lsc[0]))
#save_GeoTiff_proy(lsc, Cloud_Mask_PROBAV, proyPROBAV_Cloud_Mask_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('----------------- Calculate Vegetation data -------------')
print('---------------------------------------------------------')
# Band order in spectral_reflectance_PROBAV
# 0 = SM (status map, used here as the cloud mask)
# 1 = BLUE
# 2 = RED
# 3 = NIR
# 4 = SWIR
# Check if a NDVI, SAVI, or Surface Albedo dataset is defined. If so use this one instead of the PROBAV otherwise PROBAV
# Check NDVI
try:
if (ws['B%d' % number].value) is not None:
# Output folder NDVI defined by the user
ndvi_fileName = os.path.join(output_folder, 'Output_vegetation', 'User_NDVI_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reproject and reshape users NDVI
NDVI=Reshape_Reproject_Input_data(r'%s' %str(ws['B%d' % number].value),ndvi_fileName, proyDEM_fileName)
NDVI_PROBAV_MAX = np.nanmax(NDVI)
NDVI_PROBAV_SD = np.nanstd(NDVI)
print('NDVI User max ' , NDVI_PROBAV_MAX)
print('NDVI User sd' , NDVI_PROBAV_SD)
# Create Water mask based on PROBA-V
water_mask = np.zeros((shape_lsc[1], shape_lsc[0]))
water_mask[NDVI<0.0]=1
# if the users NDVI data cannot be reprojected then use the original PROBA-V data as imported into SEBAL
else:
# Calculate the NDVI based on PROBA-V
n218_memory = spectral_reflectance_PROBAV[:, :, 2] + spectral_reflectance_PROBAV[:, :, 3]
NDVI = np.zeros((shape_lsc[1], shape_lsc[0]))
NDVI[n218_memory != 0] = ( spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] - spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] )/ ( spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] + spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] )
NDVI_PROBAV_MAX = np.nanmax(NDVI)
NDVI_PROBAV_SD = np.nanstd(NDVI)
print('NDVI PROBA-V max ' , NDVI_PROBAV_MAX)
print('NDVI PROBA-V sd' , NDVI_PROBAV_SD)
# Create Water mask based on PROBA-V
water_mask = np.zeros((shape_lsc[1], shape_lsc[0]))
water_mask[np.logical_and(spectral_reflectance_PROBAV[:, :, 2] >= spectral_reflectance_PROBAV[:, :, 3],DEM_resh>0)]=1
# Define users NDVI output name
ndvi_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_NDVI_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the PROBA-V NDVI as tif file
save_GeoTiff_proy(lsc, NDVI, ndvi_fileName, shape_lsc, nband=1)
except:
assert "Please check the PROBA-V path, was not able to create NDVI"
# Check SAVI
try:
if (ws['C%d' % number].value) is not None:
# Output folder SAVI defined by the user
savi_fileName = os.path.join(output_folder, 'Output_vegetation', 'User_SAVI_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reproject and reshape users SAVI
SAVI=Reshape_Reproject_Input_data(r'%s' %str(ws['C%d' % number].value),savi_fileName,proyDEM_fileName)
# if the users SAVI data cannot be reprojected then use the original PROBA-V data as imported into SEBAL
else:
# Calculate SAVI based on PROBA-V
SAVI = (1+L_SAVI)*(spectral_reflectance_PROBAV[:, :, 3]-spectral_reflectance_PROBAV[:, :, 2])/(L_SAVI+spectral_reflectance_PROBAV[:, :, 2]+spectral_reflectance_PROBAV[:, :, 3])
# Define users SAVI output name
savi_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_SAVI_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the PROBA-V SAVI as tif file
save_GeoTiff_proy(lsc, SAVI, savi_fileName, shape_lsc, nband=1)
except:
assert "Please check the PROBA-V path, was not able to create SAVI"
# Check surface albedo
try:
if (ws['D%d' % number].value) is not None:
# Output folder surface albedo
surface_albedo_fileName = os.path.join(output_folder, 'Output_vegetation','User_surface_albedo_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reproject and reshape users surface albedo
Surf_albedo=Reshape_Reproject_Input_data(r'%s' %str(ws['D%d' % number].value),surface_albedo_fileName,proyDEM_fileName)
# if the users surface albedo data cannot be reprojected then use the original PROBA-V data as imported into SEBAL
else:
# Calculate surface albedo based on PROBA-V
Surf_albedo = 0.219 * spectral_reflectance_PROBAV[:, :, 1] + 0.361 * spectral_reflectance_PROBAV[:, :, 2] + 0.379 * spectral_reflectance_PROBAV[:, :, 3] + 0.041 * spectral_reflectance_PROBAV[:, :, 4]
# Set limit surface albedo
Surf_albedo = np.minimum(Surf_albedo, 0.6)
# Define users surface albedo output name
surface_albedo_fileName = os.path.join(output_folder, 'Output_vegetation','%s_surface_albedo_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the PROBA-V surface albedo as tif file
save_GeoTiff_proy(lsc, Surf_albedo, surface_albedo_fileName, shape_lsc, nband=1)
except:
assert "Please check the PROBA-V path, was not able to create Albedo"
# Calculate the Fpar, TIR, Nitrogen, Vegetation Cover, LAI and b10_emissivity based on PROBA-V
FPAR,tir_emis,Nitrogen_PROBAV,vegt_cover,LAI,b10_emissivity_PROBAV=Calc_vegt_para(NDVI,SAVI,water_mask,shape_lsc)
# Save the paramaters as a geotiff
save_GeoTiff_proy(lsc, water_mask, water_mask_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, tir_emis, tir_emissivity_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, Nitrogen_PROBAV, nitrogen_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, vegt_cover, veg_cover_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, LAI, lai_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('------------------- Collect VIIRS data ------------------')
print('---------------------------------------------------------')
# Open Additional_Input sheet in the excel
ws = wb['Additional_Input']
# end results are reprojected maps of:
# 1) the VIIRS thermal (brightness temperature) map
# 2) the VIIRS quality (QC) map
# 1) Get the VIIRS Thermal map 100m
# Upscale DEM to 375m
pixel_spacing_upscale = 375
dest_375, ulx_dem_375, lry_dem_375, lrx_dem_375, uly_dem_375, epsg_to = reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
DEM_375 = dest_375.GetRasterBand(1).ReadAsArray()
Y_raster_size_375 = dest_375.RasterYSize
X_raster_size_375 = dest_375.RasterXSize
shape_375=([X_raster_size_375, Y_raster_size_375])
save_GeoTiff_proy(dest_375, DEM_375, proyDEM_fileName_375, shape_375, nband=1)
try:
if (ws['E%d' % number].value) is not None:
# Define output folder Thermal VIIRS by the user
proyVIIRS_fileName_100 = os.path.join(output_folder, 'Output_VIIRS','User_TB_%s_%s_%s.tif' %(res2, year, DOY))
# Reshape and reproject the thermal data given by the user and resample it to the model grid (proyDEM_fileName)
temp_surface_sharpened = Reshape_Reproject_Input_data(r'%s' %str(ws['E%d' % number].value), proyVIIRS_fileName_100, proyDEM_fileName)
Thermal_Sharpening_not_needed = 1
# Divide the temporary water mask into a snow mask and a water mask using the surface temperature
Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = CalculateSnowWaterMask(NDVI,shape_lsc,water_mask,temp_surface_sharpened)
data_VIIRS_QC = np.zeros((shape_lsc[1], shape_lsc[0]))
data_VIIRS_QC[np.isnan(temp_surface_sharpened)] = 1
else:
# Define the VIIRS thermal data name
VIIRS_data_name=os.path.join(input_folder, '%s' % (Name_VIIRS_Image_TB))
# Reproject VIIRS thermal data
VIIRS, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
VIIRS_data_name, proyDEM_fileName)
# Open VIIRS thermal data
data_VIIRS = VIIRS.GetRasterBand(1).ReadAsArray()
# Define the thermal VIIRS output name
proyVIIRS_fileName = os.path.join(output_folder, 'Output_VIIRS','%s_TB_%s_%s_%s.tif' %(sensor2, res2, year, DOY))
# Save the thermal VIIRS data
save_GeoTiff_proy(lsc, data_VIIRS, proyVIIRS_fileName, shape_lsc, nband=1)
Thermal_Sharpening_not_needed = 0
# 2) Get the VIIRS Quality map 100m
# Check Quality
if Name_VIIRS_Image_QC != 'None':
# Define the VIIRS Quality data name
VIIRS_data_name=os.path.join(input_folder, '%s' % (Name_VIIRS_Image_QC))
# Reproject VIIRS Quality data
VIIRS, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
VIIRS_data_name, proyDEM_fileName)
# Open VIIRS Quality data
data_VIIRS_QC = VIIRS.GetRasterBand(1).ReadAsArray()
# Save the reprojected VIIRS dataset QC
save_GeoTiff_proy(lsc, data_VIIRS_QC, proyVIIRS_QC_fileName, shape_lsc, nband=1)
else:
data_VIIRS_QC = np.zeros((shape_lsc[1], shape_lsc[0]))
except:
assert "Please check the VIIRS input path"
# ------ Upscale TIR_Emissivity_PROBAV, cloud mask PROBAV and NDVI for LST calculation at 375m resolution ----
if Thermal_Sharpening_not_needed == 0:
##### VIIRS brightness temperature to land surface temperature
# Create cloud mask VIIRS (100m)
data_VIIRS[NDVI==0]=0
Cloud_Mask_VIIRS=np.zeros((shape_lsc[1], shape_lsc[0]))
Cloud_Mask_VIIRS[data_VIIRS_QC!=0]=1
#save_GeoTiff_proy(lsc, Cloud_Mask_VIIRS, proyVIIRS_Cloud_Mask_fileName, shape_lsc, nband=1)
# Create total VIIRS and PROBA-V cloud mask (100m)
QC_Map=np.zeros((shape_lsc[1], shape_lsc[0]))
QC_Map=np.where(np.logical_or(data_VIIRS_QC==1, Cloud_Mask_PROBAV==1),1,0)
# Set the conditions for the brightness temperature (100m)
term_data=data_VIIRS
term_data=np.where(data_VIIRS>=250, data_VIIRS,0)
brightness_temp=np.zeros((shape_lsc[1], shape_lsc[0]))
brightness_temp=np.where(Cloud_Mask_VIIRS==0,term_data,np.nan)
# Constants
k1=606.399172
k2=1258.78
L_lambda_b10_100=((2*6.63e-34*(3.0e8)**2)/((11.45e-6)**5*(np.exp((6.63e-34*3e8)/(1.38e-23*(11.45e-6)*brightness_temp))-1)))*1e-6
# Get Temperature for 100 and 375m resolution
Temp_TOA_100 = Get_Thermal(L_lambda_b10_100,Rp,Temp_inst,tau_sky,tir_emis,k1,k2)
# Conditions for surface temperature (100m)
n120_surface_temp=np.where(QC_Map==1,np.nan,Temp_TOA_100)
n120_surface_temp=np.where(n120_surface_temp<=250,np.nan,n120_surface_temp)
n120_surface_temp=np.where(n120_surface_temp>450,np.nan,n120_surface_temp)
# Save the surface temperature of the VIIRS in 100m resolution
temp_surface_100_fileName_beforeTS = os.path.join(output_folder, 'Output_temporary','%s_%s_surface_temp_before_Thermal_Sharpening_%s_%s_%s.tif' %(sensor1, sensor2, res2, year, DOY))
save_GeoTiff_proy(lsc, n120_surface_temp, temp_surface_100_fileName_beforeTS, shape_lsc, nband=1)
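# Illustrative sketch (commented out): Get_Thermal (defined elsewhere in this
# script) is assumed to correct the band radiance for path radiance (Rp), sky
# transmissivity (tau_sky) and surface emissivity, and then to invert the
# band-integrated Planck relation with the band constants k1 and k2 used above:
#def Radiance_to_Temp_sketch(L_corrected, K1, K2):
#    # inverse Planck relation for a single thermal band (temperature in Kelvin)
#    return K2 / np.log(K1 / L_corrected + 1.0)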
print('---------------------------------------------------------')
print('-------------------- Downscale VIIRS --------------------')
print('---------------------------------------------------------')
################################ Thermal Sharpening #####################################################
# Upscale VIIRS and PROBA-V to 400m
pixel_spacing_upscale = 400
dest_400, ulx_dem_400, lry_dem_400, lrx_dem_400, uly_dem_400, epsg_to = reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
DEM_400 = dest_400.GetRasterBand(1).ReadAsArray()
Y_raster_size_400 = dest_400.RasterYSize
X_raster_size_400 = dest_400.RasterXSize
shape_400=([X_raster_size_400, Y_raster_size_400])
save_GeoTiff_proy(dest_400, DEM_400, proyDEM_fileName_400, shape_400, nband=1)
# Upscale thermal band VIIRS from 100m to 400m
VIIRS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
temp_surface_100_fileName_beforeTS, proyDEM_fileName_400)
data_Temp_Surf_400 = VIIRS_Upscale.GetRasterBand(1).ReadAsArray()
# Upscale PROBA-V NDVI from 100m to 400m
NDVI_PROBAV_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
ndvi_fileName, proyDEM_fileName_400)
data_NDVI_400 = NDVI_PROBAV_Upscale.GetRasterBand(1).ReadAsArray()
# Define the width of the moving window box
Box=9
# Apply the surface temperature sharpening
temp_surface_sharpened = Thermal_Sharpening(data_Temp_Surf_400, data_NDVI_400, NDVI, Box, NDVI_PROBAV_Upscale, output_folder, proyDEM_fileName, shape_lsc, lsc, surf_temp_fileName)
# Divide the temporary water mask into a snow mask and a water mask using the surface temperature
Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = CalculateSnowWaterMask(NDVI,shape_lsc,water_mask,temp_surface_sharpened)
# Replace water values
temp_surface_sharpened[water_mask==1] = n120_surface_temp[water_mask == 1]
temp_surface_sharpened = np.where(np.isnan(temp_surface_sharpened),n120_surface_temp,temp_surface_sharpened)
surf_temp_fileName = os.path.join(output_folder, 'Output_vegetation','%s_%s_surface_temp_sharpened_%s_%s_%s_%s_%s.tif' %(sensor1, sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, temp_surface_sharpened, surf_temp_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Snow_Mask_PROBAV, snow_mask_fileName, shape_lsc, nband=1)
# Calculate total quality mask
QC_Map=np.where(np.logical_or(data_VIIRS_QC==1, Cloud_Mask_PROBAV==1),1,0)
######################################## End Thermal Sharpening ################################################3
# Check if Quality dataset is defined. If so use this one instead of the PROBAV-VIIRS one
# Check Quality
try:
if (ws['F%d' % number].value) is not None:
# Output folder QC defined by the user
#QC_fileName = os.path.join(output_folder, 'Output_cloud_masked', 'User_quality_mask_%s_%s_%s.tif.tif' %(res2, year, DOY))
# Reproject and reshape the users quality map
QC_Map = Reshape_Reproject_Input_data(r'%s' %str(ws['F%d' % number].value),QC_fileName, proyDEM_fileName)
# Save the QC map as tif file
save_GeoTiff_proy(lsc, QC_Map, QC_fileName, shape_lsc, nband=1)
# if the users quality data cannot be reprojected then use the original PROBA-V/VIIRS data as imported into SEBAL
else:
# Create an empty QC map if it does not exist yet
if not 'QC_Map' in locals():
QC_Map = np.zeros((shape_lsc[1], shape_lsc[0]))
# Define users QC output name
#QC_tot_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_quality_mask_%s_%s_%s.tif' %(sensor1, res2, year, DOY))
# Save the QC map as tif file
save_GeoTiff_proy(lsc, QC_Map, QC_tot_fileName, shape_lsc, nband=1)
except:
assert "Please check the VIIRS path, was not able to create VIIRS QC map"
print('---------------------------------------------------------')
print('-------------------- Collect Meteo Data -----------------')
print('---------------------------------------------------------')
# Correct vegetation parameters
NDVI=np.where(QC_Map==1,np.nan,NDVI)
Surf_albedo=np.where(QC_Map==1,np.nan,Surf_albedo)
LAI=np.where(QC_Map==1,np.nan,LAI)
vegt_cover=np.where(QC_Map==1,np.nan,vegt_cover)
SAVI=np.where(QC_Map==1,np.nan,SAVI)
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#-------------------------Calculations MODIS -----------------------------
if Image_Type == 3:
print('---------------------------------------------------------')
print('----------------- Calculate Vegetation data -------------')
print('---------------------------------------------------------')
# Check if a NDVI, SAVI, or Surface Albedo dataset is defined. If so use this one instead of MODIS, otherwise use MODIS
ws = wb['Additional_Input']
# Check NDVI
try:
if (ws['B%d' % number].value) is not None:
# Output folder NDVI defined by the user
ndvi_fileName = os.path.join(output_folder, 'Output_vegetation', 'User_NDVI_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reproject and reshape users NDVI
NDVI=Reshape_Reproject_Input_data(r'%s' %str(ws['B%d' % number].value),ndvi_fileName, proyDEM_fileName)
NDVI_PROBAV_MAX = np.nanmax(NDVI)
NDVI_PROBAV_SD = np.nanstd(NDVI)
print('NDVI User max ' , NDVI_PROBAV_MAX)
print('NDVI User sd' , NDVI_PROBAV_SD)
# Create Water mask based on PROBA-V
water_mask = np.zeros((shape_lsc[1], shape_lsc[0]))
water_mask[NDVI<0.0]=1
# if the users NDVI data cannot be reprojected then use the original MODIS data as imported into SEBAL
else:
# Calculate the NDVI based on MODIS
NDVI = Open_reprojected_hdf(src_FileName_NDVI, 0, epsg_to, 0.0001, proyDEM_fileName)
NDVI_MODIS_MAX = np.nanmax(NDVI)
NDVI_MODIS_SD = np.nanstd(NDVI)
print('NDVI MODIS max ' , NDVI_MODIS_MAX)
print('NDVI MODIS sd' , NDVI_MODIS_SD)
# Create Water mask based on MODIS
water_mask = np.zeros((shape_lsc[1], shape_lsc[0]))
water_mask[NDVI < 0]=1
# Define users NDVI output name
ndvi_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_NDVI_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the MODIS NDVI as tif file
save_GeoTiff_proy(lsc, NDVI, ndvi_fileName, shape_lsc, nband=1)
except:
assert "Please check the PROBA-V path, was not able to create NDVI"
# Check SAVI
try:
if (ws['C%d' % number].value) is not None:
# Output folder SAVI defined by the user
savi_fileName = os.path.join(output_folder, 'Output_vegetation', 'User_SAVI_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reproject and reshape users SAVI
SAVI=Reshape_Reproject_Input_data(r'%s' %str(ws['C%d' % number].value),savi_fileName,proyDEM_fileName)
# if the users SAVI data cannot be reprojected then use the original MOD09 data as imported into SEBAL
else:
# Open and reproject B1 from the MOD09 reflectance product
B1_modis = Open_reprojected_hdf(src_FileName_Ref, 11, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B2
B2_modis = Open_reprojected_hdf(src_FileName_Ref, 12, epsg_to, 0.0001, proyDEM_fileName)
# Calculate SAVI
SAVI = (B2_modis - B1_modis) /(B2_modis + B1_modis + L_SAVI) * (1+L_SAVI)
# Define users SAVI output name
savi_fileName = os.path.join(output_folder, 'Output_vegetation', '%s_SAVI_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the MODIS SAVI as tif file
save_GeoTiff_proy(lsc, SAVI, savi_fileName, shape_lsc, nband=1)
except:
assert "Please check the PROBA-V path, was not able to create SAVI"
# Check surface albedo
try:
if (ws['D%d' % number].value) is not None:
# Output folder surface albedo
surface_albedo_fileName = os.path.join(output_folder, 'Output_vegetation','User_surface_albedo_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reproject and reshape users surface albedo
Surf_albedo=Reshape_Reproject_Input_data(r'%s' %str(ws['D%d' % number].value),surface_albedo_fileName,proyDEM_fileName)
# if the users surface albedo data cannot be reprojected then use the original MODIS data as imported into SEBAL
else:
# Open and reproject B1 from the MOD09 reflectance product
B1_modis = Open_reprojected_hdf(src_FileName_Ref, 11, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B2
B2_modis = Open_reprojected_hdf(src_FileName_Ref, 12, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B3
B3_modis = Open_reprojected_hdf(src_FileName_Ref, 13, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B4
B4_modis = Open_reprojected_hdf(src_FileName_Ref, 14, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B5
B5_modis = Open_reprojected_hdf(src_FileName_Ref, 15, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B6
B6_modis = Open_reprojected_hdf(src_FileName_Ref, 16, epsg_to, 0.0001, proyDEM_fileName)
# Open and reproject B7
B7_modis = Open_reprojected_hdf(src_FileName_Ref, 17, epsg_to, 0.0001, proyDEM_fileName)
# Calc surface albedo within shortwave domain using a weighting function (Tasumi et al 2008)
Surf_albedo = 0.215 * B1_modis + 0.215 * B2_modis + 0.242 * B3_modis + 0.129 * B4_modis + 0.101 * B5_modis + 0.062 * B6_modis + 0.036 * B7_modis
# Define users surface albedo output name
surface_albedo_fileName = os.path.join(output_folder, 'Output_vegetation','%s_surface_albedo_%s_%s_%s_%s_%s.tif' %(sensor1, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the MODIS surface albedo as tif file
save_GeoTiff_proy(lsc, Surf_albedo, surface_albedo_fileName, shape_lsc, nband=1)
except:
assert "Please check the PROBA-V path, was not able to create Albedo"
# Calculate the Fpar, TIR, Nitrogen, Vegetation Cover, LAI and b10_emissivity based on MODIS
FPAR, tir_emis, Nitrogen_PROBAV, vegt_cover, LAI, b10_emissivity_PROBAV=Calc_vegt_para(NDVI, SAVI, water_mask, shape_lsc)
# Save the paramaters as a geotiff
save_GeoTiff_proy(lsc, water_mask, water_mask_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, tir_emis, tir_emissivity_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, Nitrogen_PROBAV, nitrogen_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, vegt_cover, veg_cover_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, LAI, lai_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('------------------- Collect MOD9 data -------------------')
print('---------------------------------------------------------')
# Open Additional_Input sheet in the excel
ws = wb['Additional_Input']
# end results are reprojected maps of:
# 1) the MODIS land surface temperature (LST) map
# 2) the MODIS quality (QC) map
# 1) Get the MODIS LST map
# Upscale DEM to 1000m
pixel_spacing_upscale=1000
dest_1000, ulx_dem_1000, lry_dem_1000, lrx_dem_1000, uly_dem_1000, epsg_to = reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
DEM_1000 = dest_1000.GetRasterBand(1).ReadAsArray()
Y_raster_size_1000 = dest_1000.RasterYSize
X_raster_size_1000 = dest_1000.RasterXSize
shape_1000=([X_raster_size_1000, Y_raster_size_1000])
save_GeoTiff_proy(dest_1000, DEM_1000, proyDEM_fileName_1000, shape_1000, nband=1)
try:
if (ws['E%d' % number].value) is not None:
# Define the output name for the thermal data supplied by the user
proyMODIS_fileName_250 = os.path.join(output_folder, 'Output_MODIS','User_TB_%s_%s_%s_%s_%s.tif' %(res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Reshape and reproject the thermal data given by the user and resample it to the model grid (proyDEM_fileName)
temp_surface_sharpened = Reshape_Reproject_Input_data(r'%s' %str(ws['E%d' % number].value), proyMODIS_fileName_250, proyDEM_fileName)
Thermal_Sharpening_not_needed = 1
# Divide the temporary water mask into a snow mask and a water mask using the surface temperature
Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = CalculateSnowWaterMask(NDVI,shape_lsc,water_mask,temp_surface_sharpened)
MODIS_QC = np.zeros((shape_lsc[1], shape_lsc[0]))
MODIS_QC[np.isnan(temp_surface_sharpened)] = 1
else:
# Open and reproject the MOD11 LST (scale factor 0.02)
n120_surface_temp = Open_reprojected_hdf(src_FileName_LST, 0, epsg_to, 0.02, proyDEM_fileName)
# Define the MODIS LST output name
proyMODIS_fileName = os.path.join(output_folder, 'Output_MODIS','%s_TB_%s_%s_%s_%s_%s.tif' %(sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
# Save the MODIS LST data
save_GeoTiff_proy(lsc, n120_surface_temp, proyMODIS_fileName, shape_lsc, nband=1)
Thermal_Sharpening_not_needed = 0
# 2) Get the MODIS Quality map 1000m
# Open and reproject the MOD11 QC layer
g=gdal.Open(src_FileName_LST, gdal.GA_ReadOnly)
MODIS_QC = Open_reprojected_hdf(src_FileName_LST, 1, epsg_to, 1, proyDEM_fileName)
# Define QC: keep pixels with acceptable QC values (0, 5, 17, 21) and flag the rest
MODIS_QC[np.logical_or(np.logical_or(MODIS_QC==5, MODIS_QC==17), MODIS_QC==21)] = 0
MODIS_QC[MODIS_QC != 0] = 1
# Save the reprojected MODIS QC dataset
save_GeoTiff_proy(lsc, MODIS_QC, proyMODIS_QC_fileName, shape_lsc, nband=1)
except:
assert "Please check the MODIS11 input path"
# ------ Upscale the MODIS LST and NDVI for thermal sharpening at 1000m resolution ----
if Thermal_Sharpening_not_needed == 0:
##### MODIS land surface temperature quality screening
# Create the total MODIS quality/cloud mask
QC_Map=np.zeros((shape_lsc[1], shape_lsc[0]))
QC_Map=np.where(MODIS_QC==1,1,0)
# Conditions for surface temperature (100m)
n120_surface_temp=np.where(QC_Map==1,np.nan,n120_surface_temp)
n120_surface_temp[n120_surface_temp<273] = np.nan
# Save the MODIS surface temperature before thermal sharpening
temp_surface_250_fileName_beforeTS = os.path.join(output_folder, 'Output_temporary','%s_%s_surface_temp_before_Thermal_Sharpening_%s_%s_%s_%s_%s.tif' %(sensor1, sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, n120_surface_temp, temp_surface_250_fileName_beforeTS, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('-------------------- Downscale MODIS --------------------')
print('---------------------------------------------------------')
################################ Thermal Sharpening #####################################################
# Upscale the MODIS LST and NDVI to 1000m
pixel_spacing_upscale = 1000
dest_1000, ulx_dem_1000, lry_dem_1000, lrx_dem_1000, uly_dem_1000, epsg_to = reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
DEM_1000 = dest_1000.GetRasterBand(1).ReadAsArray()
Y_raster_size_1000 = dest_1000.RasterYSize
X_raster_size_1000 = dest_1000.RasterXSize
shape_1000=([X_raster_size_1000, Y_raster_size_1000])
save_GeoTiff_proy(dest_1000, DEM_1000, proyDEM_fileName_1000, shape_1000, nband=1)
# Upscale the MODIS LST to 1000m
MODIS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
temp_surface_250_fileName_beforeTS, proyDEM_fileName_1000)
data_Temp_Surf_1000 = MODIS_Upscale.GetRasterBand(1).ReadAsArray()
# Upscale the NDVI to 1000m
NDVI_MODIS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = reproject_dataset_example(
ndvi_fileName, proyDEM_fileName_1000)
data_NDVI_1000 = NDVI_MODIS_Upscale.GetRasterBand(1).ReadAsArray()
# Define the width of the moving window box
Box=9
# Apply the surface temperature sharpening
temp_surface_sharpened = Thermal_Sharpening(data_Temp_Surf_1000, data_NDVI_1000, NDVI, Box, NDVI_MODIS_Upscale, output_folder, proyDEM_fileName, shape_lsc, lsc, surf_temp_fileName)
# Divide the temporary water mask into a snow mask and a water mask using the surface temperature
Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = CalculateSnowWaterMask(NDVI,shape_lsc,water_mask,temp_surface_sharpened)
# Replace water values
temp_surface_sharpened[water_mask==1] = n120_surface_temp[water_mask == 1]
temp_surface_sharpened = np.where(np.isnan(temp_surface_sharpened),n120_surface_temp,temp_surface_sharpened)
surf_temp_fileName = os.path.join(output_folder, 'Output_vegetation','%s_%s_surface_temp_sharpened_%s_%s_%s_%s_%s.tif' %(sensor1, sensor2, res2, year, str(mon).zfill(2), str(day).zfill(2), str(DOY).zfill(3)))
save_GeoTiff_proy(lsc, temp_surface_sharpened, surf_temp_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Snow_Mask_PROBAV, snow_mask_fileName, shape_lsc, nband=1)
######################################## End Thermal Sharpening ################################################3
# Check if Quality dataset is defined. If so use this one instead of the PROBAV-VIIRS one
# Check Quality
try:
if (ws['F%d' % number].value) is not None:
# Output folder QC defined by the user
#QC_fileName = os.path.join(output_folder, 'Output_cloud_masked', 'User_quality_mask_%s_%s_%s.tif.tif' %(res2, year, DOY))
# Reproject and reshape the users quality map
QC_Map = Reshape_Reproject_Input_data(r'%s' %str(ws['F%d' % number].value),QC_fileName, proyDEM_fileName)
# Save the QC map as tif file
save_GeoTiff_proy(lsc, QC_Map, QC_fileName, shape_lsc, nband=1)
# if the users quality data cannot be reprojected then use the original MODIS data as imported into SEBAL
else:
# Define users QC output name
#QC_tot_fileName = os.path.join(output_folder, 'Output_cloud_masked', '%s_quality_mask_%s_%s_%s.tif' %(sensor1, res2, year, DOY))
# Save the QC map as tif file
save_GeoTiff_proy(lsc, QC_Map, QC_tot_fileName, shape_lsc, nband=1)
except:
assert "Please check the VIIRS path, was not able to create VIIRS QC map"
print('---------------------------------------------------------')
print('-------------------- Collect Meteo Data -----------------')
print('---------------------------------------------------------')
# Correct vegetation parameters
NDVI=np.where(QC_Map==1,np.nan,NDVI)
Surf_albedo=np.where(QC_Map==1,np.nan,Surf_albedo)
LAI=np.where(QC_Map==1,np.nan,LAI)
vegt_cover=np.where(QC_Map==1,np.nan,vegt_cover)
SAVI=np.where(QC_Map==1,np.nan,SAVI)
print('---------------------------------------------------------')
print('----------------------- Meteo ---------------------------')
print('---------------------------------------------------------')
# Precipitable water in the atmosphere (mm):
# W = 0.14 * eact_inst * Pair + 2.1 # <NAME> 1990
# Slope of satur vapour pressure curve at air temp (kPa / °C)
sl_es_24 = 4098 * esat_24 / np.power(Temp_24 + 237.3, 2)
# Daily 24 hr radiation - For flat terrain only !
ws_angle = np.arccos(-np.tan(phi)*np.tan(delta)) # Sunset hour angle ws
# Extraterrestrial daily radiation, Ra (W/m2):
Ra24_flat = (Gsc/np.pi * dr * (ws_angle * np.sin(phi[nrow//2, ncol//2]) * np.sin(delta) +
np.cos(phi[nrow//2, ncol//2]) * np.cos(delta) * np.sin(ws_angle)))
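# Illustrative sketch (commented out): the standard FAO-56 style relations used
# above, gathered in one place. Gsc is assumed to be the solar constant in W/m2
# and all angles are in radians; the real code evaluates phi per pixel.
#def FAO56_sketch(T_air_C, phi_rad, delta_rad, dr, Gsc=1367.0):
#    esat = 0.6108 * np.exp(17.27 * T_air_C / (T_air_C + 237.3))   # kPa
#    slope = 4098.0 * esat / np.power(T_air_C + 237.3, 2)          # kPa/degC
#    ws = np.arccos(-np.tan(phi_rad) * np.tan(delta_rad))          # sunset hour angle
#    Ra = Gsc / np.pi * dr * (ws * np.sin(phi_rad) * np.sin(delta_rad)
#                             + np.cos(phi_rad) * np.cos(delta_rad) * np.sin(ws))
#    return esat, slope, ws, Ra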
# calculate the daily radiation or daily transmissivity or daily surface radiation based on the method defined by the user
if Method_Radiation_24==1:
Transm_24 = Rs_24/Ra_mountain_24
if Method_Radiation_24==2:
Rs_24 = Ra_mountain_24 * Transm_24
# Solar radiation from extraterrestrial radiation
Rs_24_flat = Ra24_flat * Transm_24
print('Mean Daily Transmissivity = %0.3f (-)' % np.nanmean(Transm_24))
print('Mean Daily incoming shortwave Radiation = %0.3f (W/m2)' % np.nanmean(Rs_24))
print('Mean Daily incoming shortwave Radiation Flat Terrain = %0.3f (W/m2)' % np.nanmean(Rs_24_flat))
# If method of instantaneous radiation 1 is used then calculate the Transmissivity
if Method_Radiation_inst==1:
Transm_corr=Rs_in_inst/Ra_inst
# If method of instantaneous radiation 2 is used then calculate the instantaneous incoming Radiation
if Method_Radiation_inst==2:
# calculate the transmissivity index for direct beam radiation
Transm_corr = Transm_inst + 2e-5 * DEM_resh
# Instantaneous incoming short wave radiation (W/m2):
Rs_in_inst = Ra_inst * Transm_corr
# Atmospheric emissivity, by Bastiaanssen (1995):
Transm_corr[Transm_corr<0.001]=0.1
Transm_corr[Transm_corr>1]=1
atmos_emis = 0.85 * np.power(-np.log(Transm_corr), 0.09)
# Instantaneous incoming longwave radiation:
lw_in_inst = atmos_emis * SB_const * np.power(Temp_inst + 273.15, 4)
print('Instantaneous longwave incoming radiation = %0.3f (W/m2)' % np.nanmean(lw_in_inst))
print('Atmospheric emissivity = %0.3f' % np.nanmean(atmos_emis))
# calculates the ground heat flux and the solar radiation
Rn_24,rn_inst,g_inst,Rnl_24_FAO =Calc_Meteo(Rs_24,eact_24,Temp_24,Surf_albedo,cos_zn,dr,tir_emis,temp_surface_sharpened,water_mask,NDVI,Transm_24,SB_const,lw_in_inst,Rs_in_inst)
print('Mean Daily Net Radiation (FAO) = %0.3f (W/m2)' % np.nanmean(Rnl_24_FAO))
print('Mean Daily Net Radiation = %0.3f (W/m2)' % np.nanmean(Rn_24))
print('Mean instantaneous Net Radiation = %0.3f (W/m2)' % np.nanmean(rn_inst))
print('Mean instantaneous Ground Heat Flux = %0.3f (W/m2)' % np.nanmean(g_inst))
# Save output maps
#save_GeoTiff_proy(lsc, Rn_24, Rn_24_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, rn_inst, rn_inst_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, g_inst, g_inst_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, Pair, Atmos_pressure_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, Psychro_c, Psychro_c_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('-------------------- Hot/Cold Pixels --------------------')
print('---------------------------------------------------------')
# Correct the surface temperature for elevation (reference to a common level using the DEM and a lapse rate)
ts_dem,air_dens,Temp_corr=Correct_Surface_Temp(Temp_24,temp_surface_sharpened,Temp_lapse_rate,DEM_resh,Pair,dr,Transm_corr,cos_zn,Sun_elevation,deg2rad,QC_Map)
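# Illustrative sketch (commented out): the core of what Correct_Surface_Temp
# (defined elsewhere) is assumed to do, i.e. referencing the sharpened surface
# temperature to a common elevation with a lapse rate and deriving air density
# from the gas law (Pair in kPa). The solar-geometry corrections of the real
# routine are omitted.
#def Correct_Ts_sketch(Ts, dem, Temp_lapse_rate, Pair):
#    ts_dem = Ts + dem * Temp_lapse_rate              # K, referenced to a common level
#    air_dens = 1000.0 * Pair / (1.01 * Ts * 287.0)   # kg/m3
#    return ts_dem, air_dens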
NDVI_land = np.where(NDVI <= 0, np.nan, NDVI)
NDVIhot_low = np.nanpercentile(NDVI_land, NDVIhot_low1)
NDVIhot_high = np.nanpercentile(NDVI_land, NDVIhot_high1)
tcoldmin = np.nanpercentile(ts_dem, tcoldmin1)
tcoldmax = np.nanpercentile(ts_dem, tcoldmax1)
print('hot_minpercentile= %0.3f' % NDVIhot_low1, ', hot_maxpercentile= %0.3f' % NDVIhot_high1, ', cold_minpercentile= %0.3f' % tcoldmin1,', cold_maxpercentile= %0.3f' % tcoldmax1)
print('NDVIhot_low= %0.3f ' % NDVIhot_low, ', NDVIhot_high= %0.3f' % NDVIhot_high, ', tcoldmin= %0.3f (Kelvin)' % tcoldmin,', tcoldmax= %0.3f (Kelvin)' % tcoldmax)
# Selection of hot and cold pixels
# Hot pixels
ts_dem_hot,hot_pixels = Calc_Hot_Pixels(ts_dem,QC_Map, water_mask,NDVI,NDVIhot_low,NDVIhot_high,Hot_Pixel_Constant)
# Cold pixels vegetation
ts_dem_cold_veg = Calc_Cold_Pixels_Veg(NDVI,NDVI_max,NDVI_std, QC_Map,ts_dem,Image_Type, Cold_Pixel_Constant)
# Cold pixels water
ts_dem_cold,cold_pixels,ts_dem_cold_mean = Calc_Cold_Pixels(ts_dem,tcoldmin,tcoldmax,water_mask,QC_Map,ts_dem_cold_veg,Cold_Pixel_Constant)
if np.isnan(ts_dem_cold) == True:
ts_dem_cold = Temp_inst
# Save files
save_GeoTiff_proy(lsc, Temp_corr, temp_corr_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, ts_dem, ts_dem_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, hot_pixels, hot_pixels_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, cold_pixels, cold_pixels_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('----------------- Sensible heat flux --------------------')
print('---------------------------------------------------------')
# Change the minimum windspeed to prevent high values in further calculations
if Wind_inst_kind_of_data == 0:
if Wind_inst<1.5:
Wind_inst=1.5
if Wind_24_kind_of_data == 0:
if Wind_24<1.5:
Wind_24=1.5
# calculate windspeed at the blending height and the friction velocity by using the Raupach model or NDVI
Surf_roughness,u_200,ustar_1,disp_height=Calc_Wind_Speed_Friction(h_obst,Wind_inst,zx,LAI,NDVI,Surf_albedo,water_mask,surf_roughness_equation_used)
#save_GeoTiff_proy(lsc, Surf_roughness, surf_rough_fileName, shape_lsc, nband=1)
# Computation of surface roughness for momentum transport
k_vk = 0.41 # von Karman constant
# Sensible heat 1 (Step 5)
# Corrected value for the aerodynamic resistance (eq 41 with psi2 = psi1):
rah1 = np.log(2.0/0.01) / (k_vk * ustar_1)
i=0
L, psi_m200_stable, psi, psi_m200,h_inst,dT, slope_dt, offset_dt = sensible_heat(
rah1, ustar_1, rn_inst, g_inst, ts_dem, ts_dem_hot, ts_dem_cold,
air_dens, temp_surface_sharpened, k_vk,QC_Map, hot_pixels, slope)
# do the calculation iteratively 10 times
for i in range(1,10):
L,psi,psi_m200,psi_m200_stable,h_inst,ustar_corr,rah_corr,dT, slope_dt, offset_dt = Iterate_Friction_Velocity(k_vk,u_200,Surf_roughness,g_inst,rn_inst, ts_dem, ts_dem_hot, ts_dem_cold,air_dens, temp_surface_sharpened,L,psi,psi_m200,psi_m200_stable,QC_Map, hot_pixels, slope)
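# Illustrative sketch (commented out): the SEBAL relations that sensible_heat and
# Iterate_Friction_Velocity (defined elsewhere) iterate on. dT is assumed linear
# in ts_dem, anchored so that H equals the available energy at the hot pixel and
# zero at the cold pixel; rah is then updated with Monin-Obukhov stability
# corrections in each pass. dT_hot is the temperature difference at the hot pixel.
#def Sensible_heat_sketch(ts_dem, ts_hot, ts_cold, dT_hot, rah, air_dens, cp=1004.0):
#    slope_dt = dT_hot / (ts_hot - ts_cold)    # K per K
#    offset_dt = -slope_dt * ts_cold           # dT = 0 at the cold pixel
#    dT = offset_dt + slope_dt * ts_dem
#    return air_dens * cp * dT / rah           # H in W/m2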
# Save files
#save_GeoTiff_proy(lsc, h_inst, h_inst_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('-------------------- Evaporation ------------------------')
print('---------------------------------------------------------')
# calculate reference net radiation
Rn_ref, Refl_rad_water, rah_grass=Calc_Rn_Ref(shape_lsc,water_mask,Rn_24,Ra_mountain_24,Transm_24,Rnl_24_FAO,Wind_24)
# Calculate rah of PM for the ET act (dT after iteration) and ETpot (4 degrees)
rah_pm_act=((np.log((2.0-0.0)/(Surf_roughness*0.1))*np.log((2.0-0.0)/(Surf_roughness)))/(k_vk*1.5**2))*((1-5*(-9.82*dT*(2.0-0.0))/((273.15+Temp_inst)*1.5**2))**(-0.75))
rah_pm_act[rah_pm_act<25]=25
rah_pm_pot=((np.log((2.0-0.0)/(Surf_roughness*0.1))*np.log((2.0-0.0)/(Surf_roughness)))/(k_vk*1.5**2))*((1-5*(-9.82*4.0*(2.0-0.0))/((273.15+Temp_inst)*1.5**2))**(-0.75))
rah_pm_pot[rah_pm_pot<25]=25
# calculate reference potential evaporation.
ETpot_24,ETref_24,Lhv,rs_min=Calc_Ref_Pot_ET(LAI,temp_surface_sharpened,sl_es_24,Rn_ref,air_dens,esat_24,eact_24,rah_grass,Psychro_c,Rn_24,Refl_rad_water,rah_pm_pot,rl)
# Instantaneous evapotranspiration
LE_inst = rn_inst - g_inst - h_inst
# Evaporative fraction
EF_inst=Calc_instantaneous_ET_fraction(LE_inst,rn_inst,g_inst)
# Daily Evaporation and advection factor
ETA_24, AF=Calc_ETact(esat_24,eact_24,EF_inst,Rn_24,Refl_rad_water,Lhv)
# Bulk surface resistance (s/m):
bulk_surf_resis_24=Calc_Bulk_surface_resistance(sl_es_24,Rn_24,Refl_rad_water,air_dens,esat_24,eact_24,rah_pm_act,ETA_24,Lhv,Psychro_c)
# crop factor
kc = ETA_24 / ETref_24 # Crop factor
ETP_24 = np.where(ETpot_24 < ETA_24, ETA_24, ETpot_24)
ET_24_deficit = ETP_24 - ETA_24
kc_max = ETP_24 / ETref_24
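# Illustrative sketch (commented out): the evaporative-fraction assumption behind
# the daily ET above. EF = LE / (Rn - G) is taken as constant over the day, so the
# daily latent heat flux scales with the daily net radiation; dividing by the
# latent heat of vaporisation Lhv (J/kg) gives mm/day. The real Calc_ETact also
# accounts for reflected radiation over water and returns an advection factor.
#def Daily_ET_sketch(EF_inst, Rn_24, Lhv):
#    LE_24 = EF_inst * Rn_24              # W/m2, daily mean latent heat flux
#    return LE_24 * 86400.0 / Lhv         # kg/m2/day, i.e. mm/day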
# Save files
#save_GeoTiff_proy(lsc, rs_min, min_bulk_surf_res_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, EF_inst, EF_inst_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, LE_inst, LE_inst_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, ETref_24, ETref_24_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, ETA_24, ETA_24_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, ETP_24, ETP_24_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, ET_24_deficit, ET_24_deficit_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, AF, AF_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, kc, kc_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, kc_max, kc_max_fileName, shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, bulk_surf_resis_24, bulk_surf_res_fileName, shape_lsc, nband=1)
print('---------------------------------------------------------')
print('-------------------- Soil Moisture ----------------------')
print('---------------------------------------------------------')
# Calculate soil properties
#SM_stress_trigger, total_soil_moisture, RZ_SM,moisture_stress_biomass,irrigation_needs,top_soil_moisture=Calc_Soil_Moisture(ETA_24,accum_prec_14d,accum_ETo_14d,EF_inst,water_mask,vegt_cover,Theta_sat,Theta_res)
SM_stress_trigger, total_soil_moisture, root_zone_moisture_first, moisture_stress_biomass_first,top_soil_moisture,RZ_SM_NAN = Calc_Soil_Moisture(ETA_24,EF_inst,QC_Map,water_mask,vegt_cover,Theta_sat_top,Theta_sat_sub, Theta_res_top,Theta_res_sub, depl_factor,Field_Capacity,FPAR, Soil_moisture_wilting_point)
# separation of E and T
Eact_24,Tpot_24,Tact_24,moisture_stress_biomass,T24_deficit,beneficial_fraction,root_zone_moisture_final,top_zone_moisture_final=Separate_E_T(Light_use_extinction_factor,LAI,ETP_24,Theta_res_top, Theta_res_sub,Theta_sat_top,Theta_sat_sub,top_soil_moisture,sl_es_24, Psychro_c,moisture_stress_biomass_first,vegt_cover,ETA_24,SM_stress_trigger,root_zone_moisture_first,total_soil_moisture)
# Irrigation:
irrigation_needs = Classify_Irrigation(moisture_stress_biomass, vegt_cover)
# Save files
save_GeoTiff_proy(lsc, Tact_24, Tact24_fileName,shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Eact_24, Eact24_fileName,shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Tpot_24, Tpot24_fileName,shape_lsc, nband=1)
save_GeoTiff_proy(lsc, T24_deficit, T24_deficit_fileName,shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, total_soil_moisture, total_soil_moisture_fileName,shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, top_zone_moisture_final, top_soil_moisture_fileName,shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, root_zone_moisture_final, RZ_SM_fileName, shape_lsc,nband=1)
#save_GeoTiff_proy(lsc, SM_stress_trigger, SM_stress_trigger_fileName,shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, moisture_stress_biomass, moisture_stress_biomass_fileName,shape_lsc, nband=1)
#save_GeoTiff_proy(lsc, irrigation_needs, irrigation_needs_fileName,shape_lsc, nband=1)
print('---------------------------------------------------------')
print('---------------------- Biomass --------------------------')
print('---------------------------------------------------------')
# calculate biomass production
LUE,Biomass_prod,Biomass_wp,Biomass_deficit = Calc_Biomass_production(LAI,ETP_24,moisture_stress_biomass,ETA_24,Ra_mountain_24,Transm_24,FPAR,esat_24,eact_24,Th,Kt,Tl,Temp_24,LUEmax)
#save_GeoTiff_proy(lsc, LUE, LUE_fileName,shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Biomass_prod, Biomass_prod_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Biomass_wp, Biomass_wp_fileName, shape_lsc, nband=1)
save_GeoTiff_proy(lsc, Biomass_deficit, Biomass_deficit_fileName,shape_lsc, nband=1)
lsc=None
print ('---------------------------------------------------------')
print ('------------Removing Intermediary files------------------')
print ('---------------------------------------------------------')
try:
shutil.rmtree(os.path.join(output_folder, 'Output_temporary'))
shutil.rmtree(os.path.join(output_folder, 'Output_radiation_balance'))
except OSError as e:
print ('Error: folder does not exist')
print ('...................................................................')
print ('............................DONE!..................................')
print ('...................................................................')
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# FUNCTIONS
#-------------------------------------------------------------------------
def Create_Buffer(Data_In):
'''
This function dilates (buffers) a binary mask with a moving window: a pixel
becomes 1 when a flagged pixel lies within roughly Buffer_area pixels of it.
'''
Buffer_area = 2 # a block of 2 * Buffer_area + 1 pixels around a flagged pixel is set to 1
Data_Out=np.empty((len(Data_In),len(Data_In[1])))
Data_Out[:,:] = Data_In
for ypixel in range(1,Buffer_area + 1):
Data_Out[:,0:-ypixel] += Data_In[:,ypixel:]
Data_Out[:,ypixel:] += Data_In[:,:-ypixel]
for xpixel in range(1,Buffer_area + 1):
Data_Out[0:-xpixel,ypixel:] += Data_In[xpixel:,:-ypixel]
Data_Out[xpixel:,ypixel:] += Data_In[:-xpixel,:-ypixel]
Data_Out[0:-xpixel,0:-ypixel] += Data_In[xpixel:,ypixel:]
Data_Out[xpixel:,0:-ypixel] += Data_In[:-xpixel,ypixel:]
if ypixel==1:
Data_Out[xpixel:,:] += Data_In[:-xpixel,:]
Data_Out[:,0:-xpixel] += Data_In[:,xpixel:]
Data_Out[Data_Out>0.1] = 1
Data_Out[Data_Out<=0.1] = 0
return(Data_Out)
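# Optional alternative (a sketch, not used by the workflow above): Create_Buffer
# grows a binary mask by roughly Buffer_area pixels; a comparable mask can be
# obtained with scipy.ndimage.binary_dilation, at the cost of a scipy dependency
# that this script does not otherwise require.
def Create_Buffer_scipy(Data_In, Buffer_area=2):
    '''
    Dilate a binary mask with a square structuring element of width
    2 * Buffer_area + 1 (sketch of an alternative to Create_Buffer).
    '''
    from scipy import ndimage
    structure = np.ones((2 * Buffer_area + 1, 2 * Buffer_area + 1), dtype=bool)
    return ndimage.binary_dilation(Data_In > 0.1, structure=structure).astype(float)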
def Calc_Biomass_production(LAI,ETP_24,moisture_stress_biomass,ETA_24,Ra_mountain_24,Transm_24,FPAR,esat_24,eact_24,Th,Kt,Tl,Temp_24,LUEmax):
"""
Function to calculate the biomass production and water productivity
"""
Ksolar = Ra_mountain_24 * Transm_24
# Incident Photosynthetically active radiation (PAR, MJ/m2) per time period
PAR = 0.48 * Ksolar
# Absorbed Photosynthetically Active Radiation (APAR) by the vegetation:
APAR = FPAR * PAR
vapor_stress = 0.88 - 0.183 * np.log(esat_24 - eact_24)
vapor_stress_biomass = vapor_stress.clip(0.0, 1.0)
Jarvis_coeff = (Th - Kt) / (Kt - Tl)
heat_stress_biomass = ((Temp_24 - Tl) * np.power(Th - Temp_24, Jarvis_coeff) / ((Kt - Tl) * np.power(Th - Kt, Jarvis_coeff)))
print('vapor stress biomass =', '%0.3f' % np.nanmean(vapor_stress_biomass))
print('heat stress biomass =', '%0.3f' % np.nanmean(heat_stress_biomass))
# Light use efficiency, reduced below its potential value by low
# temperature or water shortage:
LUE = (LUEmax * heat_stress_biomass * vapor_stress_biomass * moisture_stress_biomass)
# Dry matter production (kg/ha/d):
Biomass_prod = APAR * LUE * 0.864 # C3 vegetation
# Water productivity
Biomass_wp = Biomass_prod/ (ETA_24 * 10) # C3 vegetation
Biomass_wp[ETA_24 == 0.0] = 0.0
# Water deficit
Biomass_deficit = (Biomass_prod / moisture_stress_biomass -
Biomass_prod)
return(LUE,Biomass_prod,Biomass_wp,Biomass_deficit)
#------------------------------------------------------------------------------
def Classify_Irrigation(moisture_stress_biomass, vegt_cover):
'''
    This function classifies each pixel into 4 categories of irrigation need
'''
for_irrigation = np.copy(moisture_stress_biomass)
    # make a discrete irrigation needs map with the following categories
# Irrigation needs:
# 0: No need for irrigation
# 1: Perhaps irrigate
# 2: Irrigate
# 3: Irrigate immediately
    irrigation_needs = np.copy(for_irrigation)
    irrigation_needs[for_irrigation >= 1.0] = 0.0
    irrigation_needs[np.logical_and(for_irrigation >= 0.9, for_irrigation < 1.0)] = 1.0
    irrigation_needs[np.logical_and(for_irrigation >= 0.8, for_irrigation < 0.9)] = 2.0
    irrigation_needs[for_irrigation < 0.8] = 3.0
irrigation_needs[vegt_cover <= 0.3] = 0.0
return(irrigation_needs)
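#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical values): Classify_Irrigation on a 2 x 2
# grid of moisture-stress and vegetation-cover values, illustrating the four
# classes listed in the comment above (0 = no need ... 3 = irrigate immediately).
def _example_classify_irrigation():
    import numpy as np
    moisture_stress = np.array([[1.05, 0.95], [0.85, 0.60]])
    vegt_cover = np.array([[0.50, 0.50], [0.50, 0.20]])
    needs = Classify_Irrigation(moisture_stress, vegt_cover)
    # expected classes: [[0, 1], [2, 0]] -- the last pixel is masked out
    # because its vegetation cover is below 0.3
    print(needs)
    return needs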
#------------------------------------------------------------------------------
def Separate_E_T(Light_use_extinction_factor,LAI,ETP_24,Theta_res_top,Theta_res_sub, Theta_sat_top, Theta_sat_sub, top_soil_moisture,sl_es_24, Psychro_c,moisture_stress_biomass_first,vegt_cover,ETA_24,SM_stress_trigger,root_zone_moisture_first,total_soil_moisture):
'''
    Separate the evapotranspiration into evaporation and transpiration
'''
# constants
Tpot_24_estimate=(1-np.exp(-Light_use_extinction_factor*LAI))*ETP_24
SE_top = (top_soil_moisture-Theta_res_top)/(Theta_sat_top-Theta_res_top)
Eact_24_estimate=np.minimum(1,1 / np.power(SE_top + 0.1,-2.0))*(ETP_24-Tpot_24_estimate)
#RS_soil = RS_soil_min * np.power(SE_top,-2.0)
#Eact_24_estimate=(sl_es_24+Psychro_c*(1+RS_soil_min/Rah_PM))/(sl_es_24+Psychro_c*(1+RS_soil/Rah_PM))*(ETP_24-Tpot_24_estimate)
n66_memory = moisture_stress_biomass_first * Tpot_24_estimate
    # calculate the first estimate of the actual daily transpiration
Tact_24_estimate = np.copy(n66_memory)
Tact_24_estimate[n66_memory > 0.99*ETA_24]=ETA_24[n66_memory > 0.99*ETA_24]
Tact_24_estimate[vegt_cover == 0.0] = 0.0
    # calculate the second and final estimate of the actual daily transpiration
Tact_24 = np.abs((Tact_24_estimate/(Tact_24_estimate + Eact_24_estimate))*ETA_24)
# calculate the actual daily potential transpiration
Tpot_24 = np.copy(Tpot_24_estimate)
Tpot_24[Tpot_24_estimate < Tact_24] = Tact_24[Tpot_24_estimate < Tact_24]
# calculate moisture stress biomass
moisture_stress_biomass = Tact_24 / Tpot_24
# Calculate root zone moisture final
Se_Poly=2.23*np.power(moisture_stress_biomass,3)-3.35*np.power(moisture_stress_biomass,2)+1.98*moisture_stress_biomass+0.07
root_zone_moisture1=Se_Poly*(SM_stress_trigger+0.02-Theta_res_sub)+Theta_res_sub
root_zone_moisture_final=np.where(root_zone_moisture1>root_zone_moisture_first,root_zone_moisture1,root_zone_moisture_first)
# Calculate top zone moisture final
top_zone_moisture1=(total_soil_moisture-root_zone_moisture_final*vegt_cover)/(1-vegt_cover)
top_zone_moisture_final=top_zone_moisture1.clip(Theta_res_top,Theta_sat_top)
# calculate the actual daily evaporation
Eact_24 = ETA_24 - Tact_24
# calculate the Transpiration deficit
T24_deficit = Tpot_24 - Tact_24
# calculate the beneficial fraction
beneficial_fraction=Tact_24 / ETA_24
beneficial_fraction[ETA_24 == 0.0] = 0.0
return(Eact_24,Tpot_24,Tact_24,moisture_stress_biomass,T24_deficit,beneficial_fraction,root_zone_moisture_final,top_zone_moisture_final)
#------------------------------------------------------------------------------
def Calc_Soil_Moisture(ETA_24,EF_inst,QC_Map, water_mask,vegt_cover,Theta_sat_top, Theta_sat_sub,Theta_res_top, Theta_res_sub,depl_factor,Field_Capacity,FPAR, Soil_moisture_wilting_point):
"""
Function to calculate soil characteristics
"""
# constants:
Veg_Cover_Threshold_RZ = 0.9 # Threshold vegetation cover for root zone moisture
# Average fraction of TAW that can be depleted from the root zone
# before stress:
p_factor = depl_factor + 0.04 * (5.0 - ETA_24) # page 163 of FAO 56
# The factor p differs from one crop to another. It normally varies from
# 0.30 for shallow rooted plants at high rates of ETc (> 8 mm d-1)
# to 0.70 for deep rooted plants at low rates of ETc (< 3 mm d-1)
# Critical value under which plants get stressed:
SM_stress_trigger = Field_Capacity - p_factor * (Field_Capacity - Soil_moisture_wilting_point)
EF_inst[EF_inst >= 1.0] = 0.999
# Total soil water content (cm3/cm3):
total_soil_moisture = Theta_sat_sub * np.exp((EF_inst - 1.0) / 0.421) # asce paper Scott et al. 2003
total_soil_moisture[np.logical_or(water_mask == 1.0,QC_Map == 1.0)] = 1.0 # In water and snow is 1
total_soil_moisture[QC_Map == 1.0] = np.nan # Where clouds no data
# Root zone soil moisture:
RZ_SM = np.copy(total_soil_moisture)
RZ_SM[vegt_cover <= Veg_Cover_Threshold_RZ] = np.nan
if np.isnan(np.nanmean(RZ_SM)) == True:
Veg_Cover_Threshold_RZ = np.nanpercentile(vegt_cover, 80)
RZ_SM = np.copy(total_soil_moisture)
RZ_SM[vegt_cover <= Veg_Cover_Threshold_RZ] = np.nan
        print('No RZ_SM, so the vegetation cover threshold for RZ is adjusted from 0.9 to', '%0.3f' % Veg_Cover_Threshold_RZ)
#RZ_SM = RZ_SM.clip(Theta_res, (0.85 * Theta_sat))
#RZ_SM[np.logical_or(water_mask == 1.0, water_mask == 2.0)] = 1.0
RZ_SM_NAN = np.copy(RZ_SM)
RZ_SM_NAN[RZ_SM==0] = np.nan
RZ_SM_min = np.nanmin(RZ_SM_NAN)
RZ_SM_max = np.nanmax(RZ_SM_NAN)
RZ_SM_mean = np.nanmean(RZ_SM_NAN)
print('Root Zone Soil moisture mean =', '%0.3f (cm3/cm3)' % RZ_SM_mean)
print('Root Zone Soil moisture min =', '%0.3f (cm3/cm3)' % RZ_SM_min)
print('Root Zone Soil moisture max =', '%0.3f (cm3/cm3)' % RZ_SM_max)
Max_moisture_RZ = vegt_cover * (RZ_SM_max - RZ_SM_mean) + RZ_SM_mean
# Soil moisture in the top (temporary)
top_soil_moisture_temp = np.copy(total_soil_moisture)
top_soil_moisture_temp[np.logical_or(vegt_cover <= 0.02, vegt_cover >= 0.1)] = 0
top_soil_moisture_temp[top_soil_moisture_temp == 0] = np.nan
top_soil_moisture_std = np.nanstd(top_soil_moisture_temp)
top_soil_moisture_mean = np.nanmean(top_soil_moisture_temp)
print('Top Soil moisture mean =', '%0.3f (cm3/cm3)' % top_soil_moisture_mean)
print('Top Soil moisture Standard Deviation', '%0.3f (cm3/cm3)' % top_soil_moisture_std)
# calculate root zone moisture
root_zone_moisture_temp = (total_soil_moisture - (top_soil_moisture_mean + top_soil_moisture_std) * (1-vegt_cover))/vegt_cover # total soil moisture = soil moisture no vegtatation *(1-vegt_cover)+soil moisture root zone * vegt_cover
try:
root_zone_moisture_temp[root_zone_moisture_temp <= Theta_res_sub] = Theta_res_sub[root_zone_moisture_temp <= Theta_res_sub]
except:
root_zone_moisture_temp[root_zone_moisture_temp <= Theta_res_sub] = Theta_res_sub
root_zone_moisture_temp[root_zone_moisture_temp >= Max_moisture_RZ] = Max_moisture_RZ[root_zone_moisture_temp >= Max_moisture_RZ]
root_zone_moisture_first = np.copy(root_zone_moisture_temp)
root_zone_moisture_first[np.logical_or(QC_Map ==1.0 ,np.logical_or(water_mask == 1.0, vegt_cover < 0.0))] = 0
# Normalized stress trigger:
norm_trigger = (root_zone_moisture_first - Soil_moisture_wilting_point)/ (SM_stress_trigger + 0.02 - Soil_moisture_wilting_point)
norm_trigger[norm_trigger > 1.0] = 1.0
# moisture stress biomass:
moisture_stress_biomass_first = norm_trigger - (np.sin(2 * np.pi * norm_trigger)) / (2 * np.pi)
moisture_stress_biomass_first=np.where(moisture_stress_biomass_first<0.5*FPAR,0.5*FPAR,moisture_stress_biomass_first)
moisture_stress_biomass_first[moisture_stress_biomass_first <= 0.0] = 0
moisture_stress_biomass_first[moisture_stress_biomass_first > 1.0] = 1.0
# Soil moisture in the top layer - Recalculated ??
top_soil_moisture = ((total_soil_moisture - root_zone_moisture_first * vegt_cover) / (1.0 - vegt_cover))
try:
top_soil_moisture[top_soil_moisture > Theta_sat_top] = Theta_sat_top [top_soil_moisture > Theta_sat_top]
except:
top_soil_moisture[top_soil_moisture > Theta_sat_top] = Theta_sat_top
top_soil_moisture[np.logical_or(water_mask == 1.0, QC_Map == 1.0)] = 1.0
return(SM_stress_trigger, total_soil_moisture, root_zone_moisture_first, moisture_stress_biomass_first,top_soil_moisture,RZ_SM_NAN)
#------------------------------------------------------------------------------
def Calc_Bulk_surface_resistance(sl_es_24,Rn_24,Refl_rad_water,air_dens,esat_24,eact_24,rah_pm_act,ETA_24,Lhv,Psychro_c):
"""
Function to calculate the bulk surface resistance
"""
# Bulk surface resistance (s/m):
bulk_surf_resis_24 = ((((sl_es_24 * (Rn_24 - Refl_rad_water) + air_dens *
1004 * (esat_24 - eact_24) / rah_pm_act) / (ETA_24 * Lhv / 86400) -
sl_es_24) / Psychro_c - 1.0) * rah_pm_act)
bulk_surf_resis_24[ETA_24 <= 0.0] = 100000.0
bulk_surf_resis_24 = bulk_surf_resis_24.clip(0.0, 100000.0)
return(bulk_surf_resis_24)
#------------------------------------------------------------------------------
def Calc_ETact(esat_24,eact_24,EF_inst,Rn_24,Refl_rad_water,Lhv):
"""
Function to calculate the daily evaporation
"""
# Advection factor ???
AF = 1 + 0.985 * (np.exp((esat_24 - eact_24) * 0.08) - 1.0) * EF_inst
# Daily evapotranspiration:
ETA_24 = EF_inst * AF * (Rn_24 - Refl_rad_water) / (Lhv * 1000) * 86400000
ETA_24=ETA_24.clip(0,15.0)
return(ETA_24, AF)
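#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical daily inputs): Calc_ETact with vapour
# pressures in kPa, net radiation in W/m2 and Lhv in J/kg. The numbers below are
# illustrative only; the result should be a plausible daily ET of a few mm/day.
def _example_calc_etact():
    import numpy as np
    esat_24 = np.array([3.0])           # saturated vapour pressure (kPa)
    eact_24 = np.array([1.5])           # actual vapour pressure (kPa)
    EF_inst = np.array([0.6])           # instantaneous evaporative fraction (-)
    Rn_24 = np.array([150.0])           # daily net radiation (W/m2)
    Refl_rad_water = np.array([0.0])    # no open water in this pixel
    Lhv = 2.45e6                        # latent heat of vaporization (J/kg)
    ETA_24, AF = Calc_ETact(esat_24, eact_24, EF_inst, Rn_24, Refl_rad_water, Lhv)
    print('advection factor AF =', AF)
    print('daily ET (mm/day)   =', ETA_24)
    return ETA_24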
#------------------------------------------------------------------------------
def Calc_instantaneous_ET_fraction(LE_inst,rn_inst,g_inst):
"""
Function to calculate the evaporative fraction
"""
EF_inst = LE_inst / (rn_inst - g_inst) # Evaporative fraction
EF_inst = EF_inst.clip(0.0, 1.8)
EF_inst[LE_inst<0] = 0
return(EF_inst)
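#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical instantaneous fluxes in W/m2):
# EF = LE / (Rn - G), clipped to [0, 1.8], with negative LE forced to 0.
def _example_ef_inst():
    import numpy as np
    LE_inst = np.array([300.0, -20.0])   # latent heat flux
    rn_inst = np.array([500.0, 450.0])   # net radiation
    g_inst = np.array([50.0, 60.0])      # soil heat flux
    EF_inst = Calc_instantaneous_ET_fraction(LE_inst, rn_inst, g_inst)
    print(EF_inst)   # first pixel ~0.67, second forced to 0 (negative LE)
    return EF_inst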
#------------------------------------------------------------------------------
def Calc_Ref_Pot_ET(LAI,Surface_temp,sl_es_24,Rn_ref,air_dens,esat_24,eact_24,rah_grass,Psychro_c,Rn_24,Refl_rad_water,rah_pm_pot,rl):
"""
    Function to calculate the reference and potential evapotranspiration
"""
# Effective leaf area index involved, see Allen et al. (2006):
LAI_eff = LAI / (0.3 * LAI + 1.2)
rs_min = rl / LAI_eff # Min (Bulk) surface resistance (s/m)
# Latent heat of vaporization (J/kg):
# Lhv = (2.501 - 2.361e-3 * (Surface_temp - 273.15)) * 1E6
Lhv = 2.45 * 1E6
# Reference evapotranspiration- grass
# Penman-Monteith of the combination equation (eq 3 FAO 56) (J/s/m2)
LET_ref_24 = ((sl_es_24 * Rn_ref + air_dens * 1004 * (esat_24 - eact_24) /
rah_grass) / (sl_es_24 + Psychro_c * (1 + 70.0/rah_grass)))
    # Reference evapotranspiration (mm/d):
ETref_24 = LET_ref_24 / (Lhv * 1000) * 86400000
# Potential evapotranspiration
# Penman-Monteith of the combination equation (eq 3 FAO 56) (J/s/m2)
LETpot_24 = ((sl_es_24 * (Rn_24 - Refl_rad_water) + air_dens * 1004 *
(esat_24 - eact_24)/rah_pm_pot) / (sl_es_24 + Psychro_c * (1 + rs_min/rah_pm_pot)))
    # Potential evapotranspiration (mm/d)
ETpot_24 = LETpot_24 / (Lhv * 1000) * 86400000
ETpot_24[ETpot_24 > 15.0] = 15.0
return(ETpot_24,ETref_24,Lhv,rs_min)
#------------------------------------------------------------------------------
def Calc_Rn_Ref(shape_lsc,water_mask,Rn_24,Ra_mountain_24,Transm_24,Rnl_24_FAO,Wind_24):
"""
    Function to calculate the net radiation for the reference (grass) surface and the reflected radiation at water surfaces
"""
# constants:
G24_water = 0.1 # G24 ratio for water - reflectivity?
# Reflected radiation at water surface: ??
Refl_rad_water = np.zeros((shape_lsc[1], shape_lsc[0]))
Refl_rad_water = np.where(water_mask != 0.0, G24_water * Rn_24, 0.0)
# Aerodynamic resistance (s/m) for grass surface:
rah_grass = 208.0 / Wind_24
print('rah_grass=', '%0.3f (s/m)' % np.nanmean(rah_grass))
# Net radiation for grass Rn_ref, eq 40, FAO56:
Rn_ref = Ra_mountain_24 * Transm_24 * (1 - 0.23) - Rnl_24_FAO # Rnl avg(fao-slob)?
return(Rn_ref, Refl_rad_water,rah_grass)
#------------------------------------------------------------------------------
def Iterate_Friction_Velocity(k_vk,u_200,Surf_roughness,g_inst,rn_inst, ts_dem, ts_dem_hot, ts_dem_cold,air_dens, Surface_temp,L,psi,psi_m200,psi_m200_stable,QC_Map, hot_pixels, slope):
"""
    Function to correct the friction velocity and aerodynamic resistance during the iterative stability correction; the output is used as the input for the next iteration
"""
# Sensible heat 2 (Step 6)
# Corrected value for the friction velocity, unstable
ustar_corr_unstable = (k_vk * u_200 / (np.log(200.0 / Surf_roughness) -
psi_m200))
# Corrected value for the friction velocity, stable
ustar_corr_stable = (k_vk * u_200 / (np.log(200.0 / Surf_roughness) -
psi_m200_stable))
ustar_corr = np.where(L > 0.0, ustar_corr_stable, ustar_corr_unstable)
ustar_corr[ustar_corr < 0.02] = 0.02
rah_corr_unstable = (np.log(2.0/0.01) - psi) / (k_vk * ustar_corr) # unstable
rah_corr_stable = (np.log(2.0/0.01) - 0.0) / (k_vk * ustar_corr) # stable
rah_corr = np.where(L > 0.0, rah_corr_stable, rah_corr_unstable)
L_corr, psi_m200_corr_stable, psi_corr, psi_m200_corr,h,dT, slope_dt, offset_dt = sensible_heat(
rah_corr, ustar_corr, rn_inst, g_inst, ts_dem, ts_dem_hot, ts_dem_cold,
air_dens, Surface_temp, k_vk,QC_Map, hot_pixels, slope)
return(L_corr,psi_corr,psi_m200_corr,psi_m200_corr_stable,h,ustar_corr,rah_corr,dT,slope_dt, offset_dt)
#------------------------------------------------------------------------------
def Calc_Wind_Speed_Friction(h_obst,Wind_inst,zx,LAI,NDVI,Surf_albedo,water_mask,surf_roughness_equation_used):
"""
    Function to calculate the wind speed and friction velocity by using the Raupach or NDVI model
"""
# constants
    k_vk = 0.41      # Von Karman constant
h_grass = 0.12 # Grass height (m)
cd = 53 # Free parameter for displacement height, default = 20.6
# 1) Raupach model
zom_Raupach,disp_height=Raupach_Model(h_obst,cd,LAI,Surf_albedo,water_mask,k_vk)
# 2) NDVI model
zom_NDVI=NDVI_Model(NDVI,Surf_albedo,water_mask)
if surf_roughness_equation_used == 1:
Surf_roughness = zom_NDVI
else:
Surf_roughness = zom_Raupach
zom_grass = 0.123 * h_grass
# Friction velocity for grass (m/s):
ustar_grass = k_vk * Wind_inst / np.log(zx / zom_grass)
print('u*_grass = ', '%0.3f (m/s)' % np.mean(ustar_grass))
# Wind speed (m/s) at the "blending height" (200m):
u_200 = ustar_grass * np.log(200 / zom_grass) / k_vk
print('Wind speed at the blending height, u200 =', '%0.3f (m/s)' % np.mean(u_200))
# Friction velocity (m/s):
ustar_1 = k_vk * u_200 / np.log(200 / Surf_roughness)
return(Surf_roughness,u_200,ustar_1,disp_height)
#------------------------------------------------------------------------------
def Raupach_Model(h_obst,cd,LAI,Surf_albedo,water_mask,k_vk):
"""
Function for the Raupach model to calculate the surface roughness
"""
psi = np.log(2.0) - 1 + np.power(2.0, -1) # Vegetation influence function
# Displacement height:
disp_height = h_obst * (1 - (1 - np.exp(-np.power(cd * LAI, 0.5))) /
np.power(cd * LAI, 0.5))
# Drag coefficient
Cs = np.power(k_vk, 2) / np.power(np.log((h_obst-disp_height) / 0.01) +
psi, 2)
ratio = np.power(Cs + 0.35 * LAI/2, 0.5) # uh/u
zom_Raupach = (h_obst - disp_height) / np.exp(k_vk * ratio - psi)
return(zom_Raupach, disp_height)
#------------------------------------------------------------------------------
def NDVI_Model(NDVI,Surf_albedo,water_mask):
"""
Function for the NDVI model to calculate the surface roughness
"""
zom_NDVI = np.exp(1.096 * NDVI / Surf_albedo - 5.307)
zom_NDVI[water_mask == 1.0] = 0.001
zom_NDVI[zom_NDVI > 10.0] = 10.0
return(zom_NDVI)
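#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical NDVI / albedo values): surface roughness
# from the NDVI model above; water pixels are forced to 0.001 m and the result
# is capped at 10 m.
def _example_ndvi_roughness():
    import numpy as np
    NDVI = np.array([0.2, 0.7, 0.4])
    Surf_albedo = np.array([0.20, 0.18, 0.22])
    water_mask = np.array([0.0, 0.0, 1.0])
    zom = NDVI_Model(NDVI, Surf_albedo, water_mask)
    print('z0m (m):', zom)
    return zom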
#------------------------------------------------------------------------------
def Correct_Surface_Temp(Temp_24,Surface_temp,Temp_lapse_rate,DEM_resh,Pair,dr,Transm_corr,cos_zn,Sun_elevation,deg2rad,ClipLandsat):
"""
Function to correct the surface temperature based on the DEM map
"""
#constants:
Gsc = 1367 # Solar constant (W / m2)
cos_zenith_flat = np.cos((90 - Sun_elevation) * deg2rad)
Temp_corr = Surface_temp + Temp_lapse_rate * DEM_resh # rescale everything to sea level
Temp_corr[Surface_temp == 350.0] = 0.0
#air_dens = 1000 * Pair / (1.01 * Surface_temp * 287)
air_dens = 1000 * Pair / (1.01 * (Temp_24 + 273.15) * 287)
#
ts_dem = (Temp_corr + (Gsc * dr * Transm_corr * cos_zn -
Gsc * dr * Transm_corr * cos_zenith_flat) / (air_dens * 1004 * 0.050))
#(Temp_corr - (Gsc * dr * Transm_corr * cos_zn -
# Gsc * dr * Transm_corr * cos_zenith_flat) / (air_dens * 1004 * 0.050))
ts_dem[ClipLandsat==1]=np.nan
ts_dem[ts_dem==0]=np.nan
ts_dem=ts_dem.clip(273,350)
return(ts_dem,air_dens,Temp_corr)
#------------------------------------------------------------------------------
def Calc_Hot_Pixels(ts_dem,QC_Map, water_mask, NDVI,NDVIhot_low,NDVIhot_high,Hot_Pixel_Constant):
"""
    Function to calculate the hot pixels based on the surface temperature and NDVI
"""
for_hot = np.copy(ts_dem)
for_hot[NDVI <= NDVIhot_low] = np.nan
for_hot[NDVI >= NDVIhot_high] = np.nan
for_hot_thres = np.nanpercentile(for_hot, 99.5)
for_hot[np.logical_or(water_mask != 0.0, QC_Map != 0.0)] = 0.0
hot_pixels = np.copy(for_hot)
hot_pixels[for_hot < 273.0] = np.nan
hot_pixels[for_hot < for_hot_thres] = np.nan
    ts_dem_hot_min = np.nanmin(hot_pixels)    # Min
ts_dem_hot_max = np.nanmax(hot_pixels) # Max
ts_dem_hot_mean = np.nanmean(hot_pixels) # Mean
ts_dem_hot_std = np.nanstd(hot_pixels) # Standard deviation
#ts_dem_hot = ts_dem_hot_max - 0.25 * ts_dem_hot_std
#ts_dem_hot = (ts_dem_hot_max + ts_dem_hot_mean)/2
ts_dem_hot=ts_dem_hot_mean + Hot_Pixel_Constant * ts_dem_hot_std
print('hot : min= %0.3f (Kelvin)' % ts_dem_hot_min, 'hot : max= %0.3f (Kelvin)' % ts_dem_hot_max, ', sd= %0.3f (Kelvin)' % ts_dem_hot_std, \
', mean= %0.3f (Kelvin)' % ts_dem_hot_mean, ', value= %0.3f (Kelvin)' % ts_dem_hot)
return(ts_dem_hot,hot_pixels)
"""
#------------------------------------------------------------------------------
def Calc_Hot_Pixels(ts_dem,QC_Map, water_mask, NDVI,NDVIhot_low,NDVIhot_high,Hot_Pixel_Constant):
#Function to calculates the hot pixels based on the surface temperature and NDVI
for_hot = np.copy(ts_dem)
for_hot[NDVI <= NDVIhot_low] = np.nan
for_hot[NDVI >= NDVIhot_high] = np.nan
for_hot[np.logical_or(water_mask != 0.0, QC_Map != 0.0)] = 0.0
hot_pixels = np.copy(for_hot)
hot_pixels[for_hot < 273.0] = np.nan
ts_dem_hot_max = np.nanmax(hot_pixels) # Max
ts_dem_hot_mean = np.nanmean(hot_pixels) # Mean
ts_dem_hot_std = np.nanstd(hot_pixels) # Standard deviation
#ts_dem_hot = ts_dem_hot_max - 0.25 * ts_dem_hot_std
#ts_dem_hot = (ts_dem_hot_max + ts_dem_hot_mean)/2
ts_dem_hot=ts_dem_hot_mean + Hot_Pixel_Constant * ts_dem_hot_std
print('hot : max= %0.3f (Kelvin)' % ts_dem_hot_max, ', sd= %0.3f (Kelvin)' % ts_dem_hot_std, \
', mean= %0.3f (Kelvin)' % ts_dem_hot_mean, ', value= %0.3f (Kelvin)' % ts_dem_hot)
return(ts_dem_hot,hot_pixels)
#------------------------------------------------------------------------------
def Calc_Hot_Pixels(ts_dem,QC_Map, water_mask, NDVI,NDVIhot_low,NDVIhot_high,Hot_Pixel_Constant):
#Function to calculates the hot pixels based on the surface temperature and NDVI
for_hot = np.copy(ts_dem)
for_hot[for_hot <= NDVIhot_low] = np.nan
for_hot[for_hot >= NDVIhot_high] = np.nan
for_hot[np.logical_or(water_mask != 0.0, QC_Map != 0.0)] = 0.0
hot_pixels = np.copy(for_hot)
hot_pixels[for_hot < 273.0] = np.nan
ts_dem_hot_max = np.nanmax(hot_pixels) # Max
ts_dem_hot_mean = np.nanmean(hot_pixels) # Mean
ts_dem_hot_std = np.nanstd(hot_pixels) # Standard deviation
#ts_dem_hot = ts_dem_hot_max - 0.25 * ts_dem_hot_std
#ts_dem_hot = (ts_dem_hot_max + ts_dem_hot_mean)/2
ts_dem_hot=ts_dem_hot_mean + Hot_Pixel_Constant * ts_dem_hot_std
print('hot : max= %0.3f (Kelvin)' % ts_dem_hot_max, ', sd= %0.3f (Kelvin)' % ts_dem_hot_std, \
', mean= %0.3f (Kelvin)' % ts_dem_hot_mean, ', value= %0.3f (Kelvin)' % ts_dem_hot)
return(ts_dem_hot,hot_pixels)
"""
#------------------------------------------------------------------------------
def Calc_Cold_Pixels(ts_dem,tcoldmin,tcoldmax,water_mask,QC_Map,ts_dem_cold_veg,Cold_Pixel_Constant):
"""
    Function to calculate the cold pixels based on the surface temperature
"""
for_cold = np.copy(ts_dem)
for_cold[water_mask != 1.0] = 0.0
for_cold[QC_Map != 0] = 0.0
cold_pixels = np.copy(for_cold)
cold_pixels[for_cold < tcoldmin] = np.nan
cold_pixels[for_cold > tcoldmax] = np.nan
# cold_pixels[for_cold < 285.0] = 285.0
ts_dem_cold_std = np.nanstd(cold_pixels) # Standard deviation
ts_dem_cold_min = np.nanmin(cold_pixels) # Min
ts_dem_cold_max = np.nanmax(cold_pixels)
ts_dem_cold_mean = np.nanmean(cold_pixels) # Mean
    # If the average temperature is below zero or nan, then use the vegetation cold pixel
    if ts_dem_cold_mean <= 0.0 or np.isnan(ts_dem_cold_mean):
        ts_dem_cold = ts_dem_cold_veg
    else:
        ts_dem_cold = ts_dem_cold_mean
# ts_dem_cold = ts_dem_cold_mean + Cold_Pixel_Constant * ts_dem_cold_std
if ts_dem_cold > ts_dem_cold_veg:
ts_dem_cold = ts_dem_cold_veg
print('cold water: min=%0.3f (Kelvin)' %ts_dem_cold_min , 'max=%0.3f (Kelvin)' %ts_dem_cold_max, ', sd= %0.3f (Kelvin)' % ts_dem_cold_std, \
', mean= %0.3f (Kelvin)' % ts_dem_cold_mean, ', value= %0.3f (Kelvin)' % ts_dem_cold)
return(ts_dem_cold,cold_pixels,ts_dem_cold_mean)
#------------------------------------------------------------------------------
def Calc_Cold_Pixels_Veg(NDVI,NDVI_max,NDVI_std,QC_Map,ts_dem,Image_Type, Cold_Pixel_Constant):
"""
    Function to calculate the cold pixels based on vegetation
"""
cold_pixels_vegetation = np.copy(ts_dem)
cold_pixels_vegetation[np.logical_or(NDVI <= (NDVI_max-0.1*NDVI_std),QC_Map != 0.0)] = 0.0 #(ORI = 0.1*NDVI_std)
cold_pixels_vegetation[cold_pixels_vegetation==0.0] = np.nan
ts_dem_cold_std_veg = np.nanstd(cold_pixels_vegetation)
ts_dem_cold_min_veg = np.nanmin(cold_pixels_vegetation)
ts_dem_cold_max_veg = np.nanmax(cold_pixels_vegetation)
ts_dem_cold_mean_veg = np.nanmean(cold_pixels_vegetation)
if Image_Type == 1:
ts_dem_cold_veg = ts_dem_cold_mean_veg + Cold_Pixel_Constant * ts_dem_cold_std_veg
if Image_Type == 2:
ts_dem_cold_veg = ts_dem_cold_mean_veg + Cold_Pixel_Constant * ts_dem_cold_std_veg
if Image_Type == 3:
ts_dem_cold_veg = ts_dem_cold_mean_veg + Cold_Pixel_Constant * ts_dem_cold_std_veg
print('cold vegetation: min=%0.3f (Kelvin)' %ts_dem_cold_min_veg , 'max=%0.3f (Kelvin)' %ts_dem_cold_max_veg, ',sd= %0.3f (Kelvin)' % ts_dem_cold_std_veg, \
', mean= %0.3f (Kelvin)' % ts_dem_cold_mean_veg, ', value= %0.3f (Kelvin)' % ts_dem_cold_veg)
return(ts_dem_cold_veg)
#------------------------------------------------------------------------------
def Calc_Meteo(Rs_24,eact_24,Temp_24,Surf_albedo,cos_zn,dr,tir_emis,Surface_temp,water_mask,NDVI,Transm_24,SB_const,lw_in_inst,Rs_in_inst):
"""
    Calculates the daily and instantaneous net radiation and the instantaneous soil heat flux.
"""
# Net shortwave radiation (W/m2):
Rns_24 = Rs_24 * (1 - Surf_albedo)
# Net outgoing longwave radiation (W/m2):
Rnl_24_FAO = (SB_const * np.power(Temp_24 + 273.15, 4) * (0.34-0.14 *
np.power(eact_24, 0.5)) * (1.35 * Transm_24 / 0.8 - 0.35))
Rnl_24_Slob = 110 * Transm_24
    print('Mean Daily Net Longwave Radiation (Slob) = %0.3f (W/m2)' % np.nanmean(Rnl_24_Slob))
# Net 24 hrs radiation (W/m2):
Rn_24_FAO = Rns_24 - Rnl_24_FAO # FAO equation
Rn_24_Slob = Rns_24 - Rnl_24_Slob # Slob equation
Rn_24 = (Rn_24_FAO + Rn_24_Slob) / 2 # Average
# Instantaneous outgoing longwave radiation:
lw_out_inst = tir_emis * SB_const * np.power(Surface_temp, 4)
# Instantaneous net radiation
rn_inst = (Rs_in_inst * (1 - Surf_albedo) + lw_in_inst - lw_out_inst -
(1 - tir_emis) * lw_in_inst)
# Instantaneous Soil heat flux
g_inst = np.where(water_mask != 0.0, 0.4 * rn_inst,
((Surface_temp - 273.15) * (0.0038 + 0.0074 * Surf_albedo) *
(1 - 0.978 * np.power(NDVI, 4))) * rn_inst)
return(Rn_24,rn_inst,g_inst,Rnl_24_FAO)
#------------------------------------------------------------------------------
def Calc_surface_temp(Temp_inst,Landsat_nr,Lmax,Lmin,therm_data,b10_emissivity,k1_c,k2_c,eact_inst,shape_lsc,water_mask_temp,Bands_thermal,Rp,tau_sky,surf_temp_offset,Image_Type):
"""
    Calculates the surface temperature and creates a cloud mask
    """
    # Spectral radiance for the thermal band(s)
if Landsat_nr == 8:
if Bands_thermal == 1:
k1 = k1_c[0]
k2 = k2_c[0]
L_lambda_b10 = (Lmax[-1] - Lmin[-1]) / (65535-1) * therm_data[:, :, 0] + Lmin[-1]
# Get Temperature
Temp_TOA = Get_Thermal(L_lambda_b10,Rp,Temp_inst,tau_sky,b10_emissivity,k1,k2)
elif Bands_thermal == 2:
L_lambda_b10 = (Lmax[-2] - Lmin[-2]) / (65535-1) * therm_data[:, :, 0] + Lmin[-2]
L_lambda_b11 = (Lmax[-1] - Lmin[-1]) / (65535-1) * therm_data[:, :, 1] + Lmin[-1]
# Brightness temperature
# From Band 10:
Temp_TOA_10 = (k2_c[0] / np.log(k1_c[0] / L_lambda_b10 + 1.0))
# From Band 11:
Temp_TOA_11 = (k2_c[1] / np.log(k1_c[1] / L_lambda_b11 + 1.0))
# Combined:
Temp_TOA = (Temp_TOA_10 + 1.378 * (Temp_TOA_10 - Temp_TOA_11) +
0.183 * np.power(Temp_TOA_10 - Temp_TOA_11, 2) - 0.268 +
(54.30 - 2.238 * eact_inst) * (1 - b10_emissivity))
elif Landsat_nr == 7:
k1=666.09
k2=1282.71
L_lambda_b6 = (Lmax[-1] - Lmin[-1]) / (256-1) * therm_data[:, :, 0] + Lmin[-1]
# Brightness temperature - From Band 6:
Temp_TOA = Get_Thermal(L_lambda_b6,Rp,Temp_inst,tau_sky,b10_emissivity,k1,k2)
elif Landsat_nr == 5:
k1=607.76
k2=1260.56
L_lambda_b6 = ((Lmax[-1] - Lmin[-1]) / (256-1) * therm_data[:, :, 0] +
Lmin[-1])
# Brightness temperature - From Band 6:
Temp_TOA = Get_Thermal(L_lambda_b6,Rp,Temp_inst,tau_sky,b10_emissivity,k1,k2)
# Surface temperature
Surface_temp = Temp_TOA
Surface_temp = Surface_temp.clip(230.0, 360.0)
# Cloud mask:
temp_water = np.zeros((shape_lsc[1], shape_lsc[0]))
temp_water = np.copy(Surface_temp)
temp_water[water_mask_temp == 0.0] = np.nan
temp_water_sd = np.nanstd(temp_water) # Standard deviation
temp_water_mean = np.nanmean(temp_water) # Mean
print('Mean water temperature = ', '%0.3f (Kelvin)' % temp_water_mean)
print('SD water temperature = ', '%0.3f (Kelvin)' % temp_water_sd)
cloud_mask = np.zeros((shape_lsc[1], shape_lsc[0]))
cloud_mask[Surface_temp < np.minimum((temp_water_mean - 1.0 * temp_water_sd -
surf_temp_offset),290)] = 1.0
return(Surface_temp,cloud_mask)
#------------------------------------------------------------------------------
def Get_Thermal(lambda_b10,Rp,Temp_inst,tau_sky,TIR_Emissivity,k1,k2):
# Narrow band downward thermal radiation from clear sky, rsky (W/m2/sr/µm)
rsky = (1.807E-10 * np.power(Temp_inst + 273.15, 4) * (1 - 0.26 *
np.exp(-7.77E-4 * np.power((-Temp_inst), -2))))
print('Rsky = ', '%0.3f (W/m2/sr/µm)' % np.nanmean(rsky))
    # Corrected thermal radiance from the surface, Wukelic et al. (1989):
correc_lambda_b10 = ((lambda_b10 - Rp) / tau_sky -
(1.0 - TIR_Emissivity) * rsky)
# Brightness temperature - From Band 10:
Temp_TOA = (k2 / np.log(TIR_Emissivity * k1 /
correc_lambda_b10 + 1.0))
return(Temp_TOA)
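#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical inputs): brightness temperature for a
# single thermal radiance value. The k1/k2 calibration constants and the
# atmospheric terms below are illustrative only; in the workflow they come from
# the Landsat metadata and the atmospheric correction settings.
def _example_get_thermal():
    import numpy as np
    lambda_b10 = np.array([10.5])    # at-sensor thermal radiance (W/m2/sr/um)
    Rp = 0.91                        # path radiance (W/m2/sr/um), assumed
    Temp_inst = np.array([25.0])     # instantaneous air temperature (degC)
    tau_sky = 0.866                  # narrow-band atmospheric transmissivity, assumed
    TIR_Emissivity = np.array([0.98])
    k1, k2 = 774.89, 1321.08         # example band-10 constants (normally from the metadata)
    Temp_TOA = Get_Thermal(lambda_b10, Rp, Temp_inst, tau_sky, TIR_Emissivity, k1, k2)
    print('surface brightness temperature (K):', Temp_TOA)
    return Temp_TOA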
#------------------------------------------------------------------------------
def Calc_vegt_para(NDVI,SAVI,water_mask_temp,shape_lsc):
"""
Calculates the Fraction of PAR, Thermal infrared emissivity, Nitrogen, Vegetation Cover, LAI, b10_emissivity
"""
# Fraction of PAR absorbed by the vegetation canopy (FPAR):
FPAR = -0.161 + 1.257 * NDVI
FPAR[NDVI < 0.125] = 0.0
    # Thermal infrared emissivity
tir_emis = 1.009 + 0.047 * np.log(NDVI)
tir_emis[np.logical_or(water_mask_temp == 1.0, water_mask_temp == 2.0)] = 1.0
tir_emis[np.logical_and(NDVI < 0.125, water_mask_temp == 0.0)] = 0.92
# Vegetation Index - Regression model from Bagheri et al. (2013)
VI_NDVI = 38.764 * np.square(NDVI) - 24.605 * NDVI + 5.8103
VI_SAVI = 6.3707 * np.square(SAVI) - 2.8503 * SAVI + 1.6335
    VI = (VI_NDVI + VI_SAVI) / 2.0  # Average of the VI computed from NDVI and SAVI
# Nitrogen computation
Nitrogen = np.copy(VI)
Nitrogen[VI <= 0.0] = 0.0
Nitrogen[NDVI <= 0.0] = 0.0
# Vegetation cover:
vegt_cover = 1 - np.power((0.8 - NDVI)/(0.8 - 0.125), 0.7)
vegt_cover[NDVI < 0.125] = 0.0
vegt_cover[NDVI > 0.8] = 0.99
# Leaf Area Index (LAI)
LAI_1 = np.log(-(vegt_cover - 1)) / -0.45
LAI_1[LAI_1 > 8] = 8.0
LAI_2 = (9.519 * np.power(NDVI, 3) + 0.104 * np.power(NDVI, 2) +
1.236 * NDVI - 0.257)
LAI_3 = 11.0 * np.power(SAVI, 3)
LAI_3[SAVI >= 0.817] = 6.0
LAI_4 = -np.log((0.69 - SAVI) / 0.59) / 0.91 # For South. Idaho, empirical
LAI_4[SAVI < 0.0] = 0.0
LAI_4[SAVI >= 0.689] = 6.0
LAI = (LAI_1 + LAI_2 + LAI_3 + LAI_4) / 4.0 # Average LAI
LAI[LAI < 0.001] = 0.001
b10_emissivity = np.zeros((shape_lsc[1], shape_lsc[0]))
b10_emissivity = np.where(LAI <= 3.0, 0.95 + 0.01 * LAI, 0.98)
b10_emissivity[water_mask_temp != 0.0] = 1.0
return(FPAR,tir_emis,Nitrogen,vegt_cover,LAI,b10_emissivity)
#------------------------------------------------------------------------------
def Water_Mask(shape_lsc,Reflect):
"""
    Calculates a temporary water mask from the reflectance bands
"""
mask = np.zeros((shape_lsc[1], shape_lsc[0]))
mask[np.logical_and(Reflect[:, :, 3] < Reflect[:, :, 2],
Reflect[:, :, 4] < Reflect[:, :, 1])] = 1.0
water_mask_temp = np.copy(mask)
return(water_mask_temp)
#------------------------------------------------------------------------------
def Calc_albedo(Reflect,path_radiance,Apparent_atmosf_transm):
"""
    This function calculates and returns the surface albedo by using the reflectance from the Landsat image.
"""
# Surface albedo:
Surf_albedo = (0.254 * Reflect[:, :, 0] + 0.149 * Reflect[:, :, 1] +
0.147 * Reflect[:, :, 2] + 0.311 * Reflect[:, :, 3] +
0.103 * Reflect[:, :, 4] + 0.036 * Reflect[:, :, 5] -
path_radiance) / np.power(Apparent_atmosf_transm, 2)
# Better tsw instead of Apparent_atmosf_transm ??
Surf_albedo = Surf_albedo.clip(0.0, 0.6)
return(Surf_albedo)
#------------------------------------------------------------------------------
def Calc_NDVI(Reflect):
"""
    This function calculates and returns the NDVI by using the reflectance from the Landsat image.
"""
# Computation of Normalized Difference Vegetation Index (NDVI)
NDVI = ((Reflect[:, :, 3] - Reflect[:, :, 2]) /
(Reflect[:, :, 3] + Reflect[:, :, 2]))
return(NDVI)
#------------------------------------------------------------------------------
def Calc_SAVI(Reflect,L):
"""
    This function calculates and returns the SAVI by using the reflectance from the Landsat image.
"""
# Computation of Soil Adjusted Vegetation Index (SAVI)
SAVI = (1 + L) * ((Reflect[:, :, 3] - Reflect[:, :, 2]) /
(L + Reflect[:, :, 3] + Reflect[:, :, 2]))
return(SAVI)
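#------------------------------------------------------------------------------
# Minimal usage sketch (synthetic reflectance stack): NDVI and SAVI from a
# 1 x 1 x 7 array. Only band indices 2 (red) and 3 (NIR) matter here; the soil
# adjustment factor L = 0.5 is the usual SAVI default.
def _example_ndvi_savi():
    import numpy as np
    Reflect = np.zeros((1, 1, 7))
    Reflect[0, 0, 2] = 0.10   # red reflectance
    Reflect[0, 0, 3] = 0.40   # NIR reflectance
    print('NDVI =', Calc_NDVI(Reflect))           # (0.4-0.1)/(0.4+0.1) = 0.6
    print('SAVI =', Calc_SAVI(Reflect, L=0.5))    # 1.5*0.3/(0.5+0.5) = 0.45
    return Calc_NDVI(Reflect)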
#------------------------------------------------------------------------------
def CalculateSnowWaterMask(NDVI,shape_lsc,water_mask_temp,Surface_temp):
'''
    Divides the temporary water mask into a snow mask and a water mask by using the surface temperature
'''
NDVI_nan=np.copy(NDVI)
NDVI_nan[NDVI==0]=np.nan
NDVI_nan=np.float32(NDVI_nan)
NDVI_std=np.nanstd(NDVI_nan)
NDVI_max=np.nanmax(NDVI_nan)
    NDVI_threshold_cold_pixels = NDVI_max - 0.1 * NDVI_std
    print('NDVI threshold for cold pixels = ', '%0.3f' % NDVI_threshold_cold_pixels)
    ts_moist_veg_min = np.nanmin(Surface_temp[NDVI > NDVI_threshold_cold_pixels])
# calculate new water mask
mask=np.zeros((shape_lsc[1], shape_lsc[0]))
mask[np.logical_and(np.logical_and(water_mask_temp==1, Surface_temp <= 275),NDVI>=0.3)]=1
snow_mask=np.copy(mask)
# calculate new water mask
mask=np.zeros((shape_lsc[1], shape_lsc[0]))
mask[np.logical_and(water_mask_temp==1, Surface_temp > 273)]=1
water_mask=np.copy(mask)
return(snow_mask,water_mask,ts_moist_veg_min, NDVI_max, NDVI_std)
#------------------------------------------------------------------------------
def Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape_lsc,ClipLandsat,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn,dr,Landsat_nr, proyDEM_fileName):
"""
This function calculates and returns the reflectance and spectral radiation from the landsat image.
"""
Spec_Rad = np.zeros((shape_lsc[1], shape_lsc[0], 7))
Reflect = np.zeros((shape_lsc[1], shape_lsc[0], 7))
for band in Bands[:-(len(Bands)-6)]:
# Open original Landsat image for the band number
src_FileName = os.path.join(input_folder, '%s_B%1d.TIF'
% (Name_Landsat_Image, band))
ls_data=Open_landsat(src_FileName, proyDEM_fileName)
ls_data = ls_data*ClipLandsat
# stats = band_data.GetStatistics(0, 1)
index = np.where(Bands[:-(len(Bands)-6)] == band)[0][0]
if Landsat_nr == 8:
# Spectral radiance for each band:
L_lambda = Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
# Reflectivity for each band:
rho_lambda = Landsat_rho_lambda(L_lambda, ESUN_L8, index, cos_zn, dr)
elif Landsat_nr == 7:
# Spectral radiance for each band:
L_lambda=Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
# Reflectivity for each band:
rho_lambda = Landsat_rho_lambda(L_lambda, ESUN_L7, index, cos_zn, dr)
elif Landsat_nr == 5:
# Spectral radiance for each band:
L_lambda=Landsat_L_lambda(Lmin, Lmax, ls_data, index, Landsat_nr)
# Reflectivity for each band:
rho_lambda =Landsat_rho_lambda(L_lambda, ESUN_L5, index, cos_zn, dr)
else:
print('Landsat image not supported, use Landsat 5, 7 or 8')
Spec_Rad[:, :, index] = L_lambda
Reflect[:, :, index] = rho_lambda
Reflect = Reflect.clip(0.0, 1.0)
return(Reflect,Spec_Rad)
#------------------------------------------------------------------------------
def Landsat_L_lambda(Lmin,Lmax,ls_data,index,Landsat_nr):
"""
    Calculates the spectral radiance (L_lambda) from the Landsat DN values
"""
if Landsat_nr==8:
L_lambda = ((Lmax[index] - Lmin[index]) / (65535 - 1) * ls_data + Lmin[index])
elif Landsat_nr == 5 or Landsat_nr ==7:
L_lambda = (Lmax[index] - Lmin[index]) / 255 * ls_data + Lmin[index]
return(L_lambda)
#------------------------------------------------------------------------------
def Landsat_rho_lambda(L_lambda,ESUN,index,cos_zn,dr):
"""
    Calculates the at-sensor reflectance (rho_lambda) from the spectral radiance
"""
rho_lambda = np.pi * L_lambda / (ESUN[index] * cos_zn * dr)
return(rho_lambda)
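#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical values): top-of-atmosphere reflectance for
# one band from a spectral radiance. ESUN, cos_zn and dr would normally come
# from the calibration tables, the solar geometry and the DOY.
def _example_rho_lambda():
    import numpy as np
    L_lambda = np.array([80.0])    # spectral radiance (W/m2/sr/um), assumed
    ESUN = [1997.0]                # example solar exoatmospheric irradiance for the band
    cos_zn = np.array([0.9])       # cosine of the solar zenith angle
    dr = 1.01                      # inverse relative Earth-Sun distance
    rho = Landsat_rho_lambda(L_lambda, ESUN, 0, cos_zn, dr)
    print('TOA reflectance:', rho)   # ~0.14 for these numbers
    return rho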
#------------------------------------------------------------------------------
def Landsat_therm_data(Bands,input_folder,Name_Landsat_Image,output_folder, shape_lsc,ClipLandsat, proyDEM_fileName):
"""
This function calculates and returns the thermal data from the landsat image.
"""
therm_data = np.zeros((shape_lsc[1], shape_lsc[0], len(Bands)-6))
for band in Bands[-(len(Bands)-6):]:
# Open original Landsat image for the band number
src_FileName = os.path.join(input_folder, '%s_B%1d.TIF'
% (Name_Landsat_Image, band))
if not os.path.exists(src_FileName):
src_FileName = os.path.join(input_folder, '%s_B%1d_VCID_2.TIF'
% (Name_Landsat_Image, band))
ls_data=Open_landsat(src_FileName, proyDEM_fileName)
ls_data = ls_data*ClipLandsat
index = np.where(Bands[:] == band)[0][0] - 6
therm_data[:, :, index] = ls_data
return(therm_data)
#------------------------------------------------------------------------------
def Open_landsat(src_FileName, proyDEM_fileName):
"""
This function opens a landsat image and returns the data array of a specific landsat band.
"""
# crop band to the DEM extent
ls, ulx, uly, lrx, lry, epsg_to = reproject_dataset_example(src_FileName, proyDEM_fileName)
# Open the cropped Landsat image for the band number
ls_data = ls.GetRasterBand(1).ReadAsArray()
return(ls_data)
#------------------------------------------------------------------------------
def Get_Extend_Landsat(src_FileName):
"""
    This function gets the extent of the landsat image
"""
ls = gdal.Open(src_FileName) # Open Landsat image
print('Original LANDSAT Image - ')
geo_t_ls = ls.GetGeoTransform() # Get the Geotransform vector
x_size_ls = ls.RasterXSize # Raster xsize - Columns
y_size_ls = ls.RasterYSize # Raster ysize - Rows
print(' Size :', x_size_ls, y_size_ls)
(ulx, uly) = geo_t_ls[0], geo_t_ls[3]
(lrx, lry) = (geo_t_ls[0] + geo_t_ls[1] * x_size_ls,
geo_t_ls[3] + geo_t_ls[5] * y_size_ls)
band_data = ls.GetRasterBand(1)
return(ls,band_data,ulx,uly,lrx,lry,x_size_ls,y_size_ls)
#------------------------------------------------------------------------------
def Calc_Ra_Mountain(lon,DOY,hour,minutes,lon_proy,lat_proy,slope,aspect):
"""
    Calculates the extraterrestrial solar radiation by using the date, slope and aspect.
"""
# Constants
deg2rad = np.pi / 180.0 # Factor to transform from degree to rad
Min_cos_zn = 0.1 # Min value for cos zenith angle
Max_cos_zn = 1.0 # Max value for cos zenith angle
Gsc = 1367 # Solar constant (W / m2)
try:
Loc_time = float(hour) + float(minutes)/60 # Local time (hours)
except:
Loc_time = np.float_(hour) + np.float_(minutes)/60 # Local time (hours)
# Rounded difference of the local time from Greenwich (GMT) (hours):
offset_GTM = round(np.sign(lon[int(lon.shape[0])//2, int(lon.shape[1])//2]) * lon[int(lon.shape[0])//2,int(lon.shape[1])//2] * 24 // 360)
print(' Local time: ', '%0.3f' % np.nanmean(Loc_time))
print(' Difference of local time (LT) from Greenwich (GMT): ', offset_GTM)
# 1. Calculation of extraterrestrial solar radiation for slope and aspect
# Computation of Hour Angle (HRA = w)
B = 360./365 * (DOY-81) # (degrees)
# Computation of cos(theta), where theta is the solar incidence angle
# relative to the normal to the land surface
delta=np.arcsin(np.sin(23.45*deg2rad)*np.sin(np.deg2rad(B))) # Declination angle (radians)
phi = lat_proy * deg2rad # latitude of the pixel (radians)
s = slope * deg2rad # Surface slope (radians)
gamma = (aspect-180) * deg2rad # Surface aspect angle (radians)
w=w_time(Loc_time, lon_proy, DOY) # Hour angle (radians)
a,b,c = Constants(delta,s,gamma,phi)
cos_zn= AngleSlope(a,b,c,w)
cos_zn = cos_zn.clip(Min_cos_zn, Max_cos_zn)
    print('Average Cos Zenith Angle: ', '%0.3f' % np.nanmean(cos_zn))
    dr = 1 + 0.033 * np.cos(DOY * 2 * np.pi / 365)  # Inverse relative distance Earth-Sun
# Instant. extraterrestrial solar radiation (W/m2), Allen et al.(2006):
Ra_inst = Gsc * cos_zn * dr
# 24-hours extraterrestrial radiation
# 1.) determine if there are one or two periods of sun
# 2.) calculate the 24-hours extraterrestrial radiation if there are two periods of sun
# 3.) calculate the 24-hours extraterrestrial radiation if there is one period of sun
#1.) determine amount of sun periods
Ra_24 = np.zeros(np.shape(lat_proy))*np.nan
constant=Gsc*dr/(2*np.pi)
TwoPeriod= TwoPeriods(delta,s,phi) # all input in radians
#2.) calculate the 24-hours extraterrestrial radiation (2 periods)
ID = np.where(np.ravel(TwoPeriod==True))
Ra_24.flat[ID]=TwoPeriodSun(constant,delta,s.flat[ID],gamma.flat[ID],phi.flat[ID])
#3.) calculate the 24-hours extraterrestrial radiation (1 period)
ID = np.where(np.ravel(TwoPeriod==False))
Ra_24.flat[ID]=OnePeriodSun(constant,delta,s.flat[ID],gamma.flat[ID],phi.flat[ID])
# Horizontal surface
ws = np.arccos(-np.tan(delta) * np.tan(phi)) # Sunrise/sunset time angle
# Extraterrestial radiation for a horizontal surface for 24-h period:
Ra_hor_24 = (Gsc * dr / np.pi * (np.sin(delta) * np.sin(phi) * ws + np.cos(delta) * np.cos(phi) * np.sin(ws)))
# cos_theta_flat = (np.sin(delta) * np.sin(phi) + np.cos(delta) * np.cos(phi) * np.cos(w))
# Mountain radiation
Ra_mountain_24 = np.where(Ra_24 > Min_cos_zn * Ra_hor_24, Ra_24 / np.cos(s),
Ra_hor_24)
Ra_mountain_24[Ra_mountain_24 > 600.0] = 600.0
return(Ra_mountain_24,Ra_inst,cos_zn,dr,phi,delta)
#------------------------------------------------------------------------------
def OnePeriodSun(constant,delta,s,gamma,phi):
'''
    Based on Allen et al. (2006)
Calculate the 24-hours extraterrestrial radiation when there is one sun period
'''
sunrise,sunset = SunHours(delta,s,gamma,phi)
Vals=IntegrateSlope(constant,sunrise,sunset,delta,s,gamma,phi)
return(Vals)
#------------------------------------------------------------------------------
def TwoPeriodSun(constant,delta,s,gamma,phi):
'''
    Based on Allen et al. (2006)
    Calculate the 24-hours extraterrestrial radiation when there are two sun periods
'''
A1, A2 = SunHours(delta,s,gamma,phi)
a,b,c = Constants(delta,s,gamma,phi)
riseSlope, setSlope = BoundsSlope(a,b,c)
B1 = np.maximum(riseSlope,setSlope)
B2 = np.minimum(riseSlope,setSlope)
Angle_B1 = AngleSlope(a,b,c,B1)
Angle_B2 = AngleSlope(a,b,c,B2)
B1[abs(Angle_B1) > 0.001] = np.pi - B1[abs(Angle_B1) > 0.001]
B2[abs(Angle_B2) > 0.001] = -np.pi - B2[abs(Angle_B2) > 0.001]
# Check if two periods really exist
ID = np.ravel_multi_index(np.where(np.logical_and(B2 >= A1, B1 >= A2) == True),a.shape)
Val = IntegrateSlope(constant,B2.flat[ID],B1.flat[ID],delta,s.flat[ID],gamma.flat[ID],phi.flat[ID])
ID = ID[Val < 0]
# Finally calculate resulting values
Vals = np.zeros(B1.shape)
Vals.flat[ID] = (IntegrateSlope(constant,A1.flat[ID],B2.flat[ID],delta,s.flat[ID],gamma.flat[ID],phi.flat[ID]) +
IntegrateSlope(constant,B1.flat[ID],A2.flat[ID],delta,s.flat[ID],gamma.flat[ID],phi.flat[ID]))
ID = np.ravel_multi_index(np.where(Vals == 0),a.shape)
Vals.flat[ID] = IntegrateSlope(constant,A1.flat[ID],A2.flat[ID],delta,s.flat[ID],gamma.flat[ID],phi.flat[ID])
return(Vals)
#------------------------------------------------------------------------------
def IntegrateSlope(constant,sunrise,sunset,delta,s,gamma,phi):
'''
    Based on Allen et al. (2006), equation 5
Calculate the 24 hours extraterrestrial radiation
'''
    # correct the sunset and sunrise angles for days that have no sunset or no sunrise
SunOrNoSun = np.logical_or(((np.abs(delta + phi)) > (np.pi/2)),((np.abs(delta - phi)) > (np.pi/2)))
integral=np.zeros(s.shape)
ID = np.where(np.ravel(SunOrNoSun==True))
# No sunset
if abs(delta+phi.flat[ID])>(np.pi/2):
sunset1=np.pi
sunrise1=-np.pi
integral.flat[ID] = constant * (np.sin(delta)*np.sin(phi)*np.cos(s)*(sunset1-sunrise1)
- np.sin(delta)*np.cos(phi)*np.sin(s)*np.cos(gamma)*(sunset1-sunrise1)
+ np.cos(delta)*np.cos(phi)*np.cos(s)*(np.sin(sunset1)-np.sin(sunrise1))
+ np.cos(delta)*np.sin(phi)*np.sin(s)*np.cos(gamma)*(np.sin(sunset1)-np.sin(sunrise1))
- np.cos(delta)*np.sin(s)*np.sin(gamma)*(np.cos(sunset1)-np.cos(sunrise1)))
# No sunrise
elif np.abs(delta-phi.flat[ID])>(np.pi/2):
integral.flat[ID]=constant * (np.sin(delta)*np.sin(phi)*np.cos(s)*(0)
- np.sin(delta)*np.cos(phi)*np.sin(s)*np.cos(gamma)*(0)
+ np.cos(delta)*np.cos(phi)*np.cos(s)*(np.sin(0)-np.sin(0))
+ np.cos(delta)*np.sin(phi)*np.sin(s)*np.cos(gamma)*(np.sin(0)-np.sin(0))
- np.cos(delta)*np.sin(s)*np.sin(gamma)*(np.cos(0)-np.cos(0)))
ID = np.where(np.ravel(SunOrNoSun==False))
integral.flat[ID] = constant * (np.sin(delta)*np.sin(phi)*np.cos(s)*(sunset-sunrise)
- np.sin(delta)*np.cos(phi)*np.sin(s)*np.cos(gamma)*(sunset-sunrise)
+ np.cos(delta)*np.cos(phi)*np.cos(s)*(np.sin(sunset)-np.sin(sunrise))
+ np.cos(delta)*np.sin(phi)*np.sin(s)*np.cos(gamma)*(np.sin(sunset)-np.sin(sunrise))
- np.cos(delta)*np.sin(s)*np.sin(gamma)*(np.cos(sunset)-np.cos(sunrise)))
return(integral)
#------------------------------------------------------------------------------
def TwoPeriods(delta,s,phi):
'''
    Based on Allen et al. (2006)
Create a boolean map with True values for places with two sunsets
'''
TwoPeriods = (np.sin(s) > np.ones(s.shape)*np.sin(phi)*np.sin(delta)+np.cos(phi)*np.cos(delta))
return(TwoPeriods)
#------------------------------------------------------------------------------
def SunHours(delta,slope,slopedir,lat):
# Define sun hours in case of one sunlight period
a,b,c = Constants(delta,slope,slopedir,lat)
riseSlope, setSlope = BoundsSlope(a,b,c)
bound = BoundsHorizontal(delta,lat)
Calculated = np.zeros(slope.shape, dtype = bool)
RiseFinal = np.zeros(slope.shape)
SetFinal = np.zeros(slope.shape)
    # First check whether sunrise is nan
    # This means that there is either no sunrise (whole day night) or no sunset (whole day light)
    # For whole-day light use the horizontal sunrise; for whole-day night keep a zero
Angle4 = AngleSlope(a,b,c,-bound)
RiseFinal[np.logical_and(np.isnan(riseSlope),Angle4 >= 0)] = -bound[np.logical_and(np.isnan(riseSlope),Angle4 >= 0)]
Calculated[np.isnan(riseSlope)] = True
# Step 1 > 4
Angle1 = AngleSlope(a,b,c,riseSlope)
Angle2 = AngleSlope(a,b,c,-bound)
ID = np.ravel_multi_index(np.where(np.logical_and(np.logical_and(Angle2 < Angle1+0.001 ,Angle1 < 0.001),Calculated == False) == True),a.shape)
RiseFinal.flat[ID] = riseSlope.flat[ID]
Calculated.flat[ID] = True
# step 5 > 7
Angle3 = AngleSlope(a,b,c,-np.pi - riseSlope)
ID = np.ravel_multi_index(np.where(np.logical_and(np.logical_and(-bound<(-np.pi-riseSlope),Angle3 <= 0.001),Calculated == False) == True),a.shape)
RiseFinal.flat[ID] = -np.pi -riseSlope.flat[ID]
Calculated.flat[ID] = True
# For all other values we use the horizontal sunset if it is positive, otherwise keep a zero
RiseFinal[Calculated == False] = -bound[Calculated == False]
# Then check sunset is not nan or < 0
Calculated = np.zeros(slope.shape, dtype = bool)
Angle4 = AngleSlope(a,b,c,bound)
SetFinal[np.logical_and(np.isnan(setSlope),Angle4 >= 0)] = bound[np.logical_and(np.isnan(setSlope),Angle4 >= 0)]
Calculated[np.isnan(setSlope)] = True
# Step 1 > 4
Angle1 = AngleSlope(a,b,c,setSlope)
Angle2 = AngleSlope(a,b,c,bound)
ID = np.ravel_multi_index(np.where(np.logical_and(np.logical_and(Angle2 < Angle1+0.001,Angle1 < 0.001),Calculated == False) == True),a.shape)
SetFinal.flat[ID] = setSlope.flat[ID]
Calculated.flat[ID] = True
# step 5 > 7
Angle3 = AngleSlope(a,b,c,np.pi - setSlope)
ID = np.ravel_multi_index(np.where(np.logical_and(np.logical_and(bound>(np.pi-setSlope),Angle3 <= 0.001),Calculated == False) == True),a.shape)
SetFinal.flat[ID] = np.pi - setSlope.flat[ID]
Calculated.flat[ID] = True
# For all other values we use the horizontal sunset if it is positive, otherwise keep a zero
SetFinal[Calculated == False] = bound[Calculated == False]
# Angle4 = AngleSlope(a,b,c,bound)
# SetFinal[np.logical_and(Calculated == False,Angle4 >= 0)] = bound[np.logical_and(Calculated == False,Angle4 >= 0)]
# If Sunrise is after Sunset there is no sunlight during the day
SetFinal[SetFinal <= RiseFinal] = 0
RiseFinal[SetFinal <= RiseFinal] = 0
return(RiseFinal,SetFinal)
#------------------------------------------------------------------------------
def Constants(delta,s,gamma,phi):
'''
    Based on Allen et al. (2006), equation 11
    determines the constants for calculating the extraterrestrial solar radiation
'''
a = np.sin(delta)*np.cos(phi)*np.sin(s)*np.cos(gamma) - np.sin(delta)*np.sin(phi)*np.cos(s)
b = np.cos(delta)*np.cos(phi)*np.cos(s) + np.cos(delta)*np.sin(phi)*np.sin(s)*np.cos(gamma)
c = np.cos(delta)*np.sin(s)*np.sin(gamma)
return(a,b,c)
#------------------------------------------------------------------------------
def BoundsSlope(a,b,c):
'''
    Based on Allen et al. (2006), equation 13
This function calculates candidate values for sunrise and sunset hour angles
'''
Div = (b**2+c**2)
Div[Div <= 0] = 0.00001
sinB = (a*c + b*np.sqrt(b**2+c**2-a**2)) / Div
sinA = (a*c - b*np.sqrt(b**2+c**2-a**2)) / Div
sinB[sinB < -1] = -1; sinB[sinB > 1] = 1 # Limits see appendix A.2.i
sinA[sinA < -1] = -1; sinA[sinA > 1] = 1 # Limits see appendix A.2.i
sunrise = np.arcsin(sinA)
sunset = np.arcsin(sinB)
return(sunrise,sunset)
#------------------------------------------------------------------------------
def BoundsHorizontal(delta,phi):
    '''
    Based on Allen et al. (2006)
    This function calculates the sunrise/sunset hour angle based on the solar declination and latitude
    If there is no sunset or sunrise, the value is set to 0 (polar night) or pi (polar day)
'''
bound = np.arccos(-np.tan(delta)*np.tan(phi))
bound[abs(delta+phi) > np.pi/2] = np.pi
bound[abs(delta-phi) > np.pi/2] = 0
return(bound)
#------------------------------------------------------------------------------
def AngleSlope(a,b,c,w):
'''
    Based on Allen et al. (2006)
Calculate the cos zenith angle by using the hour angle and constants
'''
angle = -a + b*np.cos(w) + c*np.sin(w)
return(angle)
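#------------------------------------------------------------------------------
# Minimal sanity check (hypothetical angles): for a flat surface (slope = 0) the
# Constants/AngleSlope pair should reduce to the textbook relation
# cos(zenith) = sin(delta)*sin(phi) + cos(delta)*cos(phi)*cos(w).
def _example_angle_slope_flat():
    import numpy as np
    delta = np.deg2rad(10.0)             # solar declination
    phi = np.array([np.deg2rad(40.0)])   # latitude
    s = np.array([0.0])                  # flat surface
    gamma = np.array([0.0])              # aspect is irrelevant when slope = 0
    w = np.deg2rad(15.0)                 # hour angle (1 hour after solar noon)
    a, b, c = Constants(delta, s, gamma, phi)
    cos_zn = AngleSlope(a, b, c, w)
    check = np.sin(delta) * np.sin(phi) + np.cos(delta) * np.cos(phi) * np.cos(w)
    print(cos_zn, check)   # the two values should match
    return cos_zn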
#------------------------------------------------------------------------------
def Calc_Gradient(dataset,pixel_spacing):
"""
This function calculates the slope and aspect of a DEM map.
"""
# constants
deg2rad = np.pi / 180.0 # Factor to transform from degree to rad
rad2deg = 180.0 / np.pi # Factor to transform from rad to degree
# Calculate slope
x, y = np.gradient(dataset)
slope = np.arctan(np.sqrt(np.square(x/pixel_spacing) + np.square(y/pixel_spacing))) * rad2deg
# calculate aspect
aspect = np.arctan2(y/pixel_spacing, -x/pixel_spacing) * rad2deg
aspect = 180 + aspect
return(deg2rad,rad2deg,slope,aspect)
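#------------------------------------------------------------------------------
# Minimal usage sketch (synthetic DEM): slope and aspect for a plane that rises
# 1 m per pixel in one direction with 30 m pixel spacing; the expected slope is
# arctan(1/30), roughly 1.9 degrees, everywhere.
def _example_calc_gradient():
    import numpy as np
    dem = np.tile(np.arange(10, dtype=float), (10, 1))   # plane tilted along the columns
    deg2rad, rad2deg, slope, aspect = Calc_Gradient(dem, pixel_spacing=30.0)
    print('mean slope (deg):', np.nanmean(slope))
    print('mean aspect (deg):', np.nanmean(aspect))
    return slope, aspect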
#------------------------------------------------------------------------------
def DEM_lat_lon(DEM_fileName,output_folder):
"""
This function retrieves information about the latitude and longitude of the
DEM map.
"""
# name for output
lat_fileName = os.path.join(output_folder, 'Output_radiation_balance','latitude.tif')
lon_fileName = os.path.join(output_folder, 'Output_radiation_balance','longitude.tif')
g = gdal.Open(DEM_fileName) # Open DEM
geo_t = g.GetGeoTransform() # Get the Geotransform vector:
x_size = g.RasterXSize # Raster xsize - Columns
y_size = g.RasterYSize # Raster ysize - Rows
# create a longitude and a latitude array
lon = np.zeros((y_size, x_size))
lat = np.zeros((y_size, x_size))
for col in np.arange(x_size):
lon[:, col] = geo_t[0] + col * geo_t[1] + geo_t[1]/2
        # ULx + col*(E-W pixel spacing) + half the E-W pixel spacing (pixel centre)
for row in np.arange(y_size):
lat[row, :] = geo_t[3] + row * geo_t[5] + geo_t[5]/2
        # ULy + row*(N-S pixel spacing) + half the N-S pixel spacing (pixel centre);
        # the N-S spacing is negative as rows are counted from the UL corner
# Define shape of the raster
shape = [x_size, y_size]
# Save lat and lon files in geo- coordinates
save_GeoTiff_proy(g, lat, lat_fileName, shape, nband=1)
save_GeoTiff_proy(g, lon, lon_fileName, shape, nband=1)
return(lat,lon,lat_fileName,lon_fileName)
#------------------------------------------------------------------------------
def reproject_dataset(dataset, pixel_spacing, UTM_Zone):
"""
A sample function to reproject and resample a GDAL dataset from within
Python. The idea here is to reproject from one system to another, as well
as to change the pixel size. The procedure is slightly long-winded, but
goes like this:
1. Set up the two Spatial Reference systems.
2. Open the original dataset, and get the geotransform
3. Calculate bounds of new geotransform by projecting the UL corners
4. Calculate the number of pixels with the new projection & spacing
5. Create an in-memory raster dataset
6. Perform the projection
"""
# 1) Open the dataset
g = gdal.Open(dataset)
if g is None:
        print('input file does not exist')
# Define the EPSG code...
EPSG_code = '326%02d' % UTM_Zone
epsg_to = int(EPSG_code)
    # 2) Get the EPSG code of the source projection (default to WGS84, EPSG:4326)
try:
proj = g.GetProjection()
Proj_in=proj.split('EPSG","')
epsg_from=int((str(Proj_in[-1]).split(']')[0])[0:-1])
except:
        epsg_from = int(4326)
    # Get the Geotransform vector:
    geo_t = g.GetGeoTransform()
# Vector components:
# 0- The Upper Left easting coordinate (i.e., horizontal)
# 1- The E-W pixel spacing
# 2- The rotation (0 degrees if image is "North Up")
# 3- The Upper left northing coordinate (i.e., vertical)
# 4- The rotation (0 degrees)
# 5- The N-S pixel spacing, negative as it is counted from the UL corner
x_size = g.RasterXSize # Raster xsize
y_size = g.RasterYSize # Raster ysize
epsg_to = int(epsg_to)
    # 3) Set up the two spatial reference systems (source and target)
osng = osr.SpatialReference()
osng.ImportFromEPSG(epsg_to)
wgs84 = osr.SpatialReference()
wgs84.ImportFromEPSG(epsg_from)
inProj = Proj(init='epsg:%d' %epsg_from)
outProj = Proj(init='epsg:%d' %epsg_to)
nrow_skip = round((0.07*y_size)/2)
ncol_skip = round((0.04*x_size)/2)
# Up to here, all the projection have been defined, as well as a
# transformation from the from to the to
ulx, uly = transform(inProj,outProj,geo_t[0], geo_t[3] + nrow_skip * geo_t[5])
lrx, lry = transform(inProj,outProj,geo_t[0] + geo_t[1] * (x_size-ncol_skip),
geo_t[3] + geo_t[5] * (y_size-nrow_skip))
# See how using 27700 and WGS84 introduces a z-value!
# Now, we create an in-memory raster
mem_drv = gdal.GetDriverByName('MEM')
# The size of the raster is given the new projection and pixel spacing
# Using the values we calculated above. Also, setting it to store one band
# and to use Float32 data type.
col = int((lrx - ulx)/pixel_spacing)
rows = int((uly - lry)/pixel_spacing)
# Re-define lr coordinates based on whole number or rows and columns
(lrx, lry) = (ulx + col * pixel_spacing, uly -
rows * pixel_spacing)
dest = mem_drv.Create('', col, rows, 1, gdal.GDT_Float32)
if dest is None:
        print('input map too large for memory, clip the input map')
# Calculate the new geotransform
new_geo = (ulx, pixel_spacing, geo_t[2], uly,
geo_t[4], - pixel_spacing)
# Set the geotransform
dest.SetGeoTransform(new_geo)
dest.SetProjection(osng.ExportToWkt())
# Perform the projection/resampling
gdal.ReprojectImage(g, dest, wgs84.ExportToWkt(), osng.ExportToWkt(),gdal.GRA_Bilinear)
return dest, ulx, lry, lrx, uly, epsg_to
#------------------------------------------------------------------------------
def reproject_dataset_example(dataset, dataset_example, method = 1):
# open example dataset
g_ex = gdal.Open(dataset_example)
try:
proj = g_ex.GetProjection()
Proj=proj.split('EPSG","')
epsg_to=int((str(Proj[-1]).split(']')[0])[0:-1])
except:
epsg_to = int(4326)
Y_raster_size = g_ex.RasterYSize
X_raster_size = g_ex.RasterXSize
Geo = g_ex.GetGeoTransform()
ulx = Geo[0]
uly = Geo[3]
lrx = ulx + X_raster_size * Geo[1]
lry = uly + Y_raster_size * Geo[5]
# open dataset that must be transformed
g_in = gdal.Open(dataset)
try:
proj = g_in.GetProjection()
Proj=proj.split('EPSG","')
epsg_from=int((str(Proj[-1]).split(']')[0])[0:-1])
except:
epsg_from = int(4326)
# Set the EPSG codes
osng = osr.SpatialReference()
osng.ImportFromEPSG(epsg_to)
wgs84 = osr.SpatialReference()
wgs84.ImportFromEPSG(epsg_from)
# Create new raster
mem_drv = gdal.GetDriverByName('MEM')
dest1 = mem_drv.Create('', X_raster_size, Y_raster_size, 1, gdal.GDT_Float32)
dest1.SetGeoTransform(Geo)
dest1.SetProjection(osng.ExportToWkt())
# Perform the projection/resampling
if method == 1:
gdal.ReprojectImage(g_in, dest1, wgs84.ExportToWkt(), osng.ExportToWkt(), gdal.GRA_NearestNeighbour)
if method == 2:
gdal.ReprojectImage(g_in, dest1, wgs84.ExportToWkt(), osng.ExportToWkt(), gdal.GRA_Average)
return(dest1, ulx, lry, lrx, uly, epsg_to)
#------------------------------------------------------------------------------
def save_GeoTiff_proy(src_dataset, dst_dataset_array, dst_fileName, shape_lsc, nband):
"""
This function saves an array dataset in GeoTiff, using the parameters
from the source dataset, in projected coordinates
"""
    dst_dataset_array = np.float64(dst_dataset_array)
dst_dataset_array[dst_dataset_array<-9999] = np.nan
geotransform = src_dataset.GetGeoTransform()
spatialreference = src_dataset.GetProjection()
# create dataset for output
fmt = 'GTiff'
driver = gdal.GetDriverByName(fmt)
dir_name = os.path.dirname(dst_fileName)
# If the directory does not exist, make it.
if not os.path.exists(dir_name):
os.makedirs(dir_name)
dst_dataset = driver.Create(dst_fileName, shape_lsc[0], shape_lsc[1], nband,gdal.GDT_Float32)
dst_dataset.SetGeoTransform(geotransform)
dst_dataset.SetProjection(spatialreference)
dst_dataset.GetRasterBand(1).SetNoDataValue(-9999)
dst_dataset.GetRasterBand(1).WriteArray(dst_dataset_array)
dst_dataset = None
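#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical path and values): writing a small array to
# GeoTIFF with save_GeoTiff_proy, using an in-memory GDAL dataset as the
# georeference source instead of a real Landsat/DEM dataset.
def _example_save_geotiff(dst_fileName='example_output.tif'):
    import numpy as np
    from osgeo import gdal, osr
    # build a 10 x 10 source dataset carrying the geotransform and projection
    src = gdal.GetDriverByName('MEM').Create('', 10, 10, 1, gdal.GDT_Float32)
    src.SetGeoTransform((30.0, 0.001, 0.0, 30.0, 0.0, -0.001))  # UL at 30E/30N, ~0.001 deg pixels
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    src.SetProjection(srs.ExportToWkt())
    data = np.random.rand(10, 10)    # array to save (rows, cols)
    shape_lsc = [10, 10]             # [x_size, y_size], as used elsewhere in this module
    save_GeoTiff_proy(src, data, dst_fileName, shape_lsc, nband=1)
    return dst_fileName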
#------------------------------------------------------------------------------
def w_time(LT,lon_proy, DOY):
"""
This function computes the hour angle (radians) of an image given the
local time, longitude, and day of the year.
"""
nrow, ncol = lon_proy.shape
# Difference of the local time (LT) from Greenwich Mean Time (GMT) (hours):
delta_GTM = np.sign(lon_proy[nrow//2, ncol//2]) * lon_proy[nrow//2, ncol//2] * 24 // 360
if np.isnan(delta_GTM) == True:
        delta_GTM = np.sign(np.nanmean(lon_proy)) * np.nanmean(lon_proy) * 24 // 360
# Local Standard Time Meridian (degrees):
LSTM = 15 * delta_GTM
    # Equation of time (EoT, minutes):
B = 360./365 * (DOY-81) # (degrees)
    EoT = 9.87 * np.sin(np.deg2rad(2 * B)) - 7.53 * np.cos(np.deg2rad(B)) - 1.5 * np.sin(np.deg2rad(B))
# Net Time Correction Factor (minutes) at the center of the image:
TC = 4 * (lon_proy - LSTM) + EoT # Difference in time over the longitude
    LST = LT + delta_GTM + TC / 60  # Local solar time (hours)
HRA = 15 * (LST-12) # Hour angle HRA (degrees)
deg2rad = np.pi / 180.0 # Factor to transform from degree to rad
w = HRA * deg2rad # Hour angle HRA (radians)
return w
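#------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical values): hour angle for a 3 x 3 grid of
# longitudes near 30 deg E with an acquisition time of 10.5 decimal hours (as
# read from the scene metadata) on DOY 172; the result is in radians.
def _example_w_time():
    import numpy as np
    lon_proy = np.full((3, 3), 30.0)     # longitude grid (degrees east)
    w = w_time(LT=10.5, lon_proy=lon_proy, DOY=172)
    print('mean hour angle (radians):', np.nanmean(w))
    return w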
#------------------------------------------------------------------------------
def info_general_metadata(filename):
"""
This function retrieves general information of the Landsat image
(date and time aquired, UTM zone, sun elevation) from the
metadata file.
"""
Landsat_meta = open(filename, "r") # Open metadata file
for line in Landsat_meta:
if re.match("(.*)SCENE_CENTER_TIME(.*)", line): # search in metadata for line SCENE_CENTER_TIME
words = line.split()# make groups of words which are divided by an open space
time_list = words[2].split(':', 2) # Take the second word of words and split the word which are divided by :
if len(time_list[0])== 3:
time_list[0]=time_list[0][1:3]
time_list[2]=time_list[2][0:-1]
hour = float(time_list[0]) # take the first word of time_list
            minutes = float(time_list[1]) + float(time_list[2][:-1]) / 60  # Take the second and third parts of time_list; [:-1] removes the trailing Z behind the seconds
Landsat_meta = open(filename, "r") # Open metadata file
for line in Landsat_meta:
if re.match("(.*)DATE_ACQUIRED(.*)", line):
words = line.split()
DOY = time.strptime(words[2], "%Y-%m-%d").tm_yday
year = time.strptime(words[2], "%Y-%m-%d").tm_year
mon = time.strptime(words[2], "%Y-%m-%d").tm_mon
day = time.strptime(words[2], "%Y-%m-%d").tm_mday
Landsat_meta = open(filename, "r") # Open metadata file
for line in Landsat_meta:
if re.match("(.*)UTM_ZONE(.*)", line):
words = line.split()
UTM_Zone = int(words[2])
Landsat_meta = open(filename, "r") # Open metadata file
for line in Landsat_meta:
if re.match("(.*)SUN_ELEVATION(.*)", line):
words = line.split()
Sun_elevation = float(words[2])
return year, DOY, mon, day, hour, minutes, UTM_Zone, Sun_elevation
#------------------------------------------------------------------------------
def info_band_metadata(filename, Bands):
"""
This function retrieves Landsat band information (minimum and maximum
radiance) from the metadata file.
"""
Lmin = np.zeros(len(Bands)) # Minimum band radiance, for each band
Lmax = np.zeros(len(Bands)) # Maximum band radiance, for each band
k1_const = np.zeros(len(Bands)-6) # TIRS_Thermal constant k1 ######
k2_const = np.zeros(len(Bands)-6) # TIRS_Thermal constant k2 ######
for band in Bands:
Landsat_meta = open(filename, "r") # Open metadata file
for line in Landsat_meta:
if re.match("(.*)RADIANCE_MINIMUM_BAND_%1d(.*)" % band, line):
words = line.split()
value = float(words[2])
Lmin[np.where(Bands == band)[0][0]] = value
if re.match("(.*)RADIANCE_MAXIMUM_BAND_%1d(.*)" % band, line):
words = line.split()
value = float(words[2])
Lmax[np.where(Bands == band)[0][0]] = value
if re.match("(.*)K1_CONSTANT_BAND_%1d(.*)" % band, line): # #####
words = line.split()
value = float(words[2])
k1_const[np.where(Bands == band)[0][0]-6] = value
if re.match("(.*)K2_CONSTANT_BAND_%1d(.*)" % band, line): # #####
words = line.split()
value = float(words[2])
k2_const[np.where(Bands == band)[0][0]-6] = value
return Lmin, Lmax, k1_const, k2_const
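# --- Editor's illustration of how Lmin/Lmax and K1/K2 are typically used
# (the standard Landsat handbook relations, not code taken from this script).
# The DN array and the calibration range qcal_min/qcal_max are assumed inputs.
def _example_dn_to_radiance(DN, Lmin_b, Lmax_b, qcal_min=1.0, qcal_max=255.0):
    # spectral radiance from digital numbers
    return (Lmax_b - Lmin_b) / (qcal_max - qcal_min) * (DN - qcal_min) + Lmin_b
def _example_brightness_temperature(L_thermal, k1, k2):
    import numpy as np
    # at-sensor brightness temperature (K) from thermal-band radiance
    return k2 / np.log(k1 / L_thermal + 1.0)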
#------------------------------------------------------------------------------
def sensible_heat(rah, ustar, rn_inst, g_inst, ts_dem, ts_dem_hot, ts_dem_cold,
air_dens, Surf_temp, k_vk, QC_Map, hot_pixels, slope):
"""
This function computes the instantaneous sensible heat given the
instantaneous net radiation, ground heat flux, and other parameters.
"""
# Near surface temperature difference (dT):
dT_ini = (rn_inst - g_inst) * rah / (air_dens * 1004)
    dT_hot = np.copy(dT_ini)  # api: numpy.copy
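# --- Editor's illustration (NOT the original continuation of sensible_heat).
# SEBAL-type schemes usually assume the near-surface temperature difference
# varies linearly with surface temperature, dT = a*Ts + b, with a and b anchored
# so that dT is ~0 at a cold pixel and equals a known value at a hot pixel.
# A minimal sketch of that calibration, with hypothetical argument names:
def _example_dT_calibration(ts_dem, ts_cold_pixel, ts_hot_pixel, dT_at_hot_pixel):
    a = dT_at_hot_pixel / (ts_hot_pixel - ts_cold_pixel)  # slope (K per K)
    b = -a * ts_cold_pixel                                # so that dT = 0 at the cold pixel
    return a * ts_dem + b                                 # dT for every pixel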
import re
import pandas as pd
import numpy as np
import sys
import os
import networkx as nx
from datetime import datetime
from collections import defaultdict
import random
import time
import uuid
def makehash(w=dict):
"""autovivification like hash in perl
http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
use call it on hash like h = makehash()
then directly
h[1][2]= 3
useful ONLY for a 2 level hash
"""
# return defaultdict(makehash)
return defaultdict(w)
def makedeephash():
"""autovivification like hash in perl
http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
use call it on hash like h = makehash()
then directly
h[1][2]= 3
useful ONLY for a 2 level hash
"""
# return defaultdict(makehash)
return defaultdict(makedeephash)
def makehashlist():
"""autovivification like hash in perl
http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
use call it on hash like h = makehash()
then directly
h[1][2]= 3
useful ONLY for a 2 level hash
"""
# return defaultdict(makehash)
return defaultdict(list)
def makehashset():
"""autovivification like hash in perl
http://stackoverflow.com/questions/651794/whats-the-best-way-to-initialize-a-dict-of-dicts-in-python
use call it on hash like h = makehash()
then directly
h[1][2]= 3
useful ONLY for a 2 level hash
"""
# return defaultdict(makehash)
return defaultdict(set)
def reformat_dict(dic, is_list=False):
"""
get dict with delim and things and returns two object
1) nr => fullname
2) nr = value
"""
nr2fullname = {}
nr2feat = {}
for k, v in dic.items():
k = k.strip('"')
if is_list:
v = [x.replace('"', "") for x in v]
else:
v = str(v).strip('"')
n = k.split("_")[-1]
nr2feat[n] = v
nr2fullname[n] = k
return nr2feat, nr2fullname
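# --- Editor's usage sketch for reformat_dict (the key/value pair is made up):
def _example_reformat_dict():
    nr2feat, nr2fullname = reformat_dict({'"height_cm_01"': '"180"'})
    assert nr2feat == {"01": "180"}
    assert nr2fullname == {"01": "height_cm_01"}
    return nr2feat, nr2fullname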
def df2dict(path, k, v):
tmp = pd.read_csv(path, sep="\t")
return dict(zip(list(tmp[k]), list(tmp[v])))
def create_unique(dic):
"""
create unique identifier from the dict
"""
return dict(zip(range(1, len(dic.keys()) + 1), dic.keys()))
def reformat_dict_f(dic, mapping):
"""
switch keys with key values (unique identifier)
"""
return {k: dic[v] for k, v in mapping.items()}
def read_sample_ids_diff(info_path):
"""
read sample ids and return a hash
cond => short ID
"""
header = []
HoH = {}
temp = {}
for line in open(info_path, "r"):
line = line.rstrip("\n")
if line.startswith(str("Sample") + "\t"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
temp = dict(zip(header, things))
if temp:
HoH[temp["cond"]] = temp["short_id"]
return HoH
def create_df(prot_dict):
df = pd.DataFrame.from_dict(prot_dict)
df = df.T
# df.drop_duplicates(subset=None, keep='first', inplace=True)
df.fillna(value=0, inplace=True)
    df[(df.T != 0).all()]  # NOTE: this has no effect as written; assign the result (df = df[...]) to drop rows that contain zeros
return df
def create_file(filename, header):
"""
create file in filename
header is list
"""
with open(filename, "w", encoding="utf-8") as outfile:
outfile.write("%s\n" % "\t".join([str(x) for x in header]))
def dump_file(filename, things):
"""
dump things to file to filename
"""
with open(filename, "a", encoding="utf-8") as outfile:
outfile.write("%s\n" % things)
def read_pred(pred_path):
"""
collapse prediction into protein groups
need to modify prediction to add complex member and also protein names
"""
header = []
temp = {}
test = {}
for line in open(pred_path, "r"):
line = line.rstrip("\n")
if line.startswith(str("ID") + "\t"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
temp = dict(zip(header, things))
# # TODO deal with duplicate entries in database
test[temp["ID"]] = float(temp["POS"])
return test
def read_mp_feat(pred_path):
"""
if no prediction was done take mp_feat_norm
"""
header = []
temp = {}
test = makehash()
for line in open(pred_path, "r"):
line = line.rstrip("\n")
if line.startswith(str("ID") + "\t"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
# need to deal with error being a thing or empty col
temp = dict(zip(header, things))
test[temp["ID"]] = []
test[temp["ID"]].extend(temp["MB"].split("#"))
# test[temp['ID']][temp['MB']] = 'yes'
return test
def read_matrix(path, arr=False):
"""
read matrix and returns HoA[protein] = # delim int
"""
header = []
HoA = makehash()
temp = {}
for line in open(path, "r"):
line = line.rstrip("\n")
if line.startswith(str("ID") + "\t"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
temp = dict(zip(header, things))
if temp:
val = "#".join([temp[key] for key in header if key != "ID"])
HoA[temp["ID"]] = val
return HoA
def read_peaks(path, arr=False):
"""
read peak list in the form of
prot peaks selected cmplx name
output hash cmplx name prot => peaks selected
"""
header = []
HoA = makehash()
temp = {}
for line in open(path, "r"):
line = line.rstrip("\n")
if line.startswith("MB\tID"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
temp = dict(zip(header, things))
if temp:
row = "\t".join([temp["PKS"], temp["SEL"]])
HoA[temp["ID"]][temp["MB"]] = row
return HoA
def read_sample_ids(info_path):
"""
read sample to treatment
"""
header = []
HoH = {}
temp = {}
for line in open(info_path, "r"):
line = line.rstrip("\n")
if line.startswith(str("Sample") + "\t"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
temp = dict(zip(header, things))
if temp:
HoH[temp["Sample"]] = "_".join([temp["cond"], temp["repl"]])
return HoH
def read_txt(path, first_col="GN"):
"""
read a tab delimited file giving a path and the first column name
return a hash of hashes prot => sample => val
"""
header = []
HoA = makehash()
temp = {}
for line in open(path, "r"):
line = line.rstrip("\n")
if line.startswith(str(first_col) + "\t"):
header = re.split(r"\t+", line)
else:
things = re.split(r"\t+", line)
temp = dict(zip(header, things))
if temp:
HoA[temp.get("GN")] = []
for key in header:
try:
HoA[temp.get("GN")].append(float(temp[key]))
except ValueError:
continue
return HoA
def read_cal(infile):
"""
read calibration file
"""
out = []
out2 = []
for line in open(infile, "r"):
tmp_ = re.split(r"\t+", line.rstrip("\n"))
out.append(int(tmp_[0]))
out2.append(float(tmp_[1]))
return out, out2
def ppi2graph(infile):
df = pd.read_csv(infile, sep="\t")
ppi = dict(zip(df["protA"], df["protB"]))
n = nx.Graph()
for k in ppi.keys():
n.add_edge(k, ppi[k])
return n
def wrout(d, filename, header, is_hyp=False):
"""
giving a list, a filename and a set of headers (tab delimited)
"""
with open(filename, "w", encoding="utf-8") as outfile:
outfile.write("\t".join(header) + "\n")
for k in d:
if is_hyp:
base_id = uniqueid()
cmplx_nr = "cmplx_" + str(base_id)
line = "\t".join([cmplx_nr, k, str(d[k])])
outfile.write(str(line) + "\n")
else:
outfile.write(str(k) + "\n")
# print('file saved in ' + str(filename))
return True
def read_combined(combfile):
"""
receive a combined file and uniforms the annotation
"""
HoA = makehashlist()
df = pd.read_csv(combfile, sep="\t")
for index, row in df.iterrows():
HoA[row["CMPLX"]].append(row["ID"])
return HoA
def create_db_from_cluster(nodes, clusters):
idx = 1
ids = "ppi"
header = ["ComplexID", "ComplexName", "subunits(Gene name)"]
path = resource_path("./ppi_db.txt")
create_file(path, header)
for cmplx in clusters:
nm = ";".join([str(nodes[x]) for x in list(cmplx)])
tmp = "_".join([ids, str(idx)])
dump_file(path, "\t".join([str(idx), tmp, nm]))
idx += 1
return True
def file2folder(file_, prefix="./tmp/"):
# we are already stripping the extension
filename = os.path.splitext(os.path.basename(file_))[0]
return os.path.join(prefix, filename)
def resource_path(relative_path):
"""
Get absolute path to resource, works for dev and for PyInstaller
"""
base_path = getattr(sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
def catch(func, handle=lambda e: e, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
return handle(e)
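# --- Editor's usage sketch for catch(): it returns func(*args), or hands the
# raised exception to the handler and returns the handler's result instead.
def _example_catch():
    ok = catch(int, lambda e: None, "3")      # -> 3
    bad = catch(int, lambda e: None, "oops")  # -> None (the ValueError is handled)
    return ok, bad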
def split_hypo_db(pred, ref_cmplx, cmplx_ann):
"""
read prediction ref cmplx and cmplx and return the positive
"""
pred = pd.read_csv(pred, sep="\t", index_col=False)
ref_cmplx = pd.read_csv(ref_cmplx, sep="\t")
ann = pd.read_csv(cmplx_ann, sep="\t")
on = ["ID"]
xx = lambda x, y, on: pd.merge(x, y, how="left", left_on=on, right_on=on)
mrg = xx(ann, xx(pred, ref_cmplx, on), ["ID"])
pos = mrg["IS_CMPLX"] == "Yes"
mrg = mrg[pos]
# split in hypothesis and db and then return
hyp = mrg[(mrg.ANN == 0)]
db = mrg[mrg.ANN == 1]
return hyp, db
def uniqueid():
"""
generate unique id with length 17 to 21
"""
return str(uuid.uuid4())
def split_to_df(df, col, sep=","):
tmp = pd.DataFrame(df[col].str.split(sep).tolist(), index=df.index.copy())
return tmp
def explode(df, lst_cols, fill_value="", preserve_index=False):
# make sure `lst_cols` is list-alike
"""
https://stackoverflow.com/questions/12680754/split-explode-pandas-dataframe-string-entry-to-separate-rows/40449726#40449726
"""
if (
lst_cols is not None
and len(lst_cols) > 0
and not isinstance(lst_cols, (list, tuple, np.ndarray, pd.Series))
):
lst_cols = [lst_cols]
# all columns except `lst_cols`
idx_cols = df.columns.difference(lst_cols)
# calculate lengths of lists
lens = df[lst_cols[0]].str.len()
# preserve original index values
    idx = np.repeat(df.index.values, lens)  # api: numpy.repeat
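# --- Editor's note: the helper above reimplements list-column explosion; recent
# pandas versions (>= 0.25) provide DataFrame.explode for the single-column case.
# A minimal sketch with a made-up frame:
def _example_pandas_explode():
    import pandas as pd
    df = pd.DataFrame({"ID": ["a", "b"], "MB": [["p1", "p2"], ["p3"]]})
    return df.explode("MB")  # one row per list element; index values are repeated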
"""
Written by <NAME> 10/11/2018
Extract the contribution of ENSO/IOD from the selected monthly time series
using a multi-linear regression (MLR) method.
The input time series is decomposed with the following model function:
X(t) = beta0 + beta1*t + beta2*cos(2*pi*t) + beta3*sin(2*pi*t) +
       beta4*cos(4*pi*t) + beta5*sin(4*pi*t) + beta6*Nino3.4 +
       beta7*H{Nino3.4} + beta8*IOD + beta9*H{IOD}
Where:
- Nino3.4 is the monthly Nino 3.4 ENSO index;
- H{Nino3.4} is the Hilbert transform of the monthly Nino 3.4 ENSO index;
- IOD is the Indian Ocean Dipole index;
- H{IOD} is the Hilbert transform of the Indian Ocean Dipole index.
More information about the signal decomposition can be found in
Forootan et al. (2016):
"Quantifying the impacts of ENSO and IOD on rain gauge and remotely
sensed precipitation products over Australia"
https://www.researchgate.net/publication/283722412_
Quantifying_the_impacts_of_ENSO_and_IOD_on_rain_gauge_and_
remotely_sensed_precipitation_products_over_Australia
Download the IOD index from (NOTE: you need to choose a specific version
of the index):
http://www.jamstec.go.jp/frsgc/research/d1/iod/e/iod/dipole_mode_index.html
Download the Nino 3.4 index from:
http://www.cpc.ncep.noaa.gov/data/indices/
"""
# - python dependencies
from __future__ import print_function
import os
import numpy as np
import scipy as sp
from scipy.fftpack import hilbert
from scipy import signal
import matplotlib.pyplot as plt
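# --- Editor's sketch of the decomposition described in the module docstring
# (not the original implementation). It assumes t is the time axis in fractional
# years and that x, nino34 and iod are monthly series of equal length.
def _example_mlr_decomposition(t, x, nino34, iod):
    A = np.column_stack([
        np.ones_like(t), t,                            # offset and linear trend
        np.cos(2 * np.pi * t), np.sin(2 * np.pi * t),  # annual cycle
        np.cos(4 * np.pi * t), np.sin(4 * np.pi * t),  # semiannual cycle
        nino34, hilbert(nino34),                       # ENSO index and its Hilbert transform
        iod, hilbert(iod),                             # IOD index and its Hilbert transform
    ])
    beta, *_ = np.linalg.lstsq(A, x, rcond=None)
    enso_part = A[:, 6:8] @ beta[6:8]                  # ENSO contribution to x(t)
    iod_part = A[:, 8:10] @ beta[8:10]                 # IOD contribution to x(t)
    return beta, enso_part, iod_part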
def digit_date(year, month):
# -- create output date variable
t_date = np.zeros((1))
# -- Vector containing the number of days for a leap and a standard year
    dpm_leap = np.array([31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], dtype=float)  # np.float alias removed in NumPy >= 1.24
    dpm_stnd = np.array([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31], dtype=float)
    # -- This matrix is used to compute the total number of days elapsed since the
    # -- beginning of the year, i.e. to convert to the decimal date format
mon_mat = np.tri(12, 12, -1)
# -- month index
    mon_ind = np.int(month - 1)  # api: numpy.int (deprecated alias; plain int(month - 1) is equivalent)
import copy
import datetime
from unittest import TestCase
import numpy as np
from giant import rotations as at
from giant.ray_tracer import scene, shapes, rays, INTERSECT_DTYPE
import os
LOCALDIR = os.path.dirname(os.path.realpath(__file__))
class TestSceneObj(TestCase):
def setUp(self):
tri1 = np.array([[-5, -4, -4.5],
[0, 0, 1],
[0, 0, 0]])
tri2 = tri1+np.array([[2.5, 0, 0]]).T
tri3 = tri2+np.array([[2.5, 0, 0]]).T
tri4 = tri3+np.array([[2.5, 0, 0]]).T
self.triangles = shapes.Triangle64(np.hstack([tri1, tri2, tri3, tri4]).T, 1, np.arange(12).reshape(-1, 3))
def check_parameters(self, obj, model, position, orientation):
self.assertTrue(obj.shape == model)
np.testing.assert_array_equal(obj.position, position.ravel())
self.assertEqual(obj.orientation, orientation)
def test_creation(self):
sobj = scene.SceneObject(self.triangles)
self.check_parameters(sobj, self.triangles, np.zeros(3, dtype=np.float64),
at.Rotation(np.eye(3)))
sobj = scene.SceneObject(self.triangles, current_position=np.ones(3),
current_orientation=at.Rotation([1, 2, 3]))
self.check_parameters(sobj, self.triangles, np.ones(3), at.Rotation([1, 2, 3]))
def test_change_position(self):
translation = [1, 2, 3]
tri = copy.deepcopy(self.triangles)
tri.translate([4, 5, 5])
sobj = scene.SceneObject(tri, current_position=[4, 5, 5])
tri2 = copy.deepcopy(self.triangles)
tri2.translate(translation)
for intype in (list, tuple, np.array):
test_obj = copy.deepcopy(sobj)
with self.subTest(intype=intype):
test_obj.change_position(intype(translation))
self.check_parameters(test_obj, tri2, np.array(translation), at.Rotation(np.eye(3)))
def test_change_orientation(self):
rotation = [1, 2, 3]
tri = copy.deepcopy(self.triangles)
tri.rotate([4, 5, 5])
sobj = scene.SceneObject(tri, current_orientation=[4, 5, 5])
tri2 = copy.deepcopy(self.triangles)
tri2.rotate(rotation)
for intype in (list, tuple, np.array):
test_obj = copy.deepcopy(sobj)
with self.subTest(intype=intype):
test_obj.change_orientation(intype(rotation))
self.check_parameters(test_obj, tri2, np.zeros(3), at.Rotation(rotation))
def test_translate(self):
translation = [1, 2, 3]
sobj = scene.SceneObject(copy.deepcopy(self.triangles), current_position=[2, 3, 4])
tri2 = copy.deepcopy(self.triangles)
tri2.translate(translation)
for intype in (list, tuple, np.array):
test_obj = copy.deepcopy(sobj)
with self.subTest(intype=intype):
test_obj.translate(intype(translation))
self.check_parameters(test_obj, tri2, np.array(translation)+np.array([2, 3, 4]), np.eye(3))
def test_rotate(self):
rotation = [1, 2, 3]
sobj = scene.SceneObject(copy.deepcopy(self.triangles), current_orientation=[3, 2, 1])
tri2 = copy.deepcopy(self.triangles)
tri2.rotate(rotation)
for intype in (list, tuple, np.array):
test_obj = copy.deepcopy(sobj)
with self.subTest(intype=intype):
test_obj.rotate(intype(rotation))
self.check_parameters(test_obj, tri2, np.zeros(3), at.Rotation(rotation) * at.Rotation([3, 2, 1]))
# noinspection PyArgumentList
class TestAutoSceneObj(TestCase):
def setUp(self):
tri1 = np.array([[-5, -4, -4.5],
[0, 0, 1],
[0, 0, 0]])
tri2 = tri1+np.array([[2.5, 0, 0]]).T
tri3 = tri2+np.array([[2.5, 0, 0]]).T
tri4 = tri3+np.array([[2.5, 0, 0]]).T
self.triangles = shapes.Triangle64(np.hstack([tri1, tri2, tri3, tri4]).T, 1, np.arange(12).reshape(-1, 3))
def pos_fun(time):
rng = np.random.RandomState(int(time.toordinal()))
return rng.randn(3)
self.pos_fun = pos_fun
def frame_fun(time):
rng = np.random.RandomState(int(time.toordinal()))
return at.Rotation(rng.randn(3))
self.frame_fun = frame_fun
def check_parameters(self, obj, model, position, orientation):
self.assertTrue(obj.shape == model)
np.testing.assert_array_equal(obj.position, position.ravel())
self.assertEqual(obj.orientation, orientation)
self.assertEqual(obj.orientation_function, self.frame_fun)
self.assertEqual(obj.position_function, self.pos_fun)
def test_creation(self):
asobj = scene.SceneObject(self.triangles, position_function=self.pos_fun,
orientation_function=self.frame_fun)
self.check_parameters(asobj, self.triangles, np.zeros(3, dtype=np.float64),
at.Rotation(np.eye(3)))
asobj = scene.SceneObject(self.triangles, position_function=self.pos_fun,
orientation_function=self.frame_fun, current_position=np.ones(3),
current_orientation=at.Rotation([1, 2, 3]))
self.check_parameters(asobj, self.triangles, np.ones(3), at.Rotation([1, 2, 3]))
def test_change_position(self):
translation = [1, 2, 3]
tri = copy.deepcopy(self.triangles)
tri.translate([4, 5, 5])
asobj = scene.SceneObject(tri, position_function=self.pos_fun,
orientation_function=self.frame_fun, current_position=[4, 5, 5])
tri2 = copy.deepcopy(self.triangles)
tri2.translate(translation)
for intype in (list, tuple, np.array):
test_obj = copy.deepcopy(asobj)
with self.subTest(intype=intype):
test_obj.change_position(intype(translation))
self.check_parameters(test_obj, tri2, np.array(translation), at.Rotation(np.eye(3)))
def test_change_orientation(self):
rotation = [1, 2, 3]
tri = copy.deepcopy(self.triangles)
tri.rotate([4, 5, 5])
asobj = scene.SceneObject(tri, position_function=self.pos_fun,
orientation_function=self.frame_fun, current_orientation=[4, 5, 5])
tri2 = copy.deepcopy(self.triangles)
tri2.rotate(rotation)
for intype in (list, tuple, np.array):
test_obj = copy.deepcopy(asobj)
with self.subTest(intype=intype):
test_obj.change_orientation(intype(rotation))
                self.check_parameters(test_obj, tri2, np.zeros(3), at.Rotation(rotation))  # api: numpy.zeros
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # select the backend before pyplot is imported
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def visualise_room(graph, dimensions_mean=0, dimensions_std=1):
fig = plt.figure(0, dpi=90)
ax = fig.add_subplot(1, 2, 1, projection='3d')
ax = plot_room_graph(ax, graph['nodes'], graph['senders'], graph['receivers'])
ax = fig.add_subplot(1, 2, 2, projection='3d')
ax.set_aspect("equal")
# Plot frames of each of the objects in the graph
nodes = np.array(graph['nodes'])
type_indices = np.argmax(nodes[:, :9], axis=1)
ax = plot_room(ax, type_indices, nodes[:, 9:11], nodes[:, 11:14],
nodes[:, 14:] + dimensions_mean/dimensions_std)
return ax
def plot_room(ax, type_index, rotations, positions, dimensions):
for type, rot, pos, dim in zip(type_index, rotations, positions, dimensions):
colour = get_colour(type)
coords, height = calc_frame_coords(rot, pos, dim)
plot_frame(ax, coords, pos[2],
pos[2] + height, colour)
axis_equal_3d(ax)
ax.view_init(elev=90, azim=-90)
plt.show()
def plot_frame(ax, vertices, base, height, color="b"):
for i in range(0, len(vertices) - 1):
v1 = vertices[i][:2]
v2 = vertices[i + 1][:2]
ax.plot3D(*zip(np.array([*v1, base]), np.array([*v2, base])), color=color)
ax.plot3D(*zip(np.array([*v1, height]), np.array([*v2, height])), color=color)
ax.plot3D(*zip(np.array([*v1, base]), np.array([*v1, height])), color=color)
if i == len(vertices) - 2:
            ax.plot3D(*zip(np.array([*v2, base]), np.array([*v2, height])), color=color)  # api: numpy.array
import numpy as np
import unittest
import random
import warnings
from scipy.stats import pearsonr
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, \
StackingClassifier, StackingRegressor, GradientBoostingClassifier, \
GradientBoostingRegressor
from sklearn.dummy import DummyRegressor, DummyClassifier
from sklearn.svm import SVC, SVR
from sklearn.metrics import roc_auc_score, explained_variance_score, \
balanced_accuracy_score
import pipecaster.utils as utils
from pipecaster.testing_utils import make_multi_input_classification, \
make_multi_input_regression
import pipecaster.transform_wrappers as transform_wrappers
from pipecaster.transform_wrappers import make_transformer
from pipecaster.score_selection import RankScoreSelector
from pipecaster.multichannel_pipeline import MultichannelPipeline
from pipecaster.ensemble_learning import Ensemble, SoftVotingClassifier, \
SoftVotingDecision, MultichannelPredictor, HardVotingClassifier, \
AggregatingRegressor
from pipecaster.cross_validation import cross_val_score
class TestVoting(unittest.TestCase):
def test_soft_voting(self, verbose=0, seed=42):
Xs, y, _ = make_multi_input_classification(n_informative_Xs=5,
n_random_Xs=2, random_state=seed)
clf = MultichannelPipeline(n_channels=7)
clf.add_layer(StandardScaler())
base_clf = KNeighborsClassifier()
base_clf = transform_wrappers.SingleChannel(base_clf)
clf.add_layer(base_clf)
clf.add_layer(SoftVotingClassifier())
scores = cross_val_score(clf, Xs, y, score_method='predict',
scorer=balanced_accuracy_score)
score = np.mean(scores)
if verbose > 0:
print('accuracy = {}'.format(score))
self.assertTrue(score > 0.80)
def test_soft_voting_decision(self, verbose=0, seed=42):
Xs, y, _ = make_multi_input_classification(n_informative_Xs=6,
n_random_Xs=3,
random_state=seed)
clf = MultichannelPipeline(n_channels=9)
clf.add_layer(StandardScaler())
base_clf = make_transformer(SVC(),
transform_method='decision_function')
clf.add_layer(base_clf)
meta_clf1 = SoftVotingDecision()
clf.add_layer(3, meta_clf1, 3, meta_clf1, 3, meta_clf1)
meta_clf2 = MultichannelPredictor(GradientBoostingClassifier())
clf.add_layer(meta_clf2)
scores = cross_val_score(clf, Xs, y, score_method='predict',
scorer=balanced_accuracy_score)
score = np.mean(scores)
if verbose > 0:
print('accuracy = {}'.format(score))
self.assertTrue(score > 0.85)
def test_hard_voting(self, verbose=0, seed=42):
Xs, y, _ = make_multi_input_classification(
n_informative_Xs=10, n_random_Xs=0,
class_sep=2, random_state=seed)
clf = MultichannelPipeline(n_channels=10)
clf.add_layer(StandardScaler())
base_clf = KNeighborsClassifier()
base_clf = make_transformer(base_clf, transform_method='predict')
clf.add_layer(base_clf)
clf.add_layer(HardVotingClassifier())
scores = cross_val_score(clf, Xs, y, score_method='predict',
scorer=balanced_accuracy_score)
score = np.mean(scores)
if verbose > 0:
print('accuracy = {}'.format(score))
self.assertTrue(score > 0.90)
class TestAggragation(unittest.TestCase):
def test_aggregating_regressor(self, verbose=0, seed=42):
Xs, y, _ = make_multi_input_regression(n_informative_Xs=3,
random_state=seed)
clf = MultichannelPipeline(n_channels=3)
base_clf = GradientBoostingRegressor(n_estimators=50)
clf.add_layer(make_transformer(base_clf))
clf.add_layer(AggregatingRegressor(np.mean))
cross_val_score(clf, Xs, y, cv=3)
scores = cross_val_score(clf, Xs, y, score_method='predict',
scorer=explained_variance_score)
score = np.mean(scores)
if verbose > 0:
print('accuracy = {}'.format(score))
self.assertTrue(score > 0.3)
class TestEnsembleMetaprediction(unittest.TestCase):
def setUp(self):
warnings.filterwarnings('ignore')
def tearDown(self):
warnings.resetwarnings()
def test_discrimination_cls(self, verbose=0, seed=42):
"""
Determine if Ensemble can discriminate between dummy classifiers and LogisticRegression classifiers
"""
X, y = make_classification(n_samples=500, n_features=20, n_informative=15, class_sep=1, random_state=seed)
base_classifiers = [DummyClassifier(strategy='stratified') for i in range(5)]
base_classifiers.extend([LogisticRegression() for i in range(5)])
random.shuffle(base_classifiers)
informative_mask = [True if type(c) == LogisticRegression else False for c in base_classifiers]
mclf = MultichannelPipeline(n_channels=1)
mclf.add_layer(StandardScaler())
mclf.add_layer(Ensemble(base_classifiers, SVC(), internal_cv=5, score_selector=RankScoreSelector(k=5)))
mclf.fit([X], y)
selected_indices = mclf.get_model(layer_index=1, model_index=0).get_support()
selection_mask = [True if i in selected_indices else False for i in range(len(base_classifiers))]
if verbose > 0:
n_correct = sum([1 for i, s in zip(informative_mask, selection_mask) if i and s])
            print('\n\ncorrectly selected {}/5 LogisticRegression classifiers'.format(n_correct))
print('incorrectly selected {}/5 DummyClassifiers\n\n'.format(5- n_correct))
self.assertTrue(np.array_equal(selection_mask, informative_mask),
'Ensemble failed to discriminate between dummy classifiers and LogisticRegression')
def test_compare_to_StackingClassifier(self, verbose=0, seed=42):
"""
Determine if Ensemble with dummies correctly selects the real predictors and gives similar
performance to scikit-learn StackingClassifier trained without dummies.
"""
X, y = make_classification(n_samples=1000, n_features=20, n_informative=5, class_sep=0.5, random_state=seed)
classifiers = [LogisticRegression(random_state=seed),
KNeighborsClassifier(),
RandomForestClassifier(random_state=seed)]
dummy_classifiers = [DummyClassifier(strategy='stratified', random_state=seed) for repeat in range(100)]
all_classifiers = classifiers + dummy_classifiers
random.shuffle(all_classifiers)
mclf = MultichannelPipeline(n_channels=1)
mclf.add_layer(StandardScaler())
mclf.add_layer(Ensemble(all_classifiers, SVC(random_state=seed), internal_cv=5, score_selector=RankScoreSelector(k=3)))
pc_score_all = np.mean(cross_val_score(mclf, [X], y, cv=5, n_processes=5))
mclf.fit([X], y)
selected_classifiers = mclf.get_model(1,0).get_base_models()
self.assertTrue(len(selected_classifiers) == 3,
'Ensemble picked the {} classifiers instead of 3.'.format(len(selected_classifiers)))
self.assertFalse(DummyClassifier in [c.__class__ for c in selected_classifiers],
'Ensemble chose a dummy classifier over a real one')
mclf = MultichannelPipeline(n_channels=1)
mclf.add_layer(StandardScaler())
mclf.add_layer(Ensemble(classifiers, SVC(random_state=seed), internal_cv=5, score_selector=RankScoreSelector(k=3)))
pc_score_informative = np.mean(cross_val_score(mclf, [X], y, cv=5, n_processes=5))
base_classifier_arg = [(str(i), c) for i, c in enumerate(classifiers)]
clf = StackingClassifier(base_classifier_arg, SVC(random_state=seed), cv=StratifiedKFold(n_splits=3))
sk_score_informative = np.mean(cross_val_score(clf, X, y, cv=5, n_processes=5))
if verbose > 0:
base_classifier_arg = [(str(i), c) for i, c in enumerate(all_classifiers)]
clf = StackingClassifier(base_classifier_arg, SVC(random_state=seed), cv=StratifiedKFold(n_splits=3))
sk_score_all = np.mean(cross_val_score(clf, X, y, cv=5, n_processes=5))
print('\nBalanced accuracy scores')
print('Ensemble informative predictors: {}'.format(pc_score_informative))
print('Ensemble all predictors: {}'.format(pc_score_all))
print('StackingClassifier informative predictors: {}'.format(sk_score_informative))
print('StackingClassifier all predictors: {}'.format(sk_score_all))
self.assertTrue(np.round(pc_score_all, 2) == np.round(pc_score_informative, 2),
'Ensemble accuracy is not same for all classifiers and informative classifiers.')
tolerance_pct = 5
self.assertTrue(pc_score_all >= sk_score_informative * (1 - tolerance_pct / 100.0),
'''Ensemble with random inputs did not perform within accepted tolerance of StackingClassifier with no dummy classifiers.''')
def test_discrimination_rgr(self, verbose=0, seed=42):
"""
Determine if Ensemble can discriminate between dummy regressors and LinearRegression classifiers
"""
X, y = make_regression(n_samples=500, n_features=20, n_informative=10, random_state=seed)
base_regressors = [DummyRegressor(strategy='mean') for i in range(5)]
base_regressors.extend([LinearRegression() for i in range(5)])
random.shuffle(base_regressors)
informative_mask = [True if type(c) == LinearRegression else False for c in base_regressors]
mclf = MultichannelPipeline(n_channels=1)
mclf.add_layer(StandardScaler())
mclf.add_layer(Ensemble(base_regressors, SVR(), internal_cv=5, score_selector=RankScoreSelector(k=5)))
mclf.fit([X], y)
selected_indices = mclf.get_model(layer_index=1, model_index=0).get_support()
selection_mask = [True if i in selected_indices else False for i in range(len(base_regressors))]
if verbose > 0:
n_correct = sum([1 for i, s in zip(informative_mask, selection_mask) if i and s])
print('\n\ncorrectly selected {}/5 LinearRegression regressors'.format(n_correct))
print('incorrectly selected {}/5 DummyRegressors\n\n'.format(5- n_correct))
        self.assertTrue(np.array_equal(selection_mask, informative_mask),  # api: numpy.array_equal
                        'Ensemble failed to discriminate between dummy regressors and LinearRegression')
#!/usr/bin/env python3
import OutflowCone as oc
import numpy as np
import numpy.ma as ma
class Cone:
""" Galactic wind outflow cone model.
"""
def __init__(self, inc=0, PA=0, theta=60, r_in=0.0, r_out=5.0):
""" Create a new outflow cone.
Keywords:
inc -- The inclination of the cone with respect to the
line-of-sight.
PA -- The position angle of the cone with respect to
the line-of-sight.
theta -- The opening half-angle of the cone (degrees).
r_in -- The inner radius of the cone (kpc).
r_out -- The outer radius of the cone (kpc).
"""
self.inc = inc
self.PA = PA
self.theta = theta
self.r_in = r_in
self.r_out = r_out
self.positions = None
def GenerateClouds(self, n_clouds, bicone=False, falloff=1,
zero_z=False, flatten=False):
""" Generate 'n' model clouds within the cone bounds.
Arguments:
            n_clouds -- The number of model clouds to generate.
Keywords:
bicone -- Create a single cone or a bi-cone. Default is False.
falloff -- Radial density distribution exponent. Default is 1 for
a mass-conserving outflow (density goes as r^-2).
A value of 1/3 creates a constant-density profile.
zero_z -- r_in makes the z-height of the base of the cone non-zero.
Should the clouds all get translated down? (e.g. z -= r_in)
flatten -- Keep the inner radius spherical? Or flatten it?
Returns:
None. Creates "positions" member variable, containing Cartesian
position vectors for 'n' clouds, and "velocities" member variable,
containing zero-velocity vectors for all clouds (ostensibly these
are Cartesian as well).
"""
self._n_clouds = n_clouds
# Even spread in cos(theta) to avoid clustering at poles.
theta_rad = np.radians(self.theta)
if bicone:
            # np.random.random needs an integer sample count; n_clouds/2. (a float) would raise a TypeError
            vs1 = np.random.random(self._n_clouds // 2) * \
                (1 - np.cos(theta_rad)) + np.cos(theta_rad)
            vs2 = -(np.random.random(self._n_clouds // 2) * \
                (1 - np.cos(theta_rad)) + np.cos(theta_rad))
vs = np.concatenate((vs1, vs2))
else:
vs = np.random.random(self._n_clouds) * \
(1-np.cos(theta_rad)) + np.cos(theta_rad)
        thetas = np.arccos(vs)  # api: numpy.arccos
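# --- Editor's note on the sampling used above (not part of the original class):
# drawing u uniformly in [cos(theta_max), 1] and taking theta = arccos(u) gives
# directions uniformly distributed over the spherical cap, avoiding the pole
# clustering that uniform-in-theta sampling would produce. Standalone sketch:
def _example_cap_sampling(n=1000, theta_max_deg=60.0, seed=0):
    import numpy as np
    rng = np.random.default_rng(seed)
    cos_max = np.cos(np.radians(theta_max_deg))
    u = rng.uniform(cos_max, 1.0, size=n)    # uniform in cos(theta)
    thetas = np.arccos(u)                    # polar angles inside the cap
    phis = rng.uniform(0.0, 2.0 * np.pi, n)  # azimuths uniform in [0, 2*pi)
    return thetas, phis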