prompt (string, 15 to 655k chars) | completion (string, 3 to 32.4k chars) | api (string, 8 to 52 chars)
---|---|---
import numpy as np
import torch
from sklearn.gaussian_process.kernels import (
Hyperparameter,
Kernel as KernelSklearn,
StationaryKernelMixin,
NormalizedKernelMixin,
)
from neuralprocess.data.base import FunctionGenerator
class Kernel:
"""
A simple base class for kernels that exposes construction arguments
as attributes.
"""
def __init__(self, **kwargs):
self._params = dict()
self.params = kwargs
@property
def params(self):
return self._params.copy()
@params.setter
def params(self, kwargs):
for key, val in kwargs.items():
setattr(self, key, val)
self._params.update(kwargs)
def __setattr__(self, key, val):
super().__setattr__(key, val)
if key not in ("params", "_params"):
self._params.update({key: val})
def __call__(self, a, b):
raise NotImplementedError
class GaussianKernel(Kernel):
def __init__(self, lengthscale=0.5, amplitude=1.0):
super().__init__(lengthscale=lengthscale, amplitude=amplitude)
def __call__(self, a, b):
"""a, b assumed to be of shape (N,1)"""
sqdist = (a - b.T) ** 2
l = self.lengthscale ** 2
return self.amplitude *
|
np.exp(-0.5 / l * sqdist)
|
numpy.exp
|
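A minimal usage sketch of the kernel above (illustrative only, assuming the completion is filled in): GaussianKernel evaluates the squared-exponential kernel k(a, b) = amplitude * exp(-(a - b)^2 / (2 * lengthscale^2)) on column vectors, so the Gram matrix has the amplitude on its diagonal.

import numpy as np

x = np.linspace(0.0, 1.0, 3).reshape(-1, 1)   # shape (N, 1), as the docstring assumes
k = GaussianKernel(lengthscale=0.5, amplitude=1.0)
K = k(x, x)                                    # (N, N) Gram matrix
assert K.shape == (3, 3)
assert np.allclose(np.diag(K), 1.0)            # k(x, x) == amplitude on the diagonal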
import pandas
import numpy as np
import datetime
import duckdb
df = pandas.DataFrame([{"col1":"val1","col2":1.05},{"col1":"val3","col2":np.NaN}])
df["newcol1"] = np.where(df["col1"] == "val1",np.NaN,df["col1"])
current_time = datetime.datetime.now().replace(microsecond=0)
df['datetest'] = current_time
df.loc[0,'datetest'] = pandas.NaT
conn = duckdb.connect(':memory:')
conn.register('testing_null_values', df)
results = conn.execute('select * from testing_null_values').fetchall()
assert results[0][0] == 'val1'
assert results[0][1] == 1.05
assert results[0][2] == None
assert results[0][3] == None
assert results[1][0] == 'val3'
assert results[1][1] == None
assert results[1][2] == 'val3'
assert results[1][3] == current_time
result_df = conn.execute('select * from testing_null_values').fetchdf()
assert result_df['col1'][0] == df['col1'][0]
assert result_df['col1'][1] == df['col1'][1]
assert result_df['col2'][0] == df['col2'][0]
assert
|
np.isnan(result_df['col2'][1])
|
numpy.isnan
|
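A short aside on what these assertions check: when the registered DataFrame is read back with fetchall(), SQL NULLs arrive as Python None, while fetchdf() returns a DataFrame whose missing floats come back as NaN (hence the np.isnan check in the completion). A minimal sketch of the same round-trip, with an assumed single-column table:

import duckdb
import numpy as np
import pandas as pd

df_null = pd.DataFrame({"x": [1.0, np.nan]})
con = duckdb.connect(":memory:")
con.register("t", df_null)
rows = con.execute("select x from t").fetchall()
assert rows[1][0] is None                      # fetchall(): NULL -> Python None
out = con.execute("select x from t").fetchdf()
assert np.isnan(out["x"][1])                   # fetchdf(): NULL -> NaN in a float column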
"""apply color transformation to an area of an image
use output data of filter_rois.py to apply color transformation
"""
import pandas as pd
import numpy as np
import cv2
import argparse
parser = argparse.ArgumentParser(description='Apply color transformations to an area of an image')
parser.add_argument('-i', '--image', dest='image_path', help='Path to input image to be modified', required=True)
parser.add_argument('-d', '--data', dest='data_path',
help='Path to csv specifying regions to be modified', required=True)
parser.add_argument('-c', '--colors', dest='colors',
help='Path to csv specifying new colors (in rgb) to apply to regions')
args = parser.parse_args()
# recolor labeled areas by a single rgb value
new_colors_rgb = pd.read_csv(args.colors).to_dict()
for k in new_colors_rgb.keys():
new_colors_rgb[k] = list(new_colors_rgb[k].values())
# read in image and recolor data
image = cv2.imread(args.image_path)
recolor_data = pd.read_csv(args.data_path)
# copy image for safe keeping and blur
image_og = np.copy(image)
blurred = cv2.GaussianBlur(image, (3, 3), 0, 0)
# get image dims
im_h, im_w = image.shape[:2]
def scale_between(arr, a, b):
return (b - a) * (arr - arr.min()) / (arr.max() - arr.min()) + a
for i, row in recolor_data.iterrows():
if row['label'] in new_colors_rgb.keys():
new_color_patch = cv2.cvtColor(np.zeros((10, 10), dtype='uint8'), cv2.COLOR_GRAY2BGR)
new_color_patch[:] = new_colors_rgb[row['label']][::-1]
else:
continue
# mask to roi
x, y, w, h = row['box_x'], row['box_y'], row['box_w'], row['box_h']
mask = np.zeros((im_h, im_w), dtype="uint8")
mask = cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)
masked = cv2.bitwise_and(blurred, blurred, mask=mask)
# perform color filter
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
lower_thresh = (row['low_h'], row['low_s'], row['low_v'])
upper_thresh = (row['hi_h'], row['hi_s'], row['hi_v'])
color_mask = cv2.inRange(hsv, lower_thresh, upper_thresh)
# clean up salt/pepper color filtering noise
kernel = np.ones((5, 5), np.uint8)
color_mask = cv2.morphologyEx(color_mask, cv2.MORPH_OPEN, kernel)
color_mask = cv2.morphologyEx(color_mask, cv2.MORPH_CLOSE, kernel)
color_mask = cv2.dilate(color_mask,
|
np.ones((3, 3), np.uint8)
|
numpy.ones
|
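For context on the MORPH_OPEN / MORPH_CLOSE calls above, here is a small self-contained sketch (synthetic mask, not data from the script) showing how opening removes isolated "salt" pixels while closing fills small holes:

import cv2
import numpy as np

mask = np.zeros((20, 20), dtype=np.uint8)
mask[5:15, 5:15] = 255                                        # a solid blob
mask[2, 2] = 255                                              # isolated salt pixel
mask[10, 10] = 0                                              # single-pixel hole inside the blob
kernel = np.ones((3, 3), np.uint8)
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)       # drops the lone pixel
cleaned = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel)   # fills the hole
assert cleaned[2, 2] == 0 and cleaned[10, 10] == 255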
from os.path import join
import torch
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
from scipy.spatial import distance_matrix
import model
import data
cinn = model.ColorizationCINN(0)
cinn.cuda()
cinn.eval()
state_dict = {k:v for k,v in torch.load('output/lsun_cinn.pt').items() if 'tmp_var' not in k}
cinn.load_state_dict(state_dict)
def colorize_test_set(temp=1., postfix=0, img_folder='images'):
'''Colorize the whole test set once.
temp: Sampling temperature
postfix: Has to be integer. Append to file name (e.g. to make 10 diverse colorizations of test set)
'''
counter = 0
with torch.no_grad():
for Lab in tqdm(data.test_loader):
Lab = Lab.cuda()
z = temp * torch.randn(Lab.shape[0], model.ndim_total).cuda()
L, ab = Lab[:, :1], Lab[:, 1:]
ab_gen = cinn.reverse_sample(z, L)
rgb_gen = data.norm_lab_to_rgb(L.cpu(), ab_gen.cpu())
for im in rgb_gen:
im =
|
np.transpose(im, (1,2,0))
|
numpy.transpose
|
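The completion reorders a generated image from channel-first (C, H, W), as produced by the torch pipeline, to channel-last (H, W, C), which matplotlib's imshow/imsave expect. A tiny illustrative check:

import numpy as np

chw = np.zeros((3, 64, 128))          # (C, H, W) as returned by the model pipeline
hwc = np.transpose(chw, (1, 2, 0))    # -> (H, W, C) for plt.imshow / plt.imsave
assert hwc.shape == (64, 128, 3)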
import abc
import numpy as np
import math
import random
import itertools as it
from hklearn_genetic.board_conflicts import conflict
from deap import tools, gp
class ProblemInterface(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'evaluate') and
callable(subclass.evaluate) and
hasattr(subclass, 'stop_criteria') and
callable(subclass.stop_criteria) and
hasattr(subclass, 'populate') and
callable(subclass.populate) and
hasattr(subclass, 'decode') and
callable(subclass.decode) and
hasattr(subclass, 'crossover') and
callable(subclass.crossover) and
hasattr(subclass, 'mutate') and
callable(subclass.mutate))
@ProblemInterface.register
class IProblem:
def evaluate(self, X):
"""Evaluate the potential solutions to the problem."""
pass
def stop_criteria(self, X_eval):
"""Return whether the population has reached the stopping criterion."""
pass
def populate(self, n_individuals):
"""Create an initial population of candidate solutions."""
pass
def decode(self, X_encoded):
"""Map the population from genotype to phenotype."""
pass
def crossover(self, X, pc, elitism):
"""Perform crossover on the elements of the population."""
pass
def mutate(self, X, pm, elitism):
"""Perform mutation on the elements of the population."""
pass
class BaseProblem(IProblem):
def get_crossover_probs(self, n_cross):
return np.random.rand(1 , n_cross)[0,:]
def get_crossover_points(self, length):
return
|
np.random.randint(0, length)
|
numpy.random.randint
|
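To make the two helpers above concrete, here is a hedged sketch (the function name, genome representation, and shapes are assumptions for illustration, not part of the original class) of how a crossover probability and a random crossover point can drive a single-point crossover between two parents:

import numpy as np

def single_point_crossover(parent_a, parent_b, pc=0.9, rng=np.random):
    """Illustrative sketch: swap the tails of two 1-D genomes with probability pc."""
    if rng.rand() >= pc:                      # cf. get_crossover_probs
        return parent_a.copy(), parent_b.copy()
    point = rng.randint(0, len(parent_a))     # cf. get_crossover_points
    child_a = np.concatenate([parent_a[:point], parent_b[point:]])
    child_b = np.concatenate([parent_b[:point], parent_a[point:]])
    return child_a, child_b

a = np.array([0, 0, 0, 0, 0])
b = np.array([1, 1, 1, 1, 1])
c1, c2 = single_point_crossover(a, b, pc=1.0)
assert len(c1) == len(a) and len(c2) == len(b)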
import os
import numpy as np
import pandas as pd
import scipy.sparse as sp
from scipy.interpolate import interp1d
import scipy.signal as signal
import multiprocess as mp
import cooler
import cooltools.snipping as snipping
from cooltools.lib.numutils import logbins
import bioframe
from mirnylib.numutils import zoomArray
import DNA_info
def clean_up_loops(loop_list, arms):
'''Removes items from loop_list that are either not in any of the regions contained in arms or are contained
in a region between two of the regions in arms. Use this to clean up a new list of loops and save the
modified list for future use.'''
for each in ['chrom1', 'chrom2', 'pos1', 'pos2']:
assert each in loop_list.columns
features = loop_list.copy(deep=True)
features['index1'] = -1
features['index2'] = -1
for i, arm in enumerate(arms):
chrom = arm[0]
start = arm[1]
end = arm[2]
features['index1'] = features.apply(lambda x: i
if (x['chrom1']==chrom and x['pos1'] > start)
and x['pos1'] < end else x['index1'], axis=1)
features['index2'] = features.apply(lambda x: i
if (x['chrom2']==chrom and x['pos2'] > start)
and x['pos2'] < end else x['index2'], axis=1)
features = features[np.logical_or(features.index1 != -1, features.index2 != -1)]
features = features[features.index1 == features.index2]
features = features[['chrom1', 'pos1', 'chrom2', 'pos2']]
return features
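# Illustrative sketch (toy values, not part of the original module): clean_up_loops keeps
# only loops whose two anchors fall inside the same (chrom, start, end) arm. The arm layout
# below is an assumption for the example.
_example_loops = pd.DataFrame({'chrom1': ['chr1', 'chr1'], 'pos1': [150, 950],
                               'chrom2': ['chr1', 'chr2'], 'pos2': [300, 100]})
_example_arms = [('chr1', 0, 500), ('chr2', 0, 500)]
_cleaned = clean_up_loops(_example_loops, _example_arms)
assert len(_cleaned) == 1   # only the first loop has both anchors in the same arm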
def sparseSymmetricOOE(matrix, mask, log_binning=True):
'''Quick OOE operation for sparse symmetric matrices. This will be used by the LocalObsOverExp object to
compute OOE on support regions.'''
if matrix.shape[0] == 1:
return matrix
#Finding number of valid bins per diagonal using FFT convolve
count_per_diag = signal.fftconvolve(mask, mask[::-1], mode='full')
count_per_diag = np.round(count_per_diag[len(count_per_diag)//2:])
count_per_diag = count_per_diag.astype(int)
row, col, data = matrix.row, matrix.col, matrix.data
nan_indices = ~np.isfinite(data)
data[nan_indices]=0
diff = abs(row-col)
#Summing by diagonal
scaling = np.bincount(diff, weights=data, minlength=len(count_per_diag))/2
assert len(scaling)==len(count_per_diag)
if log_binning:
hi = len(scaling)
lo = 1
ratio = 1.2
N = int(np.log(hi / lo) / np.log(ratio))
bins = logbins(1, len(scaling), N=N)
bin_mids = np.sqrt(bins[1:]*bins[0:-1])
lab = np.concatenate(tuple((i+1)*np.ones(bins[i+1]-bins[i], dtype=int) for i in range(len(bins)-1)))
log_scaling = np.bincount(lab,weights=scaling[1:])
log_count = np.bincount(lab, weights=count_per_diag[1:])
coarse_expected = log_scaling[1:]/log_count[1:]
f = interp1d(np.log10(bin_mids), np.log10(coarse_expected), kind='linear')
y = f(np.log10(np.arange(2,np.floor(bin_mids[-1]))))
x = np.log10(np.arange(2,np.floor(bin_mids[-1])))
xremaining = np.log10(np.arange(np.round(10**x[-1]+1),len(scaling)))
yremaining = y[-1] + ((y[-1]-y[-2])/(x[-1]-x[-2]))*(xremaining - x[-1])
x = np.append(x,xremaining)
y = np.append(y,yremaining)
fine_expected = 10**y
fine_bins = np.round(10**x)
for i in range(1,-1,-1):
fine_expected = np.insert(fine_expected,0,scaling[i]/count_per_diag[i])
fine_bins = np.insert(fine_bins,0,i).astype(int)
assert np.all((fine_bins[1:]-fine_bins[0:-1])==1)
else:
fine_expected = scaling/count_per_diag
matrix.data = data/fine_expected[diff]
# matrix.data[nan_indices] = np.nan
return matrix
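# Illustrative aside (toy matrix, not part of the original module): the heart of the
# observed-over-expected step above is summing contacts by diagonal with np.bincount
# and then dividing every entry by the expected value of its own diagonal.
_toy = sp.coo_matrix(np.array([[1., 2., 0.],
                               [2., 1., 3.],
                               [0., 3., 1.]]))
_per_diag_sum = np.bincount(abs(_toy.row - _toy.col), weights=_toy.data)
assert np.allclose(_per_diag_sum, [3.0, 10.0])   # main diagonal; first off-diagonal (upper + lower)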
class LocalObsExpSnipper:
'''Object whose methods are fed to the cooltools.snipping.pileup function. Only works if the regions
fed to the select method are identical, i.e. region1 MUST BE THE SAME AS region2.'''
def __init__(self, clr, cooler_opts=None, log_binning=True):
self.clr = clr
self.log_binning = log_binning
self.binsize = self.clr.binsize
self.offsets = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault('sparse', True)
def select(self, region1, region2):
print(region1, region2)
self.offsets[region1] = self.clr.offset(region1) - self.clr.offset(region1[0])
self.offsets[region2] = self.clr.offset(region2) - self.clr.offset(region2[0])
matrix = (self.clr.matrix(**self.cooler_opts)
.fetch(region1, region2))
mask = self.clr.bins().fetch(region1)
mask = np.isfinite(mask['weight'].values).astype(int)
matrix = sparseSymmetricOOE(matrix, mask, log_binning=self.log_binning)
if self.cooler_opts['sparse']:
matrix = matrix.tocsr()
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
if hi1 < 0 or hi2 < 0:
print(region1, s1, e1, region2, s2, e2)
print(offset1, offset2)
print(lo1, hi1, lo2, hi2)
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), 0.0)
snippet[pad_bottom:pad_top,
pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray().astype(float)
# print(m,n)
# print(i0, i1, j0, j1)
# print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
# print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
else:
snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype(float)
nan_rows = np.sum(snippet, axis=0) == 0
nan_cols = np.sum(snippet, axis=1) == 0
snippet[nan_rows, :] = np.nan
snippet[:, nan_cols] = np.nan
return snippet
class DifferenceSnipper:
'''Object whose methods are fed to the cooltools.snipping.pileup function. Only works if the regions
fed to the select method are identical, i.e. region1 MUST BE THE SAME AS region2.'''
def __init__(self, clr1, clr2, cooler_opts=None, log_binning=True):
self.clr1 = clr1
self.clr2 = clr2
self.log_binning = log_binning
assert clr1.binsize == clr2.binsize
self.binsize = self.clr1.binsize
self.offsets = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault('sparse', True)
def select(self, region1, region2):
print(region1, region2)
self.offsets[region1] = self.clr1.offset(region1) - self.clr1.offset(region1[0])
self.offsets[region2] = self.clr1.offset(region2) - self.clr1.offset(region2[0])
matrix1 = (self.clr1.matrix(**self.cooler_opts)
.fetch(region1, region2))
matrix2 = (self.clr2.matrix(**self.cooler_opts)
.fetch(region1, region2))
mask1 = self.clr1.bins().fetch(region1)
mask1 = np.isfinite(mask1['weight'].values).astype(int)
mask2 = self.clr2.bins().fetch(region1)
mask2 = np.isfinite(mask2['weight'].values).astype(int)
matrix1 = sparseSymmetricOOE(matrix1, mask1, log_binning=self.log_binning)
matrix2 = sparseSymmetricOOE(matrix2, mask2, log_binning=self.log_binning)
matrix = sp.coo_matrix(matrix1.todense() - matrix2.todense())
if self.cooler_opts['sparse']:
matrix = matrix.tocsr()
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
if hi1 < 0 or hi2 < 0:
print(region1, s1, e1, region2, s2, e2)
print(offset1, offset2)
print(lo1, hi1, lo2, hi2)
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), 0.0)
snippet[pad_bottom:pad_top,
pad_left:pad_right] = matrix[i0:i1, j0:j1].toarray().astype(float)
# print(m,n)
# print(i0, i1, j0, j1)
# print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
# print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
else:
snippet = matrix[lo1:hi1, lo2:hi2].toarray().astype(float)
nan_rows = np.sum(snippet, axis=0) == 0
nan_cols = np.sum(snippet, axis=1) == 0
snippet[nan_rows, :] = np.nan
snippet[:, nan_cols] = np.nan
return snippet
class RatioSnipper:
'''Object whose methods are fed to the cooltools.snipping.pileup function. Only works if the regions
fed to the select method are identical, i.e. region1 MUST BE THE SAME AS region2.'''
def __init__(self, clr1, clr2, cooler_opts=None, log_binning=True):
self.clr1 = clr1
self.clr2 = clr2
self.log_binning = log_binning
assert clr1.binsize == clr2.binsize
self.binsize = self.clr1.binsize
self.offsets = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault('sparse', True)
def select(self, region1, region2):
print(region1, region2)
self.offsets[region1] = self.clr1.offset(region1) - self.clr1.offset(region1[0])
self.offsets[region2] = self.clr1.offset(region2) - self.clr1.offset(region2[0])
matrix1 = (self.clr1.matrix(**self.cooler_opts)
.fetch(region1, region2))
matrix2 = (self.clr2.matrix(**self.cooler_opts)
.fetch(region1, region2))
mask1 = self.clr1.bins().fetch(region1)
mask1 = np.isfinite(mask1['weight'].values).astype(int)
mask2 = self.clr2.bins().fetch(region1)
mask2 = np.isfinite(mask2['weight'].values).astype(int)
matrix1 = sparseSymmetricOOE(matrix1, mask1, log_binning=self.log_binning)
matrix2 = sparseSymmetricOOE(matrix2, mask2, log_binning=self.log_binning)
matrix1 = matrix1.todense()
matrix1[matrix1==0] = 1
matrix2 = matrix2.todense()
matrix2[matrix2==0] = 1
matrix = matrix1/matrix2
# if self.cooler_opts['sparse']:
# matrix = matrix.tocsr()
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
if hi1 < 0 or hi2 < 0:
print(region1, s1, e1, region2, s2, e2)
print(offset1, offset2)
print(lo1, hi1, lo2, hi2)
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), 0.0)
snippet[pad_bottom:pad_top,
pad_left:pad_right] = np.asarray(matrix[i0:i1, j0:j1]).astype(float)
# print(m,n)
# print(i0, i1, j0, j1)
# print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
# print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
else:
snippet = np.asarray(matrix[lo1:hi1, lo2:hi2]).astype(float)
nan_rows = np.sum(snippet, axis=0) == 0
nan_cols = np.sum(snippet, axis=1) == 0
snippet[nan_rows, :] = np.nan
snippet[:, nan_cols] = np.nan
return snippet
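# Note: the class below rebinds the name DifferenceSnipper and shadows the definition above.
# Unlike the first version, its select() densifies both OOE matrices, replaces zeros with 1,
# and returns their element-wise difference as a dense array.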
class DifferenceSnipper:
'''Object whose methods are fed to the cooltools.snipping.pileup function. Only works if the regions
fed to the select method are identical, i.e. region1 MUST BE THE SAME AS region2.'''
def __init__(self, clr1, clr2, cooler_opts=None, log_binning=True):
self.clr1 = clr1
self.clr2 = clr2
self.log_binning = log_binning
assert clr1.binsize == clr2.binsize
self.binsize = self.clr1.binsize
self.offsets = {}
self.pad = True
self.cooler_opts = {} if cooler_opts is None else cooler_opts
self.cooler_opts.setdefault('sparse', True)
def select(self, region1, region2):
print(region1, region2)
self.offsets[region1] = self.clr1.offset(region1) - self.clr1.offset(region1[0])
self.offsets[region2] = self.clr1.offset(region2) - self.clr1.offset(region2[0])
matrix1 = (self.clr1.matrix(**self.cooler_opts)
.fetch(region1, region2))
matrix2 = (self.clr2.matrix(**self.cooler_opts)
.fetch(region1, region2))
mask1 = self.clr1.bins().fetch(region1)
mask1 = np.isfinite(mask1['weight'].values).astype(int)
mask2 = self.clr2.bins().fetch(region1)
mask2 = np.isfinite(mask2['weight'].values).astype(int)
matrix1 = sparseSymmetricOOE(matrix1, mask1, log_binning=self.log_binning)
matrix2 = sparseSymmetricOOE(matrix2, mask2, log_binning=self.log_binning)
matrix1 = matrix1.todense()
matrix1[matrix1==0] = 1
matrix2 = matrix2.todense()
matrix2[matrix2==0] = 1
matrix = matrix1 - matrix2
# if self.cooler_opts['sparse']:
# matrix = matrix.tocsr()
return matrix
def snip(self, matrix, region1, region2, tup):
s1, e1, s2, e2 = tup
offset1 = self.offsets[region1]
offset2 = self.offsets[region2]
binsize = self.binsize
lo1, hi1 = (s1 // binsize) - offset1, (e1 // binsize) - offset1
lo2, hi2 = (s2 // binsize) - offset2, (e2 // binsize) - offset2
if hi1 < 0 or hi2 < 0:
print(region1, s1, e1, region2, s2, e2)
print(offset1, offset2)
print(lo1, hi1, lo2, hi2)
assert hi1 >= 0
assert hi2 >= 0
m, n = matrix.shape
dm, dn = hi1 - lo1, hi2 - lo2
out_of_bounds = False
pad_left = pad_right = pad_bottom = pad_top = None
if lo1 < 0:
pad_bottom = -lo1
out_of_bounds = True
if lo2 < 0:
pad_left = -lo2
out_of_bounds = True
if hi1 > m:
pad_top = dm - (hi1 - m)
out_of_bounds = True
if hi2 > n:
pad_right = dn - (hi2 - n)
out_of_bounds = True
if out_of_bounds:
i0 = max(lo1, 0)
i1 = min(hi1, m)
j0 = max(lo2, 0)
j1 = min(hi2, n)
snippet = np.full((dm, dn), 0.0)
snippet[pad_bottom:pad_top,
pad_left:pad_right] = np.asarray(matrix[i0:i1, j0:j1]).astype(float)
# print(m,n)
# print(i0, i1, j0, j1)
# print(matrix[i0:i1, j0:j1].toarray().astype(float).shape)
# print(snippet[pad_bottom:pad_top, pad_left:pad_right].shape)
else:
snippet = np.asarray(matrix[lo1:hi1, lo2:hi2]).astype(float)
nan_rows = np.sum(snippet, axis=0) == 0
nan_cols =
|
np.sum(snippet, axis=1)
|
numpy.sum
|
"""
Module containing manifolds of n-dimensional rotations
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import numpy.random as rnd
from scipy.linalg import expm, logm
from scipy.special import comb
from pymanopt.tools.multi import multiprod, multitransp, multisym, multiskew
from pymanopt.manifolds.manifold import Manifold
class Rotations(Manifold):
"""
Returns a manifold structure to optimize over rotation matrices.
manifold = Rotations(n)
manifold = Rotations(n, k)
Special orthogonal group (the manifold of rotations): deals with matrices
X of size k x n x n (or n x n if k = 1, which is the default) such that
each n x n matrix is orthogonal, with determinant 1, i.e.,
dot(X.T, X) = eye(n) if k = 1, or dot(X[i].T, X[i]) = eye(n) if k > 1.
This is a description of SO(n)^k with the induced metric from the
embedding space (R^nxn)^k, i.e., this manifold is a Riemannian
submanifold of (R^nxn)^k endowed with the usual trace inner product.
Tangent vectors are represented in the Lie algebra, i.e., as skew
symmetric matrices. Use the function manifold.tangent2ambient(X, H) to
switch from the Lie algebra representation to the embedding space
representation. This is often necessary when defining
problem.ehess(X, H).
By default, the retraction is only a first-order approximation of the
exponential. To force the use of a second-order approximation, call
manifold.retr = manifold.retr2 after creating M. This switches from a
QR-based computation to an SVD-based computation.
By default, k = 1.
Example. Based on the example found at:
http://www.manopt.org/manifold_documentation_rotations.html
>>> import numpy as np
>>> from pymanopt import Problem
>>> from pymanopt.solvers import TrustRegions
>>> from pymanopt.manifolds import Rotations
Generate the problem data.
>>> n = 3
>>> m = 10
>>> A = np.random.randn(n, m)
>>> B = np.random.randn(n, m)
>>> ABt = np.dot(A,B.T)
Create manifold - SO(n).
>>> manifold = Rotations(n)
Define the cost function.
>>> cost = lambda X : -np.tensordot(X, ABt, axes=X.ndim)
Define and solve the problem.
>>> problem = Problem(manifold=manifold, cost=cost)
>>> solver = TrustRegions()
>>> X = solver.solve(problem)
See also: Stiefel
This file is based on rotationsfactory from Manopt: www.manopt.org
Ported by: <NAME>
Original author: <NAME>, Dec. 30, 2012.
"""
def __init__(self, n, k=1):
if k == 1:
self._name = 'Rotations manifold SO({n})'.format(n=n)
elif k > 1:
self._name = 'Rotations manifold SO({n})^{k}'.format(n=n, k=k)
else:
raise RuntimeError("k must be an integer no less than 1.")
self._n = n
self._k = k
def __str__(self):
return self._name
@property
def dim(self):
return self._k * comb(self._n, 2)
def inner(self, X, U, V):
return np.tensordot(U, V, axes=U.ndim)
def norm(self, X, U):
return la.norm(U)
@property
def typicaldist(self):
return np.pi * np.sqrt(self._n * self._k)
def proj(self, X, H):
return multiskew(multiprod(multitransp(X), H))
def tangent(self, X, H):
return multiskew(H)
def tangent2ambient(self, X, U):
return multiprod(X, U)
egrad2rgrad = proj
def ehess2rhess(self, X, egrad, ehess, H):
Xt = multitransp(X)
Xtegrad = multiprod(Xt, egrad)
symXtegrad = multisym(Xtegrad)
Xtehess = multiprod(Xt, ehess)
return multiskew(Xtehess - multiprod(H, symXtegrad))
def retr(self, X, U):
def retri(Y):
Q, R = la.qr(Y)
return np.dot(Q, np.diag(np.sign(np.sign(np.diag(R)) + 0.5)))
Y = X + multiprod(X, U)
if self._k == 1:
return retri(Y)
else:
for i in range(self._k):
Y[i] = retri(Y[i])
return Y
def retr2(self, X, U):
def retr2i(Y):
U, _, Vt =
|
la.svd(Y)
|
numpy.linalg.svd
|
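A brief self-contained check of the QR-based retraction used in retr above (an illustrative sketch, not part of the pymanopt module): starting from the identity and a small skew-symmetric tangent step, the sign-corrected Q factor is orthogonal with determinant +1, i.e. a rotation.

import numpy as np
import numpy.linalg as la

n = 3
rng = np.random.default_rng(0)
A = rng.standard_normal((n, n))
U = 0.1 * (A - A.T)                                    # small skew-symmetric tangent vector
Y = np.eye(n) + U                                      # X + multiprod(X, U) with X = eye(n)
Q, R = la.qr(Y)
Q = Q @ np.diag(np.sign(np.sign(np.diag(R)) + 0.5))    # same sign fix as retri()
assert np.allclose(Q.T @ Q, np.eye(n))                 # orthogonal
assert np.isclose(la.det(Q), 1.0)                      # determinant +1, so Q is in SO(3)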
#!/usr/bin/env python
from scipy import interpolate
import numpy as np
from numpy.lib.recfunctions import append_fields
import scipy.signal as sig
import scipy.stats as st
import time, os
import pandas as pd
import math
#import report_ctd
import ctdcal.report_ctd as report_ctd
import warnings
import ctdcal.fit_ctd as fit_ctd
import datetime
from decimal import Decimal
import settings
import sys
sys.path.append('ctdcal/')
import oxy_fitting
import gsw
warnings.filterwarnings("ignore", 'Mean of empty slice.')
def cast_details(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
'''
We determine the cast details using pandas magic.
First find alternating periods of pumps on and pumps off, then select the
pumps on period with the highest pressure. Get values from the row with the
highest pressure, and return all values to be sent to log.
Input:
stacast - integer, the station and cast, as SSSCC format
log_file - file handle or string, log_file
p_col - string, name of the pressure column
time_col - string, name of the time column
b_lat_col - string, name of the latitude column
b_lon_col - string, name of the longitude column
alt_col - string, name of the altimeter column
inMat - pandas dataframe, the dataframe to come in
Output:
start_cast_time - float, unix epoch seconds?, start of cast time, to be reported to log file
end_cast_time - float, unix epoch seconds?, end of cast time, to be reported to log file
bottom_cast_time - float, unix epoch seconds?, bottom of cast time, to be reported to log file
start_pressure - float, pressure at which cast started, to be reported to log file
max_pressure - float, bottom of the cast pressure, to be reported to log file
b_lat - float, latitude at bottom of cast
b_lon - float, longitude at bottom of cast
b_alti - float, altimeter reading at bottom of cast - volts only!
inMat - the dataframe that came in, with soak period trimmed off
don't need end_cast_time, max_pressure
inMat is trimmed to start and end of cast
'''
df_test = pd.DataFrame.from_records(inMat)
dfs = find_pump_on_off_dfs(df_test)
dfs_1 = find_pumps_on_dfs(dfs)
df_cast = find_max_pressure_df(dfs_1)
df_cast1 = find_last_soak_period(df_cast)
df_cast2 = trim_soak_period_from_df(df_cast1)
start_cast_time = float(df_cast2['scan_datetime'].head(1))
start_pressure = float(df_cast2['CTDPRS'].head(1))
end_cast_time = float(df_cast2['scan_datetime'].tail(1))
max_pressure = float(df_cast2['CTDPRS'].max())
bottom_cast_time = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['scan_datetime'])
b_lat = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLAT'])
b_lon = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['GPSLON'])
b_alti = float(df_cast2.loc[df_cast2['CTDPRS'].idxmax()]['ALT'])
#last two lines must be in to return the same as old - change to slices of df later
report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time,
bottom_cast_time, start_pressure, max_pressure, b_alti,
b_lat, b_lon)
#reconvert to ndarray - might need to be altered to remove second index
# inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()].to_records(index=False)
inMat = df_cast2.loc[:df_cast2['CTDPRS'].idxmax()]
return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
#Move next four functions to a library or class(?) Clean up module
def find_pump_on_off_dfs(df):
'''Find pump_on patterns of dataframes, and return a list(?) of dataframes to iterate over.
'''
return [g for i,g in df.groupby(df['pump_on'].ne(df['pump_on'].shift()).cumsum())]
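# Illustrative aside (synthetic series, not part of the module): the groupby key above,
# s.ne(s.shift()).cumsum(), increments every time the pump_on flag changes value, so each
# group is one contiguous run of identical flag values.
_s = pd.Series([True, True, False, False, False, True])
_runs = [g.tolist() for _, g in _s.groupby(_s.ne(_s.shift()).cumsum())]
assert _runs == [[True, True], [False, False, False], [True]]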
def find_max_pressure_df(dfs):
'''Given a list of data frames, return a reference to the frame that contains the highest pressure value
'''
max_pressure_df = dfs[0]
max_pressure = max_pressure_df['CTDPRS'].max() #TODO make into config var
for df in dfs:
if df['CTDPRS'].max() > max_pressure:
max_pressure_df = df
return max_pressure_df
def find_pumps_on_dfs(dfs):
'''Given a list of dataframes, remove all the frames with one or more rows containing a "false" pump_on flag
'''
return list(filter(lambda df: df['pump_on'].all(), dfs))
def trim_soak_period_from_df(df):
'''Look for minimum pressure in dataframe, then return everything after minimum pressure/top of cast.
'''
test = int(df.iloc[1:int((len(df)/4))]['CTDPRS'].idxmin())
return df.loc[test:]
def find_last_soak_period(df_cast, surface_pressure=2, time_bin=8, downcast_pressure=50):
"""Find the soak period before the downcast starts.
The algorithm is tuned for repeat hydrography work, specifically US GO-SHIP
parameters. This assumes the soak depth will be somewhere between 10 and 30
meters, the package will sit at the soak depth for at least 20 to 30 seconds
before starting ascent to the surface and descent to target depth.
Parameters
----------
df_cast : DataFrame
DataFrame of the entire cast
surface_pressure : integer
Minimum surface pressure threshold required to look for soak depth.
2 dbar was chosen as an average rosette is roughly 1.5 to 2 meters tall.
time_bin : integer
Time, in whole seconds.
downcast_pressure : integer
Minimum pressure threshold required to assume downcast has started.
50 dbar has been chosen as double the deep soak depth of 20-30 dbar.
Returns
-------
df_cast_ret : DataFrame
DataFrame starting within time_bin seconds of the last soak period.
The algorithm is not guaranteed to catch the exact start of the soak period,
but within a minimum period of time_bin seconds(?) from end of the soak if
the soak period assumption is valid. This should be shorter than the total
soak period time, and able to catch the following rise and descent of the
package that signals the start of the cast.
The algorithm has been designed to handle four general cases of casts:
* A routine cast with pumps turning on in water and normal soak
* A cast where the pumps turn on in air/on deck
* A cast where the pumps turn on and off due to rosette coming out of water
* A cast where there are multiple stops on the downcast to the target depth
"""
#Validate user input
if time_bin <= 0:
raise ValueError('Time bin value should be positive whole seconds.')
if downcast_pressure <=0:
raise ValueError('Starting downcast pressure threshold must be positive integers.')
if downcast_pressure < surface_pressure:
raise ValueError(f'Starting downcast pressure threshold must be greater \
than surface pressure threshold.')
# If pumps have not turned on until in water, return DataFrame
if df_cast.iloc[0]['CTDPRS'] > surface_pressure:
return df_cast
#Bin the data by time, and compute the average rate of descent
df_blah = df_cast.loc[:,:]
df_blah['bin'] = pd.cut(df_blah.loc[:,'index'],
range(df_blah.iloc[0]['index'],df_blah.iloc[-1]['index'],time_bin*24),
labels=False, include_lowest=True)
df_blah2 = df_blah.groupby('bin').mean()
#Compute difference of descent rates and label bins
df_blah2['prs_diff'] = df_blah2['CTDPRS'].diff().fillna(0).round(0)
df_blah2['movement'] = pd.cut(df_blah2['prs_diff'], [-1000,-0.5,0.5,1000], labels=['up','stop','down'])
#Find all periods where the rosette is not moving
df_stop = df_blah2.groupby('movement').get_group('stop')
groupby_test = df_blah2.groupby(df_blah2['movement'].ne(df_blah2['movement'].shift()).cumsum())
list_test = [g for i,g in groupby_test]
#Find a dataframe index of the last soak period before starting descent
def poop(list_obj, downcast_pressure):
""" Return dataframe index in the last soak period before starting
descent to target depth.
"""
for i, x in zip(range(len(list_test)),list_test):
if x['CTDPRS'].max() < downcast_pressure:
if x.max()['movement'] == 'stop':
index = i
if x['CTDPRS'].max() > downcast_pressure:
return index
return index
#Truncate dataframe to new starting index : end of dataframe
start_index = np.around(list_test[poop(list_test, downcast_pressure)].head(1)['index'])
df_cast = df_cast.set_index('index')
df_cast = df_cast.loc[int(start_index):,:]
df_cast_ret = df_cast.reset_index()
return df_cast_ret
#End move four functions
# def cast_details_old(stacast, log_file, p_col, time_col, b_lat_col, b_lon_col, alt_col, inMat=None):
# """cast_details function
#
# Function takes full NUMPY ndarray with predefined dtype array
# and adjusts ndarray to remove all extraneous surface data.
# Function returns cast start time, end time, bottom time and
# cleaned up matrix.
#
# Args:
# param1 (str): stacast, station cast input
# param2 (str): log_file, log file to write cast data.
# param3 (str): p_col, pressure data column name
# param4 (str): time_col, time data column name
# param5 (ndarray): inMat, numpy ndarray with dtype array
#
# Returns:
# Narray: The return value is ndarray with adjusted time of parameter
# specified.
#
# """
#
#
# if inMat is None:
# print("In cast_details: No data")
# return
# else:
# # Top of cast time, bottom of cast time, end of cast time,
# start_cast_time = 0.0
# bottom_cast_time = 0.0
# end_cast_time = 0.0
# # Test cycle time constant
# fl = 24
# # starting P
# start_pressure = 2.0
# # Max P
# max_pressure = 10000.0
# lm = len(inMat)-1
# rev = np.arange(int(lm/4),0,-1)
#
# # Find starting top of cast
# # Smallest P from reverse array search
# for i in rev:
# if start_pressure < inMat[p_col][i]:
# tmp = i
# elif start_pressure > inMat[p_col][i]:
# start_pressure = inMat[p_col][i]
# tmp = abs(i - 24) #patched to not break through the c(sea)-floor, can be made cleaner
# break
# start_cast_time = inMat[time_col][tmp]
#
# # Remove everything before cast start
# inMat = inMat[tmp:]
#
# # Max P and bottom time
# max_pressure = max(inMat[p_col])
# tmp = np.argmax((inMat[p_col]))
# bottom_cast_time = inMat[time_col][tmp]
# b_lat = inMat[b_lat_col][tmp]
# b_lon = inMat[b_lon_col][tmp]
# b_alti = inMat[alt_col][tmp]
#
# tmp = len(inMat)
# # Find ending top of cast time
# for i in range(int(tmp/2),tmp):
# if start_pressure > inMat[p_col][i]:
# end_cast_time = inMat[time_col][i]
# if i < tmp: tmp = i + 24
# break
#
# # Remove everything after cast end
# inMat = inMat[:tmp]
#
# report_ctd.report_cast_details(stacast, log_file, start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_alti, b_lat, b_lon)
#
# return start_cast_time, end_cast_time, bottom_cast_time, start_pressure, max_pressure, b_lat, b_lon, b_alti, inMat
def ctd_align(inMat=None, col=None, time=0.0):
"""ctd_align function
Function takes full NUMPY ndarray with predefined dtype array
and adjusts time of sensor responce and water flow relative to
the time frame of temperature sensor.
Args:
param1 (ndarray): inMat, numpy ndarray with dtype array
param2 (float): col, column to apply time advance to.
param3 (float): time, advance in seconds to apply to raw data.
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
# Num of frames per second.
fl = 24
if (inMat is not None) & (col is not None) & ( time > 0.0):
# Time to advance
advnc = int(fl * time)
tmp = np.arange(advnc, dtype=float)
last = inMat[col][len(inMat)-1]
tmp.fill(float(last))
inMat[col] = np.concatenate((inMat[col][advnc:],tmp))
return inMat
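# Illustrative aside (toy values, not part of the module): advancing a column by `advnc`
# frames drops the first `advnc` samples and pads the end with the last value, which is
# exactly what the np.concatenate call above does.
_col = np.array([1.0, 2.0, 3.0, 4.0])
_advnc = 2
_advanced = np.concatenate((_col[_advnc:], np.full(_advnc, _col[-1])))
assert np.array_equal(_advanced, np.array([3.0, 4.0, 4.0, 4.0]))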
def ctd_quality_codes(column=None, p_range=None, qual_code=None, oxy_fit=False, p_qual_col=None, qual_one=None, inMat=None):
"""ctd_quality_codes function
Function takes full NUMPY ndarray with predefined dtype array
Args:
param1 (ndarray):
param2 (float):
Returns:
Narray: The return value is ndarray with adjusted time of parameter
specified.
"""
#If p_range set apply qual codes to part of array and return
if p_range is not None:
print("Some algoirythm for formatting qual codes per pressure range")
return
else:
q_df = pd.DataFrame(index=np.arange(len(inMat)), columns=p_qual_col)
for pq in p_qual_col:
if pq in list(qual_one):
q_df[pq] = q_df[pq].fillna(1)
elif oxy_fit and pq == column:
q_df[pq] = q_df[pq].fillna(2)
else:
q_df[pq] = q_df[pq].fillna(2)
q_nd = q_df[q_df.columns].to_numpy()
return q_nd
def formatTimeEpoc(time_zone='UTC', time_pattern='%Y-%m-%d %H:%M:%S', input_time = None):
"""formatTimeEpoc function
Function takes pattern of time input, relative time zone, and
date time data array and returns array of epoc time.
title and the second row are the units for each column.
Args:
param1 (str): relative time zone for data.
param2 (str): pattern of incoming data.
param3 (ndarray): input_time, numpy 1d ndarray time array
Returns:
1D ndarray: The return array of epoch time
"""
if input_time is None:
print("In formatTimeEpoc: No data entered.")
return
else:
os.environ['TZ'] = 'UTC'
epoch_time = input_time
for i in range(0,len(input_time)):
epoch_time[i] = int(time.mktime(time.strptime(str(input_time[i], "utf-8"), time_pattern)))
return epoch_time
def dataToDataFrame(inFile):
"""dataToDataFrame function
Function takes full file path to csv type data file and returns a
PANDAS dataframe for data treatment with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): Full path to data file.
Returns:
DataFrame: The return value is a full dataframe with header.
.. REF PAGE:
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv
"""
#df = pd.read_csv(inFile, header=[0,2])
df = pd.read_csv(inFile)
return df
def dataToNDarray(inFile, dtype=None, names=None, separator=',', skip=None):
"""dataToNDarray function
Function takes full file path to csv type data file and returns NUMPY
ndarray type ndarray for data manipulation with a two row header.
Data file should have a two row header. The first row being the column
title and the second row are the units for each column.
Args:
param1 (str): inFile, full path to csv file
param2 (arr): dtype list
param3 (str): separator, default comma ','
Returns:
Narray: The return value is a full data ndarray with two row header.
Reference Page:
https://scipy.github.io/old-wiki/pages/Cookbook/InputOutput.html
"""
try:
return pd.read_pickle(inFile).to_records()
except:
if skip is None:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names)
else:
arr = np.genfromtxt(inFile, delimiter=separator, dtype=dtype, names=names, skip_header=skip)
return arr
def hysteresis_correction(H1=-0.033, H2=5000, H3=1450, inMat = None):
"""Hysteresis Correction function
Function takes data ndarray and hysteresis coefficiants
and returns hysteresis corrected oxygen data.
Args:
param1 (float): H1, hysteresis correction coefficiant 1
param2 (float): H2, hysteresis correction coefficiant 2
param3 (float): H3, hysteresis correction coefficiant 3
param5 (array): inMat, raw ctd data.
Returns:
array: Return dissolved oxygen hysteresis corrected data.
.. REF PAGE:
http://http://www.seabird.com/document/an64-3-sbe-43-dissolved-oxygen-do-sensor-hysteresis-corrections
"""
if inMat is None:
print("Hysteresis Correction function: No data")
return
else:
# float array so the corrected concentrations are not truncated to integers
Oxnewconc = np.arange(0, len(inMat), 1, dtype=float)
Oxnewconc[0] = inMat['o1_mll'][1]
for i in range(1,len(inMat)-1):
D = 1 + H1 * (math.exp(inMat['p_dbar'][i] / H2) - 1)
C = math.exp(-1 * 0.04167/ H3)
Oxnewconc[i] = ((inMat['o1_mll'][i] + (Oxnewconc[i-1] * C * D)) - (inMat['o1_mll'][i-1] * C)) / D
inMat['o1_mll'][:] = Oxnewconc[:]
return inMat
def data_interpolater(inArr):
"""data_interpolater to handle indices and logical indices of NaNs.
Input:
- inArr, 1d numpy array with return True np.isnans()
Output:
- nans, logical indices of NaNs
- index, a function, with signature indices= index(logical_indices),
to convert logical indices of NaNs to 'equivalent' indices
- interpolated array
Example:
>>> # linear interpolation of NaNs
>>> outArray = data_interpolater(inArr)
"""
nans, tmp= np.isnan(inArr), lambda z: z.nonzero()[0]
inArr[nans] = np.interp(tmp(nans), tmp(~nans), inArr[~nans])
return inArr
def o2pl2pkg(p_col, t_col, sal_col, dopl_col, dopkg_col, lat_col, lon_col, inMat):
"""o2pl2pkg convert ml/l dissolved oxygen to umol/kg
Input:
- t_col, temperature column header deg c.
- sal_col, salinity column header psu.
- dopl_col, dissolved column header ml/l.
- dopkg_col, dissolved column header umol/kg
- lat_col, latitude for entire cast deg.
- lon_col, longitude for entire cast deg.
- inMat, dtype ndarray processed ctd time data.
Output:
- Converted Oxygen column umol/kg
Example:
>>> # linear interpolation of NaNs
>>> outArray = o2pl2kg(inArr)
"""
pkg = np.ndarray(shape=len(inMat), dtype=[(dopkg_col, float)])
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(inMat[sal_col], inMat[p_col], inMat[lat_col], inMat[lon_col])
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, inMat[t_col], inMat[p_col])
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
# Convert DO ml/l to umol/kg
for i in range(0,len(inMat[dopl_col])):
pkg[i] = inMat[dopl_col][i] * 44660 / (s0[i] + 1000)
return pkg
def oxy_to_umolkg(df_sal, df_pressure, df_lat, df_lon, df_temp, df_oxy):
'''Rewritten from Courtney's method to use array-likes (aka use dataframes and ndarrays).
'''
# Absolute salinity from Practical salinity.
SA = gsw.SA_from_SP(df_sal, df_pressure, df_lat, df_lon)
# Conservative temperature from insitu temperature.
CT = gsw.CT_from_t(SA, df_temp, df_pressure)
s0 = gsw.sigma0(SA, CT) # Potential density from Absolute Salinity g/Kg Conservative temperature deg C.
series = df_oxy * 44660 / (s0 + 1000)
return series
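# Worked example (illustrative numbers, not measured data): for a potential density anomaly
# s0 of about 26 kg/m^3, 5 ml/l of dissolved oxygen converts to roughly
#   5 * 44660 / (26 + 1000) = 217.6 umol/kg,
# which is what the `df_oxy * 44660 / (s0 + 1000)` expression above computes element-wise.
assert abs(5 * 44660 / (26 + 1000) - 217.64) < 0.01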
def raw_ctd_filter(input_array=None, filter_type='triangle', win_size=24, parameters=None):
"""raw_ctd_filter function
Function takes NUMPY array
of raw ctd data and returns filtered data. This function also needs
one of three filter types (boxcar, gaussian, triangle) as well as
window size.
Args:
param1 (ndarray): Numpy ndarray with predefined header with at
param2 (str): One of three tested filter types
boxcar, gaussian_std, triangle.
default is triangle
param3 (int): A window size for the filter. Default is 24, which
is the number of frames per second from a SBE9+/11 CTD/Dech unit.
param4 (ndarray): parameters the dtype names used in filtering the
analytical inputs.
Returns:
Narray: The return value is a matrix of filtered ctd data with
the above listed header values.
"""
if input_array is None:
print("In raw_ctd_filter: No data array.")
return
else:
return_array = input_array
if parameters is None:
print("In raw_ctd_filter: Empty parameter list.")
else:
for p in parameters:
if filter_type == 'boxcar':
win = sig.boxcar(win_size)
return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/len(win)
elif filter_type == 'gaussian':
sigma = np.std(input_array[str(p)])
win = sig.general_gaussian(win_size, 1.0, sigma)
return_array[str(p)] = sig.convolve(input_array[str(p)], win, mode='same')/(len(win))
elif filter_type == 'triangle':
win = sig.triang(win_size)
return_array[p] = 2*sig.convolve(input_array[p], win, mode='same')/len(win)
return return_array
def ondeck_pressure(stacast, p_col, c1_col, c2_col, time_col, inMat=None, conductivity_startup=20.0, log_file=None):
"""ondeck_pressure function
Function takes full NUMPY ndarray with predefined dtype array
of filtered CTD raw data, then stores, analyzes, and removes on-deck
values from the data.
Args:
param1 (str): stacast, station cast info
param1 (str): p_col, pressure data column name
param2 (str): c1_col, cond1 data column name
param3 (str): c2_col, cond2 data column name
param4 (str): time_col, time data column name
param5 (ndarray): numpy ndarray with dtype array
param6 (float): conductivity_startup, threshold value
param7 (str): log_file, log file name
Returns:
Narray: The return ndarray with ondeck data removed.
Also output start/end ondeck pressure.
"""
start_pressure = []
tmpMat = []
outMat = []
tmp = 0
start_p = 0.0
n = 0
ep = []
end_p = 0.0
# Frequency
fl = 24
fl2 = fl*2
# One minute
mt = 60
# Half minute
ms = 30
time_delay = fl*ms
if inMat is None:
print("Ondeck_pressure function: No data.")
return
else:
# Searches first quarter of matrix, uses conductivity
# threshold min to capture startup pressure
for j in range(0,int(len(inMat)/4)):
if ((inMat[c1_col][j] < conductivity_startup) and (inMat[c2_col][j] < conductivity_startup)):
tmp = j
start_pressure.append(inMat[p_col][j])
# Evaluate starting pressures
if not start_pressure: start_p = "Started in Water"
else:
n = len(start_pressure)
if (n > time_delay): start_p = np.average(start_pressure[fl2:n-(time_delay)])
else: start_p = np.average(start_pressure[fl2:n])
# Remove on-deck startup
inMat = inMat[tmp:]
tmp = len(inMat);
# Searches last half of NDarray for conductivity threshold
if len(inMat) % 2 == 0:
inMat_2 = inMat.copy()
else:
inMat_2 = inMat.iloc[1:].copy()
inMat_half1, inMat_half2 =
|
np.split(inMat_2,2)
|
numpy.split
|
'''This module provides an abstract class whose purpose is fitting a 1D curve
using several different methods (polynomial, splines, Gaussian processes),
but providing a consistent API for evaluation and computing derivatives,
extrema, etc.'''
from __future__ import print_function
import numpy as num
from snpy.utils import fit_spline
from scipy.interpolate import splrep,splev,sproot
from scipy.optimize import brentq, newton
from scipy.misc import derivative as deriv
import six
try:
from snpy.spline2 import spline2, evalsp,eval_extrema,eval_x
except:
spline2 = None
try:
from numpy import polynomial
except:
polynomial = None
# Both pymc and sklearn take a long time to import, so we'll start doing it
# on-demand. We'll check for their existence, but that's it.
gp = None
if six.PY2:
import pkgutil
loader = pkgutil.find_loader('pymc')
if loader is not None:
gp = 'pymc'
else:
loader = pkgutil.find_loader('sklearn')
if loader is not None:
gp = 'sklearn'
else:
import importlib
spec = importlib.util.find_spec('pymc')
if spec is not None:
gp = 'pymc'
else:
spec = importlib.util.find_spec('sklearn')
if spec is not None:
gp = 'sklearn'
# try:
# import pymc
# from pymc import gp as GP
# import os
# if 'OMP_NUM_THREADS' not in os.environ:
# os.environ['OMP_NUM_THREADS'] = '1'
# gp = 'pymc'
# except:
# try:
# from sklearn.gaussian_process import GaussianProcessRegressor
# from sklearn.gaussian_process.kernels import Matern
# gp = 'sklearn'
# except:
# pass
try:
from . import InteractiveFit
except:
InteractiveFit = None
functions = {}
def regularize(x, y, ey):
'''Given x,y,dy data, make sure the data is strictly
monotonic with respect to x and eliminate repeated values.
Args:
x (float array): input independent variable
y (float array): input dependent variable
ey (float array): input error in dependent variable
Returns:
3-tuple: (x,y,ey)
output values of x,y,ey, where duplicate x are averaged and
x is strictly monotonically increasing.'''
# x-values need to be strictly ascending.
if x.shape[0] < 2:
return x,y,ey
sids = num.argsort(x)
x = x[sids]
y = y[sids]
ey = ey[sids]
# here's some Numeric magic. first, find where we have repeating x-values
Nmatrix = num.equal(x[:,num.newaxis], x[num.newaxis,:])
val_matrix = y[:,num.newaxis]*Nmatrix
e_matrix = ey[:,num.newaxis]*Nmatrix
average = num.sum(val_matrix, axis=0)/sum(Nmatrix)
e_average = num.sum(e_matrix, axis=0)/sum(Nmatrix)
# at this point, average is the original data, but with repeating data points
# replaced with their average. Now, we just pick out the unique x's and
# the first of any repeating points:
gids = num.concatenate([[True], num.greater(x[1:] - x[:-1], 0.)])
x = x[gids]
y = average[gids]
ey = e_average[gids]
return x,y,ey
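# Illustrative aside (toy values, not from the package): repeated x values are averaged
# and the returned x is strictly increasing.
_x, _y, _ey = regularize(num.array([1., 1., 2.]),
                         num.array([2., 4., 6.]),
                         num.array([1., 1., 1.]))
assert num.allclose(_x, [1., 2.]) and num.allclose(_y, [3., 6.])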
class oneDcurve:
'''Base class for 1D interpolators. Each subclass inherits the basic
structure defined below, but is responsible for implementing the
different methods.'''
num_real_keep = 100
def __init__(self, x, y, ey, mask=None, **args):
'''Instantiate a new interpolator.
Args:
x (float array): independent variable
y (float array): dependent variable
ey (float array): error in dependent variable
mask (bool array): True where data is good, False to omit from fit
args (optional args): extra arguments are handled by each subclass
'''
x = num.atleast_1d(x)
y = num.atleast_1d(y)
ey = num.atleast_1d(ey)
if not len(x.shape) == 1:
raise ValueError("x, y, ey must be 1D arrays")
if not x.shape == y.shape:
raise ValueError("x, y must have same shape")
if not y.shape == ey.shape:
raise ValueError("y, ey must have same shape")
self.xdata = x
self.ydata = y
self.eydata = ey
self.vardata = num.power(ey,2)
if mask is None:
self.mask = num.ones(x.shape, dtype=bool) # mask for the data
else:
self.mask = mask
if len(self.xdata[self.mask]) < 2:
raise ValueError("(masked) data must have more than one point!")
self.realization = None
self.realizations = []
self.pars = {}
self.setup = False
self.ifit = None
def __getattr__(self, key):
if 'pars' in self.__dict__:
if key in self.pars:
return self.pars[key]
if key == 'x':
return self.xdata[self.mask]
if key == 'y':
return self.ydata[self.mask]
if key == 'ey':
return self.eydata[self.mask]
if key == 'var':
return self.vardata[self.mask]
raise AttributeError("Instance has not attribute %s" % (key))
def __setattr__(self, key, value):
if 'pars' in self.__dict__:
if key in self.__dict__['pars']:
self.__dict__['pars'][key] = value
self.__dict__['setup'] = False
if self.ifit is not None:
self.ifit.redraw()
return
else:
self.__dict__[key] = value
return
self.__dict__[key] = value
def _regularize(self):
'''Given a data set, we make sure that the independent variable
is strictly increasing, eliminating repeated values by an
average.'''
# x-values need to be strictly ascending.
sids = num.argsort(self.x)
x = self.x[sids]
y = self.y[sids]
ey = self.ey[sids]
# here's some Numeric magic. first, find where we have repeating x-values
Nmatrix = num.equal(x[:,num.newaxis], x[num.newaxis,:])
val_matrix = y[:,num.newaxis]*Nmatrix
e_matrix = ey[:,num.newaxis]*Nmatrix
average = num.sum(val_matrix, axis=0)/sum(Nmatrix)
e_average = num.sum(e_matrix, axis=0)/sum(Nmatrix)
# at this point, average is the original data, but with repeating data points
# replaced with their average. Now, we just pick out the unique x's and
# the first of any repeating points:
gids = num.concatenate([[True], num.greater(x[1:] - x[:-1], 0.)])
x = x[gids]
y = average[gids]
ey = e_average[gids]
return x,y,ey
def maskpoint(self, x, y):
'''Mask the point closest to (x,y).
Args:
x (float): (x,y) point to mask out
y (float): (x,y) point to mask out
Returns:
None
Effects:
self.mask is updated
'''
id = num.argmin(num.power(self.x-x,2) + num.power(self.y-y,2))
self.mask[id] = False
self.setup = False
def maskid(self, i):
'''Mask the point based on index.
Args:
i (int): index of point to mask.
Returns:
None
Effects:
self.mask is updated
'''
self.mask[i] = False
self.setup = False
def maskresids(self, absclip=None, sigclip=None):
'''Mask data outside a range of residuals.
Args:
absclip (float): mask out data with residuals > absclip
sigclip (float): mask out data with residuals > sigclip*sigma
Returns:
None
Effects:
self.mask is updated
'''
absdev = num.absolute(self.residuals())
if absclip is not None:
# keep only points whose absolute residual is within the clip value
self.mask *= num.less_equal(absdev, absclip)
self.setup = False
elif sigclip is not None:
sigma = 1.49*num.median(absdev)
self.mask *= num.less_equal(absdev, sigclip*sigma)
self.setup = False
def __call__(self, x):
'''Return the interpolation at point(s) x.
Args:
x (float array or scalar): Location at which to compute interpolant
Returns:
2-tuple: (y, mask)
- y (float array or scalar): interpolant. type matches input x
- mask (float array or scalar): False indicates extrapolation
'''
raise NotImplementedError('Derived class must override')
def error(self, x, N=50):
'''Estimate the error in the interpolant at the point x.
Args:
x (float array or scalar): location at which to compute error
N (int): If bootstrap is required, number of iterations.
Returns:
float array or scalar: the error (type matches input x)
'''
scalar = (len(num.shape(x)) == 0)
x = num.atleast_1d(x)
if len(self.realizations) < N:
for i in range(N-len(self.realizations)):
self.draw()
self.reset_mean()
earr = []
for i in range(N):
self.realization = self.realizations[i]
earr.append(self.__call__(x)[0])
self.realization = None
earr = num.array(earr)
err = num.std(earr, axis=0)
if scalar:
return err[0]
else:
return err
def draw(self):
'''Generate a Monte Carlo realization of the data. Interpolator
will now give values based on this realization.'''
raise NotImplementedError('Derived class must override')
def reset_mean(self):
'''Reset to the original data after using draw()'''
raise NotImplementedError('Derived class must override')
def residuals(self, mask=True):
'''Compute the residuals (data - model) for current parameters.
Args:
mask (bool): If True, omit masked values from residuals
Returns:
float array: residuals
'''
if mask:
return self.y - self.__call__(self.x)[0]
else:
return self.ydata - self.__call__(self.xdata)[0]
def rms(self):
'''Returns RMS of residuals for current parameters.'''
return num.sqrt(num.mean(num.power(self.residuals(),2)))
def chisquare(self):
'''Returns the chi-square statistic for current parameters.'''
return num.sum(num.power(self.residuals(),2)*num.power(self.var,-1))
def rchisquare(self):
'''Returns the reduced chi-square statistic for current parameters.'''
raise NotImplementedError('Derived class must override')
def DW(self):
'''Returns the Durbin-Watson statistic for current parameters.'''
r = self.residuals()
return num.sum(num.power(r[1:] - r[:-1],2))/num.sum(num.power(r,2))
def deriv(self, x, n=1):
'''Returns the nth derivative of the function at x.
Args:
x (float array or scalar): location at which to compute derivative
n (int): order of the derivative to compute
Returns:
float array or scalar: derivative at x (type matches input x)
'''
raise NotImplementedError('Derived class must override')
def find_extrema(self, xmin=None, xmax=None):
'''Find the position and values of the maxima/minima.
Args:
xmin (float): only consider extrema on interval (xmin,xmax)
xmax (float): only consider extrema on interval (xmin,xmax)
Returns:
3-tuple: (roots,vals,curvs)
- roots (float array or None): locations where derivative is zero
or None if no extrma on interval
- vals (float array or None): interpolant at roots
- curvs (float array or None): sign of curvature at roots. -1 if
concave down (maximum), +1 if concave up (minimum)
'''
raise NotImplementedError('Derived class must override')
def intercept(self, y):
'''Find the value of x for which the interpolator goes through y
Args:
y (float): value of y for which we wish to know x
Returns:
float array or None: value(s) of x at y or None if function does
not cross y on domain
'''
raise NotImplementedError('Derived class must override')
def domain(self):
'''Return the valid domain for this model
Returns:
2-tuple: (xmin, xmax): the domain of the function.
'''
def interact(self):
'''If we have the InteractiveFit module, spawn an interactive fitter.
Returns:
InteractiveFit.InteractiveFit instance or None
'''
if InteractiveFit is not None:
return InteractiveFit.InteractiveFit(self)
else:
print("Sorry, you need to have matplotlib installed to use this feature")
return None
def help(self):
'''Provide a help string.'''
raise NotImplementedError('Derived class must override')
polytypes = {'polynomial':polynomial.Polynomial,
'chebyshev':polynomial.Chebyshev,
'laguerre':polynomial.Laguerre,
'hermite':polynomial.Hermite,
'hermiteE':polynomial.HermiteE}
if polynomial is not None:
class Polynomial(oneDcurve):
def __init__(self, x, y, dy, mask=None, **args):
'''Fit an Nth order polynomial to the data. The only arguments are
[n], the order, [x0] the zero-point, xmin and xmax the lower and
upper limits of the fit, respectively.'''
oneDcurve.__init__(self, x, y, dy, mask)
self.pars = {
'n':3,
'type':'poly',
'xmin':None,
'xmax':None}
for key in args:
if key not in self.pars:
raise TypeError("%s is an invalid keyword argument for this method" % key)
self.pars[key] = args[key]
if self.xmin is None:
self.xmin = self.x.min()
if self.xmax is None:
self.xmax = self.x.max()
# no need to regularize
self._setup()
self.realization = None
def help(self):
print('n: order of the polynomial')
print('xmin: lower bound on data to interpolate')
print('xmax: upper bound on data to interpolate')
def __str__(self):
return self.type + " polynomial"
def _setup(self):
'''Given the current set of params, setup the interpolator.'''
mask = self.mask*num.greater_equal(self.xdata, self.xmin)*\
num.less_equal(self.xdata, self.xmax)
if self.type not in polytypes:
raise ValueError("Error: the polynomial type must be one of " +\
",".join(list(polytypes.keys())))
self.poly = polytypes[self.type].fit(self.xdata[mask], self.ydata[mask],
deg=self.n, w=num.power(self.eydata[mask],-1))
self.setup = True
self.realization = None
def __call__(self, x):
'''Interpolate at point [x]. Returns a 2-tuple: (y, mask) where [y]
is the interpolated point, and [mask] is a boolean array with the same
shape as [x] and is True where interpolated and False where
extrapolated'''
if not self.setup: self._setup()
if self.realization is not None:
res = self.realization(x)
else:
res = self.poly(x)
return res, num.greater_equal(x, self.poly.domain[0])*\
num.less_equal(x, self.poly.domain[1])
def draw(self):
'''Generate a random realization of the spline, based on the data.'''
y_draw = num.random.normal(self.y, self.ey)
self.realizations.append(\
polytypes[self.type].fit(self.x, y_draw, deg=self.n,
w=num.power(self.ey,-1)))
if len(self.realizations) > self.num_real_keep:
self.realizations = self.realizations[1:]
self.realization = self.realizations[-1]
def reset_mean(self):
self.realization = None
def rchisquare(self):
chisq = self.chisquare()
return chisq/(len(self.x) - 1 - len(self.poly.coef))
def deriv(self, x, n=1):
'''Returns the nth derivative of the function at x.'''
if not self.setup: self._setup()
dpoly = self.poly.deriv(m=n)
return dpoly(x)
def domain(self):
'''Returns the valid domain of the polynomial.'''
if not self.setup: self._setup()
dom = self.poly.domain
return (dom[0],dom[1])
def find_extrema(self, xmin=None, xmax=None):
'''Find the position and values of the maxima/minima. Returns a tuple:
(roots,vals,ypps) where roots are the x-values where the extrema
occur, vals are the y-values at these points, and ypps are the
2nd derivatives. Optionally, restrict roots to between xmin,
and xmax'''
if self.realization is not None:
poly = self.realization
else:
poly = self.poly
if xmin is None: xmin = self.poly.domain[0]
if xmax is None: xmax = self.poly.domain[1]
if not self.setup: self._setup()
d1 = poly.deriv(m=1)
d2 = poly.deriv(m=2)
roots = d1.roots()
# Roots can be complex. Want only the real ones
gids = num.iscomplex(roots)
roots = num.real(roots[num.logical_not(gids)])
gids = num.greater_equal(roots, xmin)*num.less_equal(roots, xmax)
roots = roots[gids]
if len(roots) == 0:
return num.array([]), num.array([]), num.array([])
vals = self.__call__(roots)
curvs = d2(roots)
curvs = num.where(curvs < 0, -1, curvs)
curvs = num.where(curvs > 0, 1, curvs)
return roots,vals[0],curvs
def intercept(self, y):
'''Find the value of x for which the interpolator goes through [y]'''
if self.realization is not None:
poly = self.realization - y
else:
poly = self.poly - y
# Roots can be complex. Want only real ones
roots = poly.roots()
gids = num.isreal(roots)
roots = num.real(roots[gids])
gids = num.greater_equal(roots, self.poly.domain[0])*\
num.less_equal(roots, self.poly.domain[1])
roots = roots[gids]
if len(roots) == 0:
return None
else:
return roots
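# Minimal usage sketch (illustrative; assumes 1-D arrays x, y, dy of equal length):
# interp = Polynomial(x, y, dy, n=3, type='chebyshev')
# yfit, inside = interp(x) # inside is True where interpolated, False where extrapolated
# roots, vals, curvs = interp.find_extrema()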
for t in polytypes:
if t == 'polynomial':
functions[t] = (Polynomial, "Nth order simple polynomial (numpy.Polynomial)")
else:
functions[t] = (Polynomial, "Nth order %s polynomial (numpy.Polynomial)" % t)
if spline2 is not None:
class HyperSpline(oneDcurve):
def __init__(self, x, y, dy, mask=None, **args):
'''Fit a spline2 (Thijsse) to the data. [args] can be any argument
recognized by spline2.spline2()'''
oneDcurve.__init__(self, x, y, dy, mask)
self.pars = {
'xrange':None,
'degree':3,
'acfsearch':0,
'acffunc':'exp',
'ksi':None,
'n':None,
'allownonopt':1,
'lopt':None,
'rejlev':0.05,
'xlog':0}
for key in args:
if key not in self.pars:
raise TypeError("%s is an invalid keyword argument for this method" % key)
self.pars[key] = args[key]
# Make sure the data conform to the spline requirements
#self._regularize()
self._setup()
self.realization = None
def __str__(self):
return "Hyperspline"
def help(self):
print("xrange: tuple of (xmin,xmax) over which to fit")
print("lopt: Force knot optimization to start with lopt knots")
print("degree: order of the spline (default 3)")
print("xlog: 0/1, apply log10() to the x values before fitting?")
print("rejlev: Use rejection level on statistical tests of rejlev")
print("allownonopt: 0/1 Allow splines with non-optimized breakpoints?")
print("acfsearch: 0/1, weather to search for auto-correlation")
print("acffunc: functional form of autocorrelation (default exp)")
print("ksi: specify auto-correlation length")
print("n: only search for autocorrelation on index scale n")
print()
print("see help(spline2) for info on these parameters")
def _setup(self):
'''Given the current set of params, setup the interpolator.'''
x,y,ey = self._regularize()
self.tck = spline2(x, y, w=1.0/ey, **self.pars)
self.setup = True
self.realization = None
def __call__(self, x):
'''Interpolate at point [x]. Returns a 2-tuple: (y, mask) where [y]
is the interpolated point, and [mask] is a boolean array with the same
shape as [x] and is True where interpolated and False where extrapolated'''
if not self.setup:
self._setup()
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
x = num.atleast_1d(x)
if self.realization:
evm = num.atleast_1d(evalsp(x, self.realization))
mask = num.greater_equal(x, self.realization[0][0])*\
num.less_equal(x,self.realization[0][-1])
else:
evm = num.atleast_1d(evalsp(x, self.tck))
mask = num.greater_equal(x, self.tck[0][0])*num.less_equal(x,self.tck[0][-1])
if scalar:
return evm[0],mask[0]
else:
return evm,mask
def domain(self):
return (self.tck[0][0], self.tck[0][-1])
def draw(self):
'''Generate a random realization of the spline, based on the data.'''
y_draw = num.random.normal(self.y, self.ey)
self.realizations.append(\
spline2(self.x, y_draw, w=1.0/self.ey, **self.pars))
if len(self.realizations) > self.num_real_keep:
self.realizations = self.realizations[1:]
self.realization = self.realizations[-1]
def reset_mean(self):
self.realization = None
def rchisquare(self):
chisq = self.chisquare()
return chisq/(len(self.x) - len(self.tck[0]) - 1)
def deriv(self, x, n=1):
'''Returns the nth derivative of the function at x.'''
if self.realization:
tck = self.realization
else:
tck = self.tck
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
x = num.atleast_1d(x)
if self.realization:
evm = num.atleast_1d(evalsp(x, self.realization, deriv=n))
else:
evm = num.atleast_1d(evalsp(x, self.tck, deriv=n))
if scalar:
return evm[0]
else:
return evm
def find_extrema(self, xmin=None, xmax=None):
'''Find the position and values of the maxima/minima. Returns a tuple:
(roots,vals,ypps) where roots are the x-values where the extrema
occur, vals are the y-values at these points, and ypps are the
2nd derivatives. Optionally specify the range over which maxima
are valid.'''
if self.realization:
vals = eval_extrema(self.realization)
else:
vals = eval_extrema(self.tck)
gids = num.ones(vals[0].shape, dtype=bool)
if xmin is not None:
gids = gids*num.greater_equal(vals[0],xmin)
if xmax is not None:
gids = gids*num.less_equal(vals[0],xmax)
return (vals[0][gids], vals[1][gids], vals[2][gids])
def intercept(self, y):
'''Find the value of x for which the interpolator goes through [y]'''
if self.realization:
return eval_x(y, self.realization)
else:
return eval_x(y, self.tck)
functions['hyperspline'] = (HyperSpline, "B. Thijsse style spline (snpy.spline2)")
functions['spline2'] = (HyperSpline, "B. Thijsse style spline (snpy.spline2)")
class Spline(oneDcurve):
def __init__(self, x, y, dy, mask=None, **args):
'''Fit a scipy (Dierckx) spline to the data. [args] can be any argument
recognized by scipy.interpolate.splrep.'''
oneDcurve.__init__(self, x, y, dy, mask)
self.pars = {
't':None,
'k':3,
's':None,
'xb':None,
'xe':None,
'task':0}
for key in args:
if key not in self.pars:
raise TypeError("%s is an invalid keyword argument for this method" % key)
self.pars[key] = args[key]
# Make sure the data conform to the spline requirements
#self._regularize()
self._setup()
self.realization = None
def __str__(self):
return "Spline"
def help(self):
print("k: order of the spline (default 3)")
print("task: 0,1,-1 (see scipy.interpolate.splrep)")
print("s: Smoothing length")
print("t: specify array of knots (task=-1)")
print("xb lower bound on x for fitting")
print("xe upper bound on x for fitting")
def _regularize(self):
# x-values need to be strictly ascending.
sids = num.argsort(self.x)
self.x = self.x[sids]
self.y = self.y[sids]
self.ey = self.ey[sids]
# here's some Numeric magic. first, find where we have repeating x-values
Nmatrix = num.equal(self.x[:,num.newaxis], self.x[num.newaxis,:])
val_matrix = self.y[:,num.newaxis]*Nmatrix
e_matrix = self.ey[:,num.newaxis]*Nmatrix
average = num.sum(val_matrix, axis=0)/sum(Nmatrix)
e_average = num.sum(e_matrix, axis=0)/sum(Nmatrix)
# at this point, average is the original data, but with repeating data points
# replaced with their average. Now, we just pick out the unique x's and
# the first of any repeating points:
gids = num.concatenate([[True], num.greater(self.x[1:] - self.x[:-1], 0.)])
self.x = self.x[gids]
self.y = average[gids]
self.ey = e_average[gids]
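# Worked example of the de-duplication above (illustrative only): for
# x = [0, 1, 1, 2] and y = [5, 3, 7, 9], the Nmatrix trick averages the two
# samples at x == 1, leaving x = [0, 1, 2], y = [5, 5, 9]; the errors are
# averaged the same way.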
def _setup(self):
'''Given the current set of params, setup the interpolator.'''
self.tck = splrep(self.x, self.y, 1.0/self.ey, **self.pars)
# Check for NaN's in the tck.
if num.sometrue(num.isnan(self.tck[1])):
raise ValueError("The Spline is invalid. It is possible the data are too noisy, or smoothing is too low. Try increasing 's' or fixing your errors")
self.setup = True
self.realization = None
def add_knot(self, x):
'''Add a knot point at x. The task parameter is automatically set
to -1 as a result.'''
if x <= self.tck[0][3] or x >= self.tck[0][-3]:
return
old_knots = self.tck[0][4:-4]
knots = num.concatenate([[x],old_knots])
knots = num.sort(knots)
self.pars['task'] = -1
self.pars['t'] = knots
try:
self._setup()
except:
print("Adding knot failed, reverting to old knots")
self.pars['t'] = old_knots
self.setup = False
return
def delete_knot(self, x):
'''Delete knot closest to x. The task parameter is automatically set
to -1 as a result.'''
old_knots = self.tck[0][4:-4]
ds = num.absolute(old_knots - x)
id = num.argmin(ds)
l = old_knots.tolist()
del l[id]
self.pars['task'] = -1
self.pars['t'] = num.array(l)
try:
self._setup()
except:
print("Deleting knot failed, reverting to old knots")
self.pars['t'] = old_knots
self.setup = False
return
def move_knot(self, x, xnew):
'''Move knot closest to x to new location xnew. The task parameter is
automatically set to -1 as a result.'''
old_knots = self.tck[0][4:-4]
ds = num.absolute(old_knots - x)
id = num.argmin(ds)
self.pars['task'] = -1
self.pars['t'] = old_knots*1
self.pars['t'][id] = xnew
self.pars['t'] = num.sort(self.pars['t'])
try:
self._setup()
except:
self.pars['t'] = old_knots
self.setup = False
def __call__(self, x):
'''Interpolate at point [x]. Returns a 2-tuple: (y, mask) where [y]
is the interpolated point, and [mask] is a boolean array with the same
shape as [x] and is True where interpolated and False where extrapolated'''
if not self.setup:
self._setup()
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
x = num.atleast_1d(x)
if self.realization:
evm = num.atleast_1d(splev(x, self.realization))
mask = num.greater_equal(x, self.realization[0][0])*\
num.less_equal(x,self.realization[0][-1])
else:
evm = num.atleast_1d(splev(x, self.tck))
mask = num.greater_equal(x, self.tck[0][0])*num.less_equal(x,self.tck[0][-1])
if scalar:
return evm[0],mask[0]
else:
return evm,mask
def domain(self):
return (self.tck[0][0], self.tck[0][-1])
def draw(self):
'''Generate a random realization of the spline, based on the data.'''
k = self.tck[2]
y_draw = num.random.normal(self.y, self.ey)
args = self.pars.copy()
args['task'] = -1
args['t'] = self.tck[0][k+1:-(k+1)]
self.realizations.append(splrep(self.x, y_draw, self.ey, **args))
if len(self.realizations) > self.num_real_keep:
self.realizations = self.realizations[1:]
self.realization = self.realizations[-1]
def reset_mean(self):
self.realization = None
def rchisquare(self):
chisq = self.chisquare()
return chisq/(len(self.x) - len(self.tck[0]) - 1)
def deriv(self, x, n=1):
'''Returns the nth derivative of the function at x.'''
if self.realization:
tck = self.realization
else:
tck = self.tck
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
x = num.atleast_1d(x)
if self.realization:
evm = num.atleast_1d(splev(x, self.realization, der=n))
else:
evm = num.atleast_1d(splev(x, self.tck, der=n))
if scalar:
return evm[0]
else:
return evm
def find_extrema(self, xmin=None, xmax=None):
'''Find the position and values of the maxima/minima. Returns a tuple:
(roots,vals,ypps) where roots are the x-values where the extrema
occur, vals are the y-values at these points, and ypps are the
2nd derivatives. Optionally, search only between xmin and xmax.'''
#evaluate the 1st derivative at k+1 intervals between the knots
if self.realization:
t,c,k = self.realization
else:
t,c,k = self.tck
if xmax is None: xmax = t[-1]
if xmin is None: xmin = t[0]
x0s = t[k:-k]
xs = []
for i in range(len(x0s)-1):
xs.append(num.arange(x0s[i],x0s[i+1],(x0s[i+1]-x0s[i])/(k+1)))
xs = num.concatenate(xs)
yps = self.deriv(xs, n=1)
# now find the roots of the 1st derivative
tck2 = splrep(xs, yps, k=3, s=0)
roots = sproot(tck2)
curvs = []
vals = []
for root in roots:
vals.append(self.__call__(root)[0])
curvs.append(self.deriv(root, n=2))
gids = num.greater_equal(roots,xmin)*num.less_equal(roots,xmax)
curvs = num.where(num.equal(curvs,0), 0, curvs/num.absolute(curvs))
return roots[gids],num.array(vals)[gids],num.array(curvs)[gids]
def intercept(self, y):
'''Find the value of x for which the interpolator goes through [y]'''
# use a fun little trick:
if self.realization:
tck = self.realization[0][::],self.realization[1]-y,\
self.realization[2]
else:
tck = self.tck[0][::],self.tck[1]-y,self.tck[2]
roots = sproot(tck)
gids = num.greater_equal(roots,tck[0][0])*num.less_equal(roots,tck[0][-1])
return roots[gids]
functions['spline'] = (Spline, "Dierckx style splines (FITPACK)")
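# Registry lookup sketch (illustrative; assumes data arrays x, y, dy):
# klass, desc = functions['spline']
# interp = klass(x, y, dy, k=3, s=len(x)) # keywords from Spline.pars such as k, s, t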
if gp == 'pymc':
class GaussianProcess(oneDcurve):
def __init__(self, x, y, dy, mask=None, **args):
'''Fit a Gaussian Process (pymc.gp) to the data. [args] may include
diff_degree, scale, amp, and an optional mean function.'''
oneDcurve.__init__(self, x, y, dy, mask)
self.pars = {
'diff_degree':None,
'scale':None,
'amp':None}
for key in args:
if key not in self.pars and key != "mean":
raise TypeError("%s is an invalid keyword argument for this method" % key)
if key != "mean": self.pars[key] = args[key]
if 'mean' in args:
self.mean = args['mean']
else:
self.mean = lambda x: x*0 + num.median(self.y)
# Make sure the data conform to the spine requirements
self.median = num.median(self.y)
self._setup()
self.realization = None
def __str__(self):
return "Gaussian Process"
#def func(self, x):
# return x*0 + self.median
def __getstate__(self):
# we need to define this because Mean and Cov are not pickleable
dict = self.__dict__.copy()
if 'M' in dict: del dict['M']
if 'C' in dict: del dict['C']
if 'mean' in dict: dict['mean'] = None
# Setting setup to None will force re-generation of M and C
# when we are unpickled
dict['setup'] = False
return dict
def help(self):
print("scale: Scale over which the function varies")
print("amp: Amplitude of typical function variations")
print("diff_degree: Roughly, the degree of differentiability")
def _setup(self):
'''Given the current set of params, setup the interpolator.'''
import pymc.gp as GP
globals()['GP'] = GP
x,y,dy = self._regularize()
if self.diff_degree is None:
self.diff_degree = 3
if self.amp is None:
self.amp = num.std(y - self.mean(x))
if self.scale is None:
#self.scale = (self.x.max() - self.x.min())/2
self.scale = 30
self.M = GP.Mean(self.mean)
self.C = GP.Covariance(GP.matern.euclidean, diff_degree=self.diff_degree,
amp=self.amp, scale=self.scale)
GP.observe(self.M, self.C, obs_mesh=x, obs_vals=y, obs_V=num.power(dy,2))
self.setup = True
self.realization = None
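# Note (based on pymc.gp semantics): GP.observe conditions M and C on the data,
# so M(x) below returns the posterior mean and C(x) the posterior variance used
# by error().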
def __call__(self, x):
'''Interpolate at point [x]. Returns a 2-tuple: (y, mask) where [y]
is the interpolated point, and [mask] is a boolean array with the same
shape as [x] and is True where interpolated and False where extrapolated'''
if not self.setup:
self._setup()
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
x = num.atleast_1d(x)
if self.realization is not None:
res = self.realization(x)
else:
res = self.M(x)
if scalar:
return res[0],self.x.min() <= x[0] <= self.x.max()
else:
return res,num.greater_equal(x, self.x.min())*\
num.less_equal(x, self.x.max())
def domain(self):
return (self.x.min(),self.x.max())
def error(self, x):
'''Returns the error in the interpolator at points [x].'''
if not self.setup:
self._setup()
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
x = num.atleast_1d(x)
res = num.sqrt(self.C(x))
if scalar:
return res[0]
else:
return res
def draw(self):
'''Generate a random realization of the spline, based on the data.'''
if not self.setup:
self._setup()
self.realization = GP.Realization(self.M, self.C)
def reset_mean(self):
self.realization = None
def rchisquare(self):
chisq = self.chisquare()
if len(self.x) < 5:
return -1
return chisq/(len(self.x) - 4)
def deriv(self, x, n=1):
'''Returns the nth derivative of the function at x.'''
if len(num.shape(x)) < 1:
scalar = True
else:
scalar = False
xs = num.atleast_1d(x)
f = lambda x: self.__call__(x)[0]
res = deriv(f, xs, dx=self.scale/100., n=n)
if scalar:
return res[0]
else:
return res
def find_extrema(self, xmin=None, xmax=None):
'''Find the position and values of the maxima/minima. Returns a tuple:
(roots,vals,ypps) where roots are the x-values where the extrema
occur, vals are the y-values at these points, and ypps are the
2nd derivatives. Optionally, only search for roots between
xmin and xmax'''
#evaluate the 1st derivative at scale/20 intervals (that should be
# enough)
if xmin is None: xmin = self.x.min()
if xmax is None: xmax = self.x.max()
dx = min(self.scale*1.0/20, (xmax-xmin)/5.0)
xs = num.arange(xmin, xmax, dx)
#f = lambda x: self.__call__(x)[0]
dys = self.deriv(xs, n=1)
#dys = num.diff(self.__call__(xs)[0])
#adys = (dys[1:] + dys[:-1])/2
#dys = num.concatenate([[dys[0]],adys,[dys[-1]]])
pids = num.greater(dys, 0)
# Find indices where we go from True->False or False->True: XOR
inds = num.nonzero(num.logical_xor(pids[1:],pids[:-1]))[0]
if len(inds) == 0:
return (num.array([]), num.array([]), num.array([]))
ret = []
for i in range(len(inds)):
try:
res = brentq(self.deriv, xs[inds[i]], xs[inds[i]+1])
ret.append(res)
except:
continue
if len(ret) == 0:
return (num.array([]), num.array([]), num.array([]))
ret = num.array(ret)
vals = self.__call__(ret)[0]
curvs = self.deriv(ret, n=2)
curvs = num.where(curvs > 0, 1, curvs)
curvs = num.where(curvs < 0, -1, curvs)
return ret,vals,curvs
def intercept(self, y):
'''Find the value of x for which the interpolator goes through [y]'''
xs = num.arange(self.x.min(), self.x.max(), self.scale/10)
f = lambda x: self.__call__(x)[0] - y
ys = f(xs)
pids = num.greater(ys, 0)
if num.alltrue(pids) or num.alltrue(num.logical_not(pids)):
return None
ret = []
inds =
|
num.nonzero(pids[1:] - pids[:-1])
|
numpy.nonzero
|
import logging
from unittest.mock import Mock
import grpc
import numpy
import numpy.random
import pytest
from google.protobuf import any_pb2
from google.rpc import error_details_pb2, status_pb2, code_pb2
from grpc_status import rpc_status
from grpc4bmi.bmi_grpc_server import BmiServer
from grpc4bmi.bmi_grpc_client import BmiClient, RemoteException, handle_error
from grpc4bmi.reserve import reserve_values, reserve_grid_shape, reserve_grid_padding
from test.fake_models import SomeException, FailingModel, Rect3DGridModel, UnstructuredGridBmiModel, UniRectGridModel, \
Rect2DGridModel, Structured3DQuadrilateralsGridModel, Structured2DQuadrilateralsGridModel, Float32Model, Int32Model, \
BooleanModel
from test.flatbmiheat import FlatBmiHeat
logging.basicConfig(level=logging.DEBUG)
"""
Unit tests for the BMI client class. Every test performs cross-checking with a local instance of the BMI heat toy model.
"""
class MyRpcError(grpc.RpcError):
def trailing_metadata(self):
return []
class ServerWrapper(object):
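"""Wraps a BmiServer so each forwarded call gets a mocked grpc ServicerContext
injected, letting the tests exercise the server in-process without a real
channel; abort_with_status is rerouted to raise MyRpcError."""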
def __init__(self, server):
self.server = server
self.context = Mock(grpc.ServicerContext)
def forward(status):
raise MyRpcError(status.details)
self.context.abort_with_status.side_effect = forward
def __getattr__(self, item):
orig_attr = self.server.__getattribute__(item)
if callable(orig_attr):
def add_context(*args, **kwargs):
kwargs["context"] = self.context
return orig_attr(*args, **kwargs)
return add_context
return orig_attr
def make_bmi_classes(init=False):
client = BmiClient(stub=ServerWrapper(BmiServer(FlatBmiHeat())))
local = FlatBmiHeat()
if init:
numpy.random.seed(0)
client.initialize(None)
numpy.random.seed(0)
local.initialize(None)
return client, local
def test_server_start():
client, local = make_bmi_classes()
assert client is not None
del client
def test_component_name():
client, local = make_bmi_classes()
assert client.get_component_name() == local.get_component_name()
del client
def test_input_item_count():
client, local = make_bmi_classes()
assert client.get_input_item_count() == local.get_input_item_count()
del client
def test_output_item_count():
client, local = make_bmi_classes()
assert client.get_output_item_count() == local.get_output_item_count()
del client
def test_input_var_names():
client, local = make_bmi_classes()
assert client.get_input_var_names() == local.get_input_var_names()
del client
def test_output_var_names():
client, local = make_bmi_classes()
assert client.get_output_var_names() == local.get_output_var_names()
del client
def test_initialize():
client, local = make_bmi_classes(True)
assert client is not None
client.finalize()
del client
def test_update():
client, local = make_bmi_classes(True)
client.update()
assert client is not None
client.finalize()
del client
def test_update_until():
client, local = make_bmi_classes(True)
until = local.get_start_time() + local.get_time_step() + local.get_time_step()
client.update_until(until)
assert client.get_current_time() == until
client.finalize()
del client
def test_get_time_unit():
client, local = make_bmi_classes()
assert client.get_time_units() == local.get_time_units()
client.finalize()
del client
def test_get_time_step():
client, local = make_bmi_classes(True)
assert client.get_time_step() == local.get_time_step()
client.finalize()
del client
def test_get_current_time():
client, local = make_bmi_classes(True)
assert client.get_current_time() == local.get_current_time()
client.finalize()
del client
def test_get_updated_time():
client, local = make_bmi_classes(True)
client.update()
assert client.get_current_time() != local.get_current_time()
local.update()
assert client.get_current_time() == local.get_current_time()
client.finalize()
del client
def test_get_start_end_time():
client, local = make_bmi_classes(True)
assert client.get_start_time() == local.get_start_time()
assert client.get_end_time() == local.get_end_time()
client.finalize()
del client
def test_get_var_grid():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
assert client.get_var_grid(varname) == local.get_var_grid(varname)
del client
def test_get_var_type():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
assert client.get_var_type(varname) == local.get_var_type(varname)
del client
def test_get_var_units():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
assert client.get_var_units(varname) == local.get_var_units(varname)
del client
def test_get_var_nbytes():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
assert client.get_var_nbytes(varname) == local.get_var_nbytes(varname)
del client
def test_get_var_location():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
assert client.get_var_location(varname) == local.get_var_location(varname)
del client
def test_get_var_value():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
actual = client.get_value(varname, reserve_values(client, varname))
expected = local.get_value(varname, reserve_values(local, varname))
numpy.testing.assert_allclose(actual, expected)
del client
def test_get_value_ptr():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
with pytest.raises(NotImplementedError):
client.get_value_ptr(varname)
def test_get_vals_indices():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
indices = numpy.array([29, 8, 19, 81])
result = numpy.empty(len(indices), dtype=client.get_var_type(varname))
result = client.get_value_at_indices(varname, result, indices)
expected = numpy.empty(len(indices), dtype=local.get_var_type(varname))
expected = local.get_value_at_indices(varname, expected, indices)
numpy.testing.assert_allclose(result, expected)
def test_set_var_value():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
values = 0.123 * local.get_value(varname, reserve_values(local, varname))
client.set_value(varname, values)
numpy.testing.assert_allclose(client.get_value(varname, reserve_values(client, varname)), values)
def test_set_values_indices():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
indices = numpy.array([1, 11, 21])
values = numpy.array([0.123, 4.567, 8.901], dtype=numpy.float64)
client.set_value_at_indices(varname, indices, values)
expected = numpy.empty(len(indices), dtype=client.get_var_type(varname))
expected = client.get_value_at_indices(varname, expected, indices)
numpy.testing.assert_allclose(expected, values)
def test_get_grid_size():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
grid_id = local.get_var_grid(varname)
assert client.get_grid_size(grid_id) == local.get_grid_size(grid_id)
def test_get_grid_rank():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
grid_id = local.get_var_grid(varname)
assert client.get_grid_rank(grid_id) == local.get_grid_rank(grid_id)
def test_get_grid_type():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
grid_id = local.get_var_grid(varname)
assert client.get_grid_type(grid_id) == local.get_grid_type(grid_id)
def test_get_grid_shape():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
grid_id = local.get_var_grid(varname)
result = client.get_grid_shape(grid_id, reserve_grid_shape(client, grid_id))
expected = local.get_grid_shape(grid_id, reserve_grid_shape(local, grid_id))
numpy.testing.assert_allclose(result, expected)
def test_get_grid_spacing():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
grid_id = local.get_var_grid(varname)
result = client.get_grid_spacing(grid_id, reserve_grid_padding(client, grid_id))
expected = local.get_grid_spacing(grid_id, reserve_grid_padding(local, grid_id))
numpy.testing.assert_allclose(result, expected)
def test_get_grid_origin():
client, local = make_bmi_classes(True)
varname = local.get_output_var_names()[0]
grid_id = local.get_var_grid(varname)
result = client.get_grid_origin(grid_id, reserve_grid_padding(client, grid_id))
expected = local.get_grid_origin(grid_id, reserve_grid_padding(local, grid_id))
numpy.testing.assert_allclose(result, expected)
@pytest.mark.parametrize("client_method,client_request", [
('initialize', ('config.ini',)),
('update', ()),
('update_until', (42,)),
('finalize', ()),
('get_component_name', ()),
('get_input_item_count', ()),
('get_output_item_count', ()),
('get_input_var_names', ()),
('get_output_var_names', ()),
('get_time_units', ()),
('get_time_step', ()),
('get_current_time', ()),
('get_start_time', ()),
('get_end_time', ()),
('get_var_grid', ('something',)),
('get_var_type', ('something',)),
('get_var_itemsize', ('something',)),
('get_var_units', ('something',)),
('get_var_nbytes', ('something',)),
('get_var_location', ('something',)),
('get_value', ('something', numpy.empty(0))),
('get_value_at_indices', ('something', numpy.empty(0), numpy.array([42]))),
('set_value', ('something', numpy.array([1234]))),
('set_value_at_indices', ('something', numpy.array([42]), numpy.array([1234]))),
('get_grid_size', (42,)),
('get_grid_type', (42,)),
('get_grid_rank', (42,)),
('get_grid_x', (42, numpy.empty(0))),
('get_grid_y', (42, numpy.empty(0))),
('get_grid_z', (42, numpy.empty(0))),
('get_grid_shape', (42, numpy.empty(0))),
('get_grid_spacing', (42, numpy.empty(0))),
('get_grid_origin', (42, numpy.empty(0))),
('get_grid_node_count', (42,)),
('get_grid_edge_count', (42,)),
('get_grid_face_count', (42,)),
('get_grid_edge_nodes', (42, numpy.empty(0))),
('get_grid_face_nodes', (42, numpy.empty(0))),
('get_grid_face_edges', (42, numpy.empty(0))),
('get_grid_nodes_per_face', (42, numpy.empty(0))),
])
def test_method_exception(client_method, client_request):
exc = SomeException('bmi method always fails')
model = FailingModel(exc)
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
with pytest.raises(Exception) as excinfo:
getattr(client, client_method)(*client_request)
assert "bmi method always fails" in str(excinfo.value)
class TestUniRectGridModel:
@pytest.fixture
def bmiclient(self):
model = UniRectGridModel()
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
yield client
del client
def test_grid_type(self, bmiclient):
assert bmiclient.get_grid_type(0) == 'uniform_rectilinear'
def test_grid_size(self, bmiclient):
assert bmiclient.get_grid_size(0) == 24
def test_grid_rank(self, bmiclient):
assert bmiclient.get_grid_rank(0) == 3
def test_grid_shape(self, bmiclient):
result = bmiclient.get_grid_shape(0, numpy.empty(3))
expected = (2, 3, 4)
numpy.testing.assert_allclose(result, expected)
def test_grid_origin(self, bmiclient):
result = bmiclient.get_grid_origin(0, numpy.empty(3))
expected = (0.1, 1.1, 2.1)
numpy.testing.assert_allclose(result, expected)
def test_grid_spacing(self, bmiclient):
result = bmiclient.get_grid_spacing(0, numpy.empty(3))
expected = (0.1, 0.2, 0.3)
numpy.testing.assert_allclose(result, expected)
class TestRect3DGridModel:
@pytest.fixture
def bmiclient(self):
model = Rect3DGridModel()
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
yield client
del client
def test_grid_size(self, bmiclient):
assert bmiclient.get_grid_size(0) == 24
def test_grid_rank(self, bmiclient):
assert bmiclient.get_grid_rank(0) == 3
def test_grid_x(self, bmiclient):
result = bmiclient.get_grid_x(0, numpy.empty(4))
expected = [0.1, 0.2, 0.3, 0.4]
numpy.testing.assert_allclose(result, expected)
def test_grid_y(self, bmiclient):
result = bmiclient.get_grid_y(0, numpy.empty(3))
expected = [1.1, 1.2, 1.3]
numpy.testing.assert_allclose(result, expected)
def test_grid_z(self, bmiclient):
result = bmiclient.get_grid_z(0, numpy.empty(2))
expected = [2.1, 2.2]
numpy.testing.assert_allclose(result, expected)
class TestRect2DGridModel:
@pytest.fixture
def bmiclient(self):
model = Rect2DGridModel()
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
yield client
del client
def test_grid_size(self, bmiclient):
assert bmiclient.get_grid_size(0) == 12
def test_grid_rank(self, bmiclient):
assert bmiclient.get_grid_rank(0) == 2
def test_grid_x(self, bmiclient):
result = bmiclient.get_grid_x(0, numpy.empty(4))
expected = [0.1, 0.2, 0.3, 0.4]
numpy.testing.assert_allclose(result, expected)
def test_grid_y(self, bmiclient):
result = bmiclient.get_grid_y(0, numpy.empty(3))
expected = [1.1, 1.2, 1.3]
numpy.testing.assert_allclose(result, expected)
def test_grid_z(self, bmiclient):
with pytest.raises(MyRpcError) as excinfo:
bmiclient.get_grid_z(0, numpy.empty(4))
assert 'out of bounds' in str(excinfo.value)
class TestStructured3DQuadrilateralsGridModel:
@pytest.fixture
def bmiclient(self):
model = Structured3DQuadrilateralsGridModel()
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
yield client
del client
def test_grid_size(self, bmiclient):
assert bmiclient.get_grid_size(0) == 4
def test_grid_rank(self, bmiclient):
assert bmiclient.get_grid_rank(0) == 3
def test_grid_shape(self, bmiclient):
result = bmiclient.get_grid_shape(0, numpy.empty(3))
expected = [1, 2, 2]
numpy.testing.assert_allclose(result, expected)
def test_grid_x(self, bmiclient):
result = bmiclient.get_grid_x(0, numpy.empty(4))
expected = [1.1, 0.1, 1.1, 2.1]
numpy.testing.assert_allclose(result, expected)
def test_grid_y(self, bmiclient):
result = bmiclient.get_grid_y(0, numpy.empty(4))
expected = [2.2, 1.2, 0.2, 2.2]
numpy.testing.assert_allclose(result, expected)
def test_grid_z(self, bmiclient):
result = bmiclient.get_grid_z(0, numpy.empty(4))
expected = [1.1, 2.2, 3.3, 4.4]
numpy.testing.assert_allclose(result, expected)
class TestStructured2DQuadrilateralsGridModel:
@pytest.fixture
def bmiclient(self):
model = Structured2DQuadrilateralsGridModel()
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
yield client
del client
def test_grid_size(self, bmiclient):
assert bmiclient.get_grid_size(0) == 4
def test_grid_rank(self, bmiclient):
assert bmiclient.get_grid_rank(0) == 2
def test_grid_shape(self, bmiclient):
result = bmiclient.get_grid_shape(0, numpy.empty(2))
expected = [2, 2]
numpy.testing.assert_allclose(result, expected)
def test_grid_x(self, bmiclient):
result = bmiclient.get_grid_x(0, numpy.empty(4))
expected = [1.1, 0.1, 1.1, 2.1]
numpy.testing.assert_allclose(result, expected)
def test_grid_y(self, bmiclient):
result = bmiclient.get_grid_y(0, numpy.empty(4))
expected = [2.2, 1.2, 0.2, 2.2]
numpy.testing.assert_allclose(result, expected)
def test_grid_z(self, bmiclient):
with pytest.raises(MyRpcError) as excinfo:
bmiclient.get_grid_z(0, numpy.empty(4))
assert 'Do not know what z is' in str(excinfo.value)
class TestUnstructuredGridBmiModel:
@pytest.fixture
def bmiclient(self):
model = UnstructuredGridBmiModel()
client = BmiClient(stub=ServerWrapper(BmiServer(model)))
yield client
del client
def test_get_grid_shape(self, bmiclient):
with pytest.raises(MyRpcError) as excinfo:
bmiclient.get_grid_shape(0, numpy.empty(3))
assert 'Do not know what shape is' in str(excinfo.value)
def test_grid_size(self, bmiclient):
assert bmiclient.get_grid_size(0) == 6
def test_grid_rank(self, bmiclient):
assert bmiclient.get_grid_rank(0) == 2
def test_get_grid_node_count(self, bmiclient):
result = bmiclient.get_grid_node_count(0)
assert result == 6
def test_get_grid_edge_count(self, bmiclient):
result = bmiclient.get_grid_edge_count(0)
assert result == 8
def test_get_grid_face_count(self, bmiclient):
result = bmiclient.get_grid_face_count(0)
assert result == 3
def test_get_grid_edge_nodes(self, bmiclient):
placeholder = numpy.empty(16, dtype=int)
result = bmiclient.get_grid_edge_nodes(0, placeholder)
expected = (0, 1, 1, 2, 2, 3, 3, 0, 1, 4, 4, 5, 5, 2, 5, 3)
numpy.testing.assert_allclose(result, expected)
def test_grid_face_nodes(self, bmiclient):
placeholder = numpy.empty(11, dtype=int)
result = bmiclient.get_grid_face_nodes(0, placeholder)
expected = (0, 1, 2, 3, 1, 4, 5, 2, 2, 5, 3)
numpy.testing.assert_allclose(result, expected)
def test_grid_face_edges(self, bmiclient):
placeholder = numpy.empty(11, dtype=int)
result = bmiclient.get_grid_face_edges(0, placeholder)
expected = (0, 1, 2, 3, 4, 5, 6, 1, 6, 7, 2)
numpy.testing.assert_allclose(result, expected)
def test_grid_nodes_per_face(self, bmiclient):
placeholder = numpy.empty(3, dtype=int)
result = bmiclient.get_grid_nodes_per_face(0, placeholder)
expected = (4, 4, 3)
numpy.testing.assert_allclose(result, expected)
def test_grid_x(self, bmiclient):
result = bmiclient.get_grid_x(0, numpy.empty(6))
expected = [0., 1., 2., 1., 3., 4.]
numpy.testing.assert_allclose(result, expected)
def test_grid_y(self, bmiclient):
result = bmiclient.get_grid_y(0, numpy.empty(6))
expected = [3., 1., 2., 4., 0., 3.]
numpy.testing.assert_allclose(result, expected)
def test_grid_z(self, bmiclient):
with pytest.raises(MyRpcError) as excinfo:
bmiclient.get_grid_z(0, numpy.empty(4))
assert 'Do not know what z is' in str(excinfo.value)
class TestFloat32Model:
name = 'plate_surface__temperature'
@pytest.fixture
def bmimodel(self):
return Float32Model()
@pytest.fixture
def bmiclient(self, bmimodel):
client = BmiClient(stub=ServerWrapper(BmiServer(bmimodel)))
yield client
del client
def test_get_value(self, bmiclient):
result = bmiclient.get_value(self.name, numpy.empty(3))
expected = numpy.array((1.1, 2.2, 3.3), dtype=numpy.float32)
numpy.testing.assert_allclose(result, expected)
def test_get_value_at_indices(self, bmiclient):
result = bmiclient.get_value_at_indices(self.name, numpy.empty(1), numpy.array([1]))
expected = numpy.array([2.2], dtype=numpy.float32)
numpy.testing.assert_allclose(result, expected)
def test_set_value(self, bmimodel, bmiclient):
value = numpy.array((2.1, 3.2, 4.3), dtype=numpy.float32)
bmiclient.set_value(self.name, value)
numpy.testing.assert_allclose(value, bmimodel.value)
def test_set_value_at_indices(self, bmimodel, bmiclient):
value = numpy.array([8.8], dtype=numpy.float32)
bmiclient.set_value_at_indices(self.name, numpy.array([1]), value)
expected = numpy.array((1.1, 8.8, 3.3), dtype=numpy.float32)
numpy.testing.assert_allclose(expected, bmimodel.value)
class TestInt32Model:
name = 'plate_surface__temperature'
@pytest.fixture
def bmimodel(self):
model = Int32Model()
yield model
del model
@pytest.fixture
def bmiclient(self, bmimodel):
client = BmiClient(stub=ServerWrapper(BmiServer(bmimodel)))
yield client
del client
def test_get_value(self, bmiclient):
result = bmiclient.get_value(self.name, numpy.empty(3))
expected = numpy.array((12, 24, 36), dtype=numpy.int32)
numpy.testing.assert_allclose(result, expected)
def test_get_value_at_indices(self, bmiclient):
result = bmiclient.get_value_at_indices(self.name, numpy.empty(1), numpy.array([1]))
expected = numpy.array([24], dtype=numpy.int32)
numpy.testing.assert_allclose(result, expected)
def test_set_value(self, bmimodel, bmiclient):
value = numpy.array((48, 50, 62), dtype=numpy.int32)
bmiclient.set_value(self.name, value)
numpy.testing.assert_allclose(value, bmimodel.value)
def test_set_value_at_indices(self, bmimodel, bmiclient):
value = numpy.array([88], dtype=numpy.int32)
bmiclient.set_value_at_indices(self.name, numpy.array([1]), value)
expected = numpy.array((12, 88, 36), dtype=numpy.int32)
numpy.testing.assert_allclose(expected, bmimodel.value)
class TestBooleanModel:
name = 'plate_surface__temperature'
@pytest.fixture
def bmimodel(self):
return BooleanModel()
@pytest.fixture
def bmiclient(self, bmimodel):
client = BmiClient(stub=ServerWrapper(BmiServer(bmimodel)))
yield client
del client
def test_get_value(self, bmiclient):
with pytest.raises(MyRpcError):
bmiclient.get_value(self.name,
|
numpy.empty(3)
|
numpy.empty
|
import numpy as np
from tentacle.board import Board
from tentacle.dnn3 import DCNN3
from tentacle.strategy import Strategy, Auditor
from tentacle.utils import attemper
from builtins import (super)
class StrategyDNN(Strategy, Auditor):
def __init__(self, is_train=False, is_revive=True, is_rl=False, from_file=None, part_vars=True):
super().__init__()
self.init_exp = 0.3 # initial exploration prob
self.final_exp = 0.001 # final exploration prob
self.anneal_steps = 90 * 1000 # N steps for annealing exploration
self.absorb_progress = 0
self.exploration = self.init_exp
self.temperature = 0.02
self.win_ratio = 1.
self.brain = DCNN3(is_train, is_revive, is_rl)
self.brain.run(from_file, part_vars)
def update_at_end(self, old, new):
if not self.needs_update():
return
def update(self, old, new):
pass
def _update_impl(self, old, new, reward):
pass
def board_value(self, board, context):
pass
def explore_strategy1(self, probs, legal, top1, **kwargs):
if np.random.rand() < self.exploration:
top_n = np.argsort(probs)[-2:]
if legal[top_n[-1]] != 1 or legal[top_n[-2]] != 1:
return top1, False
if probs[top_n[-1]] - probs[top_n[-2]] < 0.2:
rand_loc = np.random.choice(top_n)
return rand_loc, rand_loc != top1
return top1, False
def explore_strategy2(self, probs, legal, top1, **kwargs):
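'''
temperature-scaled exploration (descriptive note added for clarity): the
softmax temperature is nudged up when the recent win ratio is high and down
when it is low, then the move is sampled from the tempered distribution.
'''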
if self.win_ratio is not None:
if self.win_ratio > 1.1:
self.temperature += 0.002
elif self.win_ratio < 1/1.1:
self.temperature -= 0.002
self.temperature = min(max(0.001, self.temperature), 100)
probs = attemper(probs, self.temperature, legal)
rand_loc = np.random.choice(Board.BOARD_SIZE_SQ, 1, p=probs)
# rand_loc = np.random.multinomial(1, probs).argmax()
return rand_loc, rand_loc != top1
def explore_strategy3(self, probs, legal, top1, **kwargs):
if np.random.rand() < self.exploration:
rand_loc = np.random.choice(np.where(legal == 1)[0], 1)[0]
return rand_loc, rand_loc != top1
return top1, False
def explore_strategy4(self, probs, legal, top1, **kwargs):
'''
track statistics of the action distribution and encourage actions with small probability to move first
'''
return top1, False
def explore_strategy5(self, probs, legal, top1, **kwargs):
'''
one chance of exploration per game
'''
game = kwargs['game']
if game.exploration_counter == 0:
NUM_ACTIONS = Board.BOARD_SIZE_SQ
x = np.random.randint(NUM_ACTIONS - game.step_counter)
if x < 2:
rand_loc = np.random.choice(np.where(legal == 1)[0], 1)[0]
return rand_loc, rand_loc != top1
return top1, False
def preferred_move(self, board, game=None):
v = board.stones
state, legal = self.get_input_values(v)
probs, raw_pred = self.brain.get_move_probs(state)
probs = probs[0]
if np.allclose(probs, 0.):
print('output probs all 0')
probs *= legal
rand_loc =
|
np.argmax(probs)
|
numpy.argmax
|
# -*- coding: utf-8 -*-
"""
Module for evaluating the factor of safety against sliding by using the limit
equilibrium method through the General Limit Equilibrium (GLE) method presented
by `Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_.
"""
# %%
class SlopeStabl:
"""Creates an instance of an object that allows to evaluate the factor of
safety against sliding of a slope. ::
SlopeStabl(slices, seedFS=1, Kh=0, maxIter=50, tol=1e-3,
interSlcFunc='halfsine', minLambda=-0.6, maxLambda=0.6, nLambda=10)
Attributes:
slices (`Slices` object): object that contains the data structure of
the slices in which the sliding mass has been divided.
seedFS (`float` or `int`): Initial value of factor of safety for
starting the iterative algorithm. ``1`` is the default value.
lambda_ (`float` or `int`): Factor that multiplies the interslice
function to determine the interslice horizontal forces. ``0`` is
the default value.
Kh (`float`): horizontal seismic coefficient for the pseudostatic
analysis. A positive value means the force is directed out
of the slope (i.e. in the direction of failure). ``0`` is the
default value.
maxIter (`int`): Maximum number of iterations for stopping the
algorithm in case the tolerance is not reached. ``50`` is the
default value.
tol (`float`): Required tolerance to stop the iterations. It is the
difference between the last two values obtained for the factor of
safety and lambda; i.e., two tolerances have to be reached. ``1e-3`` is
the default value.
interSlcFunc (`str` or 'float'): Interslice function that relates the
normal interslice forces and the parameter lambda to obtain the
shear interslice forces. ``halfsine`` is the default value and
corresponds to Morgenstern and Price method, but a
constant number may be input, for example ``interSlcFunc=1``,
corresponds to Spencer method.
maxLambda (`float`): Maximum value the lambda parameter can get.
``0.6`` is the default value.
nLambda (`int`): Number of values the lambda parameter can take from
zero to ``maxLambda``. ``10`` is the default value.
Note:
The class ``Slices`` requires
`numpy <http://www.numpy.org/>`_, `scipy <https://www.scipy.org/>`_,
`matplotlib <https://matplotlib.org/>`_ and
`shapely <https://pypi.python.org/pypi/Shapely>`_.
Examples:
>>> from numpy import array
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.watertable import WaterTable
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726, radius=80)
>>> material = MaterialParameters(
>>> cohesion=600, frictAngle=20, unitWeight=120, wtUnitWeight=62.4)
>>> watertable = WaterTable(slopeCoords=slope.coords,
>>> watertabDepths=array([[0, 140], [20, 0]]))
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=50,
>>> watertabCoords=watertable.coords, bim=None)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0)
>>> stabAnalysis.__dict__.keys()
dict_keys(['slices', 'Kh', 'seedFS', 'maxIter', 'tol', 'interSlcFunc',
'minLambda', 'maxLambda', 'nLambda', 'fsBishop',
'fsFellenius', 'fsMoment', 'fsForces', 'lambda_',
'adjustment', 'FS'])
"""
def __init__(self, slices, seedFS=1, Kh=0, maxIter=50, tol=1e-3,
interSlcFunc='halfsine', minLambda=-0.6, maxLambda=0.6,
nLambda=10):
'''
SlopeStabl(slices, seedFS=1, Kh=0, maxIter=50, tol=1e-3,
interSlcFunc='halfsine', minLambda=-0.6, maxLambda=0.6,
nLambda=10)
'''
self.slices = slices
self.Kh = Kh
self.seedFS = seedFS
self.maxIter = maxIter
self.tol = tol
self.interSlcFunc = interSlcFunc
self.minLambda = minLambda
self.maxLambda = maxLambda
self.nLambda = nLambda
# Setting the values of the interslice force function
self.intersliceForceFunct()
# Calculating the arms for the moments
self.calculateArms()
# Forces that do not vary in each iteration
self.calculateBasicForces()
# Get factors of safety for several values of lambda_a
self.iterateGLE()
return
def intersliceForceFunct(self, v=1, u=1):
'''
Method for calculating the interslice function which is a component of
the interslice forces; this is done by using the Equation [11] of
`Zhu et al (2015) <https://doi.org/10.1139/t04-072>`_, with
v = u = 1 for a symmetric and non-narrowed halfsine function.
When the object is instantiated with a constant interslice
function, then all the values are equal to that constant value.
Args:
v (`int` or `float`): shape parameter. Controls the symmetry. ``1``
is the default value.
u (`int` or `float`): shape parameter. Controls the kurtosis. ``1``
is the default value.
Returns:
(`list`): Values of the interslice force function at all slice edges.
Examples:
>>> import matplotlib.pyplot as plt
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=50)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0)
>>> interslcForceFunc = stabAnalysis.intersliceForceFunct(u=1)
>>> plt.plot(interslcForceFunc, 'k')
.. figure:: https://rawgit.com/eamontoyaa/pybimstab/master/examples/figures/slopestabl_interSlcFunct_example.svg
:alt: slopestabl_interSlcFunct_example
.. only:: html
:download:`example script<../examples/figuresScripts/slopestabl_interSlcFunct_example.py>`.
'''
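# With v = u = 1 the expression below reduces to the plain half-sine
# f(x) = sin(pi * (x - a) / (b - a)), evaluated at the left and right edges of
# every slice (a and b are the ends of the slip surface).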
from numpy import sin, pi
# Getting the ends of the slip surface
a = self.slices.slices[0].terrainCoords[0].min() # leftmost point
b = self.slices.slices[-1].terrainCoords[0].max() # rightmost point
interslcForceFunc = list()
# Calculate the interslice functions values (f_i = fL, f_{i-1} = fR)
for slice_ in self.slices.slices:
lf = slice_.xMin # leftmost slice point
rt = slice_.xMax # rightmost slice point
# Evaluating the half-sine function on the sides of the slice
if self.interSlcFunc == 'halfsine':
halfSineL = sin(pi * ((lf - a) / (b - a)) ** v) ** u
halfSineR = sin(pi * ((rt - a) / (b - a)) ** v) ** u
else: # if not a half-sine, then use the input constant value
halfSineL = self.interSlcFunc
halfSineR = self.interSlcFunc
interslcForceFunc.append(halfSineL)
setattr(slice_, 'fL', halfSineL)
setattr(slice_, 'fR', halfSineR)
interslcForceFunc.append(halfSineR)
return interslcForceFunc
def calculateArms(self):
'''
Method for calculating the arms required for getting the moments of
each slice with respect to a rotation point.
This function does not return any output, just modifies the structure
of each slice by setting new attributes.
'''
import numpy as np
from numpy import radians as rad
from shapely.geometry import LineString
for n, slice_ in enumerate(self.slices.slices):
setattr(slice_, 'n', n)
# Vertical linestring that splits the slice through the centroid
xMean = (slice_.xMin + slice_.xMax) / 2
vertLine = LineString([(xMean, -self.slices.rotationPt[1]),
(xMean, self.slices.rotationPt[1])])
# Arms for external loads
loadPt1 = np.array(slice_.terrainLS.intersection(vertLine))
theta = np.arctan((self.slices.rotationPt[1] - loadPt1[1]) /
(self.slices.rotationPt[0] - loadPt1[0])) % np.pi
alpha = abs(rad(slice_.w) - theta)
loadPt2RotPt = np.linalg.norm(self.slices.rotationPt - loadPt1)
proy = loadPt2RotPt * abs(np.cos(alpha))
d = (loadPt2RotPt ** 2 - proy ** 2) ** 0.5
if rad(slice_.w) < theta:
d *= -1
setattr(slice_, 'd', d)
# Arms for normal loads at the base
loadPt2 = slice_.slipSurfLS.intersection(vertLine)
if loadPt2.type != 'Point':
loadPt2 = [xMean, loadPt1[1] - slice_.midHeight]
loadPt2 = np.array(loadPt2)
theta = np.arctan((self.slices.rotationPt[1] - loadPt2[1]) /
(self.slices.rotationPt[0] - loadPt2[0])) % np.pi
alpha = abs(0.5*np.pi - rad(slice_.alpha) - theta)
loadPt2RotPt = np.linalg.norm(self.slices.rotationPt - loadPt2)
proy = loadPt2RotPt * abs(np.cos(alpha))
f = (loadPt2RotPt ** 2 - proy ** 2) ** 0.5
if 0.5*np.pi - rad(slice_.alpha) < theta:
f *= -1
setattr(slice_, 'f', f)
setattr(slice_, 'R', proy) # Arm for the mobilized shear force
# Arms for horizontal seismic force
e = self.slices.rotationPt[1] - (loadPt2[1] + 0.5*slice_.midHeight)
setattr(slice_, 'e', e)
# Arms for the weight of the slice
x = self.slices.rotationPt[0] - xMean
setattr(slice_, 'x', x)
return
def calculateBasicForces(self):
'''
Method for calculating the forces that do not vary in each iteration or
lambda value.
This function does not return any output, just modifies the structure
of each slice by setting new attributes.
'''
for slice_ in self.slices.slices:
if self.slices.bim is not None:
# blocks and matrix areas to get slice weight
blocksArea = slice_.numBlocks * slice_.localBIM.tileSize**2
mtxArea = slice_.area - blocksArea
weight = slice_.material.blocksUnitWeight * \
blocksArea + mtxArea*slice_.material.unitWeight
else:
weight = slice_.area * slice_.material.unitWeight
setattr(slice_, 'weight', weight)
# Average water pressure (mu) and the resultant water force (U)
mu = slice_.material.wtUnitWeight * slice_.midWatTabHeight
setattr(slice_, 'mu', mu)
U = mu * slice_.l
setattr(slice_, 'U', U)
# Setting interslices forces equal to zero
setattr(slice_, 'Xl', 0)
setattr(slice_, 'Xr', 0)
setattr(slice_, 'El', 0)
setattr(slice_, 'Er', 0)
return
def calculateNormalForce(self, seedFS):
'''
Method for calculating the normal force to the base; this is done by
using the Equation [16] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_.
Since the normal forces are updated with each iteration, it is necessary
to input a factor of safety as a seed.
Args:
seedFS (`int` or `float`): Seed factor of safety.
Returns:
(`list`): Values of all the normal forces at the slice's bases
Examples:
>>> from numpy import array
>>> import matplotlib.pyplot as plt
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.watertable import WaterTable
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=5)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0)
>>> stabAnalysis.calculateNormalForce(stabAnalysis.FS['fs'])
[45009.409630951726, 68299.77910530512, 70721.13554871723,
57346.7578530581, 22706.444365285253]
'''
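# As implemented below (Eq. [16] of Fredlund & Krahn, 1977):
# m_alpha = cos(alpha) + sin(alpha) * tan(phi) / FS
# P = [W + Xr - Xl - (c*l - U*tan(phi)) * sin(alpha)/FS + extL*sin(w)] / m_alpha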
from numpy import sin, cos, tan
from numpy import radians as rad
listP = list()
for slice_ in self.slices.slices:
# Calculating the normal force 'P' at the base of the slice_.
c = slice_.material.cohesion
phi = rad(slice_.material.frictAngle)
if seedFS == 0:
seedFS = 1
mAlpha = cos(rad(slice_.alpha)) + sin(rad(slice_.alpha)) * \
tan(rad(slice_.material.frictAngle)) / seedFS
P = (slice_.weight + slice_.Xr - slice_.Xl -
(c * slice_.l - slice_.U * tan(phi)) *
sin(rad(slice_.alpha)) / seedFS +
slice_.extL * sin(rad(slice_.w))) / mAlpha
setattr(slice_, 'P', P)
listP.append(P)
return listP
def getFm(self, seedFS, lambda_=0):
'''
Method for getting the factor of safety with respect to moment
equilibrium; this is done by using the Equation [22] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_.
Since the factor of safety is updated with each iteration, it is necessary
to input a factor of safety as a seed and the current value of lambda
to relate the interslice normal force and the interslice force function
with respect to the interslice shear force (Eq. [16] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_).
Args:
seedFS (`int` or `float`): Seed factor of safety.
lambda_ (`int` or `float`): Seed value of lambda. ``0`` is the
default value.
Returns:
(`dict`): Dictionary with the value of the factor of safety and a\
tuple with the boolean that indicates whether the tolerance was\
reached and the number of the iteration.
Examples:
>>> # Example Case 1 - Fig. 9 (Fredlund & Krahn, 1977)
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=50,
>>> watertabCoords=None, bim=None)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0, minLambda=0,
>>> interSlcFunc=1, nLambda=10)
>>> stabAnalysis.getFm(stabAnalysis.FS['fs'],
>>> stabAnalysis.FS['lambda'])
(2.0750390044795854, True)
'''
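# Moment-equilibrium factor of safety as accumulated below (Eq. [22]):
# Fm = sum(c*l*R + (P - U)*R*tan(phi)) / sum(W*x - P*f + extL*d + Kh*W*e)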
from numpy import tan
from numpy import radians as rad
# Doing the iteration
toleraceReached = False
fs = [seedFS]
for i in range(self.maxIter):
# Calculating the normal force at each base slice_.
self.calculateNormalForce(fs[-1])
num = 0
den1, den2, den3, den4 = 0, 0, 0, 0
for slice_ in self.slices.slices:
c = slice_.material.cohesion
phi = rad(slice_.material.frictAngle)
num += c * slice_.l * slice_.R + \
(slice_.P - slice_.U) * slice_.R * tan(phi)
den1 += slice_.weight * slice_.x
den2 += slice_.P * slice_.f
den3 += slice_.extL * slice_.d
den4 += self.Kh * slice_.weight * slice_.e
fs.append(num / (den1 - den2 + den3 + den4))
# Recalculating the interslice forces
self.intersliceForces(fs[-1], lambda_)
self.calculateNormalForce(fs[-1])
self.intersliceForces(fs[-1], lambda_)
# Verifying if the tolerance is reached
if i > 5 and max(abs(fs[-1] - fs[-2]), abs(fs[-2] - fs[-3])) <= self.tol:
toleraceReached = True
break
return fs[-1], toleraceReached
def getFf(self, seedFS, lambda_=0):
'''
Method for getting the factor of safety with respect to force
equilibrium; this is done by using the Equation [23] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_.
Since the factor of safety is updated with each iteration, it is necessary
to input a factor of safety as a seed and the current value of lambda
to relate the interslice normal force and the interslice force function
with respect to the interslice shear force (Eq. [16] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_).
Args:
seedFS (`int` or `float`): Seed factor of safety.
lambda_ (`int` or `float`): Seed value of lambda. ``0`` is the
default value.
Returns:
(`dict`): Dictionary with the value of the factor of safety and a\
tuple with the boolean that indicates whether the tolerance was\
reached and the number of the iteration.
Examples:
>>> # Example Case 1 - Fig. 9 (Fredlund & Krahn, 1977)
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=50,
>>> watertabCoords=None, bim=None)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0, minLambda=0,
>>> interSlcFunc=1, nLambda=10)
>>> stabAnalysis.getFf(stabAnalysis.FS['fs'],
>>> stabAnalysis.FS['lambda'])
(2.0741545445738296, True)
'''
from numpy import tan, cos, sin
from numpy import radians as rad
# Doing the iteration
toleranceReached = False
fs = [seedFS]
for i in range(self.maxIter):
# Calculating the normal force at each base slice_.
self.calculateNormalForce(fs[-1])
num = 0
den1, den2, den3 = 0, 0, 0
for slice_ in self.slices.slices:
c = slice_.material.cohesion
phi = rad(slice_.material.frictAngle)
num += c * slice_.width + (slice_.P - slice_.U) * tan(phi) * \
cos(rad(slice_.alpha))
den1 += slice_.P * sin(rad(slice_.alpha))
den2 += slice_.extL * cos(rad(slice_.w))
den3 += self.Kh * slice_.weight
fs.append(num / (den1 - den2 + den3))
# Recalculating the interslice forces
self.intersliceForces(fs[-1], lambda_)
self.calculateNormalForce(fs[-1])
self.intersliceForces(fs[-1], lambda_)
# Verifying if the tolerance is reached
if i > 5 and max(abs(fs[-1] - fs[-2]), abs(fs[-2] - fs[-3])) <= self.tol:
toleranceReached = True
break
return fs[-1], toleranceReached
def intersliceForces(self, seedFS, lambda_):
'''
Method for getting the shear and normal interslice forces; this is
done by using the equation of section 14.8 of
`GeoSlope (2015) <http://downloads.geo-slope.com/geostudioresources/books/8/15/slope%20modeling.pdf>`_
for the right normal force and the Equation [16] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_ for the
shear force.
Since the interslice forces are updated with each iteration, it is
necessary to input a factor of safety as a seed and the current value
of lambda, which relates the interslice normal force and the interslice
force function to the interslice shear force (Eq. [16] of
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_).
Args:
seedFS (`int` or `float`): Seed factor of safety.
lambda_ (`int` or `float`): Seed value of lambda. ``0`` is the
default value.
Returns:
(`tuple`): Tuple with the interslice forces. The first element\
contains the normal interslice forces and the second contains\
the shear interslice forces.
Examples:
>>> from numpy import array
>>> import matplotlib.pyplot as plt
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.watertable import WaterTable
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=5)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0)
>>> stabAnalysis.intersliceForces(stabAnalysis.FS['fs'],
>>> stabAnalysis.FS['lambda'])
([0, -24561.260979675248, -42085.32887504204, -38993.844201424305,
-18464.723052348225, -61.4153504520018],
[0, -5511.202498703704, -15279.673506543182, -14157.266298947989,
-4143.22489013017, -2.8712090198929304e-15])
'''
from numpy import tan, cos, sin
from numpy import radians as rad
forcesE = [self.slices.slices[0].El]
forcesX = [self.slices.slices[0].Xl]
for i in range(self.slices.numSlices):
slice_ = self.slices.slices[i]
c = slice_.material.cohesion
phi = rad(slice_.material.frictAngle)
Sm = (c * slice_.l + (slice_.P - slice_.U) * tan(phi))/seedFS
setattr(slice_, 'Sm', Sm)
# Eq. [19] of Fredlund & Krahn (1977) does not work for now
# slice_.Er = slice_.El + (slice_.weight - slice_.Xr + slice_.Xl) *\
# tan(rad(slice_.alpha)) - Sm / cos(rad(slice_.alpha)) + \
# self.Kh * slice_.weight
# Eq. gotten from the section 14.8 of GEO-SLOPE (2015)
slice_.Er = slice_.El - slice_.P * sin(rad(slice_.alpha)) + \
Sm * cos(rad(slice_.alpha)) - self.Kh * slice_.weight + \
slice_.extL * cos(rad(slice_.w))
slice_.Xr = slice_.Er * lambda_ * slice_.fR
if i < self.slices.numSlices - 1:
nextSlice = self.slices.slices[i+1]
nextSlice.El = slice_.Er
nextSlice.Xl = slice_.Xr
forcesE.append(slice_.Er)
forcesX.append(slice_.Xr)
return (forcesE, forcesX)
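# --- Illustrative note (not part of pybimstab) ------------------------------
# Eq. [16] of Fredlund & Krahn (1977), as applied above, ties the shear
# interslice force X to the normal interslice force E through lambda and the
# interslice force function f evaluated at the slice boundary:
#
#     X = lambda_ * f * E      # e.g. slice_.Xr = slice_.Er * lambda_ * slice_.fR
#
# With lambda_ = 0 there is no interslice shear (Bishop-type assumption);
# larger lambda_ mobilises a larger share of the normal force as shear.
# ----------------------------------------------------------------------------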
def iterateGLE(self):
'''
Method for getting the factor of safety against sliding through
the algorithm of the General Limit Equilibrium (GLE) proposed by
`Fredlund & Krahn (1977) <https://doi.org/10.1139/t77-045>`_.
Returns:
(`dict` or `None`): Dictionary with the factor of safety against\
sliding and the associated value of lambda if the solution exists\
(or with the Bishop and Fellenius factors of safety when\
``nLambda`` is zero); ``None`` otherwise.
Examples:
>>> from numpy import array
>>> import matplotlib.pyplot as plt
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.watertable import WaterTable
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=5)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0)
>>> stabAnalysis.iterateGLE()
{'fs': 2.0258090954552275, 'lambda': 0.38174822248691215}
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0, nLambda=0)
>>> stabAnalysis.iterateGLE()
{'fsBishop': 2.0267026043637175, 'fsFellenius': 1.770864711650081}
'''
import numpy as np
from pybimstab.tools import getIntersect
from pybimstab.smoothcurve import SmoothCurve
if self.nLambda > 0:
# Getting the values of lambda to iterate GLE
lambdaVal = np.unique(list(np.linspace(
self.minLambda, self.maxLambda, self.nLambda)) + [0])
# Iteration for moments
fsBishop, tol = self.getFm(self.seedFS, lambda_=0) # Moment equil.
setattr(self, 'fsBishop', fsBishop)
fsMoment, tolM = [fsBishop], list()
for lambda_ in lambdaVal:
fsM, tol = self.getFm(fsMoment[-1], lambda_)
if max(fsM, fsMoment[-1]) / min(fsM, fsMoment[-1]) > 1.5:
tol = False
fsMoment.append(fsM)
tolM.append(tol)
self.intersliceForces(fsMoment[-1], lambda_)
self.calculateNormalForce(fsMoment[-1])
fsMoment.pop(0)
for slice_ in self.slices.slices:
slice_.Er, slice_.El, slice_.Xr, slice_.Xl = 0, 0, 0, 0
# Iteration for forces
fsFellenius, tol = self.getFf(self.seedFS, lambda_=0) # Force eq.
setattr(self, 'fsFellenius', fsFellenius)
fsForces, tolF = [fsFellenius], list()
for lambda_ in lambdaVal:
fsF, tol = self.getFf(fsForces[-1], lambda_)
if max(fsF, fsForces[-1]) / min(fsF, fsForces[-1]) > 1.5:
tol = False
fsForces.append(fsF)
tolF.append(tol)
self.intersliceForces(fsMoment[-1], lambda_)
self.calculateNormalForce(fsMoment[-1])
fsForces.pop(0)
# Creating the attributes
idx2interp = np.where(np.logical_and(tolM, tolF))  # keep lambdas where both iterations converged
setattr(self, 'fsMoment', list(np.array(fsMoment)[idx2interp]))
setattr(self, 'fsForces', list(np.array(fsForces)[idx2interp]))
setattr(self, 'lambda_', list(np.array(lambdaVal)[idx2interp]))
# Get intersection of factors of safety
momentLine = SmoothCurve(
x=self.lambda_, y=self.fsMoment, k=3, n=100)
forcesLine = SmoothCurve(
x=self.lambda_, y=self.fsForces, k=3, n=100)
x, momentsY = momentLine.smoothing
forcesY = forcesLine.smoothing[1]
setattr(self, 'adjustment', (x, momentsY, forcesY))
intersect = getIntersect(x=x, y1=momentsY, y2=forcesY)
if intersect is None:
root, fs = None, None
setattr(self, 'FS', {'fs': None, 'lambda': None})
else:
root, fs = intersect
setattr(self, 'FS', {'fs': fs, 'lambda': root})
# Slices forces when full equilibrium is found (lambda=root)
if fs is not None:
self.getFf(fs, root)
return {'fs': fs, 'lambda': root}
else:
fsBishop, tol = self.getFm(self.seedFS, lambda_=0)
setattr(self, 'fsBishop', fsBishop)
fsFellenius, tol = self.getFf(self.seedFS, lambda_=0)
setattr(self, 'fsFellenius', fsFellenius)
return {'fsBishop': fsBishop, 'fsFellenius': fsFellenius}
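# --- Illustrative sketch (not part of pybimstab) ----------------------------
# iterateGLE evaluates Fm(lambda) (moment equilibrium) and Ff(lambda) (force
# equilibrium) over a range of lambda values and takes the crossing of the
# two curves as the overall factor of safety. A minimal crossing search with
# plain numpy, assuming fsMoment and fsForces were evaluated at the same
# lambda values (here a hypothetical increasing array `lambdas`):
#
#     diff = np.asarray(fsMoment) - np.asarray(fsForces)
#     i = int(np.argmax(np.diff(np.sign(diff)) != 0))  # first sign change
#     root = lambdas[i] - diff[i] * (lambdas[i + 1] - lambdas[i]) / (diff[i + 1] - diff[i])
#     fs = np.interp(root, lambdas, fsMoment)
# ----------------------------------------------------------------------------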
def plot(self):
'''Method for generating a graphic of the slope stability analysis,
including the convergence plot of the factors of safety.
Returns:
(`matplotlib.figure.Figure`): object with the matplotlib structure\
of the plot. You might use it to save the figure for example.
Examples:
>>> # Example Case 1 - Fig. 9 (Fredlund & Krahn, 1977)
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=50,
>>> watertabCoords=None, bim=None)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0, minLambda=0,
>>> interSlcFunc=1, nLambda=10)
>>> fig = stabAnalysis.plot()
.. figure:: https://rawgit.com/eamontoyaa/pybimstab/master/examples/figures/slopestabl_example1.svg
:alt: slopestabl_example1
.. only:: html
:download:`example script<../examples/figuresScripts/slopestabl_example1.py>`.
>>> # Example Case 5 (Fredlund & Krahn, 1977)
>>> from numpy import array
>>> from pybimstab.slope import AnthropicSlope
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.watertable import WaterTable
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> slope = AnthropicSlope(slopeHeight=40, slopeDip=[2, 1],
>>> crownDist=60, toeDist=30, depth=20)
>>> surface = CircularSurface(slopeCoords=slope.coords,
>>> dist1=45.838, dist2=158.726,
>>> radius=80)
>>> material = MaterialParameters(cohesion=600, frictAngle=20,
>>> unitWeight=120,
>>> wtUnitWeight=62.4)
>>> watertable = WaterTable(slopeCoords=slope.coords,
>>> watertabDepths=array([[0, 140],
>>> [20, 0]]))
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=50,
>>> watertabCoords=watertable.coords, bim=None)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0, minLambda=0)
>>> fig = stabAnalysis.plot()
.. figure:: https://rawgit.com/eamontoyaa/pybimstab/master/examples/figures/slopestabl_example2.svg
:alt: slopestabl_example2
.. only:: html
:download:`example script<../examples/figuresScripts/slopestabl_example2.py>`.
>>> from numpy import array
>>> from pybimstab.slope import NaturalSlope
>>> from pybimstab.watertable import WaterTable
>>> from pybimstab.bim import BlocksInMatrix
>>> from pybimstab.slipsurface import CircularSurface
>>> from pybimstab.slipsurface import TortuousSurface
>>> from pybimstab.slices import MaterialParameters, Slices
>>> from pybimstab.slopestabl import SlopeStabl
>>> terrainCoords = array(
>>> [[-2.49, 0.1, 1.7, 3.89, 5.9, 8.12, 9.87, 13.29, 20.29,
>>> 21.43, 22.28, 23.48, 24.65, 25.17],
>>> [18.16, 17.88, 17.28, 15.73, 14.31, 13.58, 13, 3.61, 3.61,
>>> 3.32, 2.71, 2.23, 1.21, 0.25]])
>>> slope = NaturalSlope(terrainCoords)
>>> bim = BlocksInMatrix(slopeCoords=slope.coords, blockProp=0.2,
>>> tileSize=0.35, seed=3210)
>>> watertabDepths = array([[0, 5, 10, 15],
>>> [8, 7, 3, 0]])
>>> watertable = WaterTable(slopeCoords=slope.coords,
>>> watertabDepths=watertabDepths,
>>> smoothFactor=3)
>>> preferredPath = CircularSurface(
>>> slopeCoords=slope.coords, dist1=5, dist2=15.78, radius=20)
>>> surface = TortuousSurface(
>>> bim, dist1=4, dist2=15.5, heuristic='euclidean',
>>> reverseLeft=False, reverseUp=False, smoothFactor=2,
>>> preferredPath=preferredPath.coords, prefPathFact=2)
>>> material = MaterialParameters(
>>> cohesion=15, frictAngle=23, unitWeight=17,
>>> blocksUnitWeight=21, wtUnitWeight=9.8)
>>> slices = Slices(
>>> material=material, slipSurfCoords=surface.coords,
>>> slopeCoords=slope.coords, numSlices=20,
>>> watertabCoords=watertable.coords, bim=bim)
>>> stabAnalysis = SlopeStabl(slices, seedFS=1, Kh=0, nLambda=13,
>>> minLambda=0)
>>> fig = stabAnalysis.plot()
.. figure:: https://rawgit.com/eamontoyaa/pybimstab/master/examples/figures/slopestabl_example3.svg
:alt: slopestabl_example3
.. only:: html
:download:`example script<../examples/figuresScripts/slopestabl_example3.py>`.
'''
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LinearSegmentedColormap as newcmap
from matplotlib import gridspec
# Variables to control the color map and its legend
if self.slices.bim is not None:
if
|
np.any(self.slices.bim.grid == -1)
|
numpy.any
|
import numpy as np
import os
import argparse
import os.path as osp
def check_size(submission_file):
max_size = 60*1024*1024
if osp.getsize(submission_file) > max_size:
raise IOError("File size exceeds the specified maximum size, which is 60M for the server.")
def judge_overlap(pbox,ignore_box):
overlap=[]
delete=[]
for p in pbox:
pl=min(p[0],p[2])
pr=max(p[0],p[2])
pb=min(p[1],p[3])
pt=max(p[1],p[3])
s_p=(pr-pl)*(pt-pb)
s_lap=-0.01
for c in ignore_box:
cl=min(c[0],c[2])
cr=max(c[0],c[2])
cb=min(c[1],c[3])
ct=max(c[1],c[3])
if not (cr<pl or cl>pr or ct<pb or cb>pt):
s_lap+=(min(cr,pr)-max(cl,pl))*(min(ct,pt)-max(cb,pb))
if s_lap>0:
overlap.append([p,s_lap/s_p])
for o in overlap:
if o[1]>0.5:
delete.append(o[0])
remain_id = [p for p in pbox if p not in delete]
return remain_id
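# Illustrative usage of judge_overlap (not part of the original script):
# boxes are [x1, y1, x2, y2], and a box is dropped when more than half of its
# area falls inside an ignore region. With the toy input below, the first box
# is fully covered by the ignore region and should be removed, leaving only
# the second box:
#
#     kept = judge_overlap(pbox=[[0, 0, 10, 10], [100, 100, 110, 110]],
#                          ignore_box=[[0, 0, 10, 10]])
#     # expected: [[100, 100, 110, 110]]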
def parse_ignore_file(ignore_file):
with open(ignore_file,'r') as f:
lines = f.readlines()
ig = [x.strip().split() for x in lines]
ignore = {}
for item in ig:
key = item[0]
ignore_num = (len(item)-1)/4
bbox = []
for i in range(int(ignore_num)):
b = []
b.append(int(item[1+4*i]))
b.append(int(item[2+4*i]))
b.append(int(item[1+4*i])+int(item[3+4*i]))
b.append(int(item[2+4*i])+int(item[4+4*i]))
bbox.append(b)
ignore[key] = bbox
return ignore
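# Note on the ignore-file format implied by the parser above (an assumption
# inferred from the indexing, not from separate documentation): each line is
#
#     <image_id> x y w h [x y w h ...]
#
# i.e. an image id followed by zero or more ignore boxes given as top-left
# corner plus width and height; parse_ignore_file converts each box to
# [x1, y1, x2, y2].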
def parse_submission(submission_file,ignore_file):
ignore_zone = parse_ignore_file(ignore_file)
ignore_keys = ignore_zone.keys()
with open(submission_file, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split() for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = []
for x in splitlines:
bb = []
bb.append(float(x[2]))
bb.append(float(x[3]))
bb.append(float(x[2])+float(x[4]))
bb.append(float(x[3])+float(x[5]))
BB.append(bb)
sub_key = []
for x in image_ids:
if x not in sub_key:
sub_key.append(x)
final_confidence = []
final_ids = []
final_BB = []
for key in sub_key:
find = [i for i,v in enumerate(image_ids) if v == key]
BB_sub = [BB[i] for i in find]
confid_sub = [confidence[i] for i in find]
if key in ignore_keys:
ignore_bbox = ignore_zone[key]
bbox_remain = judge_overlap(BB_sub,ignore_bbox)
find_remain = []
for i,v in enumerate(BB_sub):
if v in bbox_remain:
find_remain.append(i)
confid_remain = [confid_sub[i] for i in find_remain]
BB_sub = bbox_remain
confid_sub = confid_remain
ids_sub = [key]*len(BB_sub)
final_ids.extend(ids_sub)
final_confidence.extend(confid_sub)
final_BB.extend(BB_sub)
final_BB = np.array(final_BB)
final_confidence = np.array(final_confidence)
sorted_ind = np.argsort(-final_confidence)
final_BB = final_BB[sorted_ind, :]
final_ids = [final_ids[x] for x in sorted_ind]
return final_ids, final_BB
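# Note on the submission format implied by the parser above (an assumption
# inferred from the indexing): one detection per line,
#
#     <image_id> <confidence> x y w h
#
# Boxes are converted to [x1, y1, x2, y2], detections lying mostly inside
# ignore regions are filtered out with judge_overlap, and the remainder is
# sorted by descending confidence before evaluation.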
def parse_gt_annotation(gt_file,ignore_file):
ignore_zone = parse_ignore_file(ignore_file)
ignore_keys = ignore_zone.keys()
with open(gt_file, 'r') as f:
lines = f.readlines()
info = [x.strip().split() for x in lines]
gt = {}
for item in info:
bbox = []
bbox_num = (len(item)-1)/5
for i in range(int(bbox_num)):
b = []
b.append(int(item[2+5*i]))
b.append(int(item[3+5*i]))
b.append(int(item[2+5*i])+int(item[4+5*i]))
b.append(int(item[3+5*i])+int(item[5+5*i]))
bbox.append(b)
if item[0] in ignore_keys:
ignore_bbox = ignore_zone[item[0]]
bbox_remain = judge_overlap(bbox,ignore_bbox)
else:
bbox_remain = bbox
gt[item[0]] = np.array(bbox_remain)
return gt
def compute_ap(rec, prec):
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
i = np.where(mrec[1:] != mrec[:-1])[0]
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
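# Illustrative check of compute_ap (not part of the original script): with
# rec = [0.5, 1.0] and prec = [1.0, 0.5] the interpolated precision envelope
# is [1.0, 1.0, 0.5] over recall [0.0, 0.5, 1.0], so
#
#     compute_ap(np.array([0.5, 1.0]), np.array([1.0, 0.5]))
#
# should return 0.5 * 1.0 + 0.5 * 0.5 = 0.75.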
def pedestrian_eval(input, gt_file, ignore_file, ovthresh):
gt = parse_gt_annotation(gt_file,ignore_file)
image_ids, BB = parse_submission(input,ignore_file)
npos = 0
recs = {}
for key in gt.keys():
det = [False]*len(gt[key])
recs[key] = {'bbox': gt[key], 'det': det}
npos += len(gt[key])
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
if image_ids[d] not in recs.keys():
raise KeyError("Can not find image {} in the groundtruth file, did you submit the result file for the right dataset?".format(image_ids[d]))
for d in range(nd):
R = recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos+1e-8)
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = compute_ap(rec, prec)
return ap
def wider_ped_eval(input, gt,ignore_file):
aap = []
for ove in
|
np.arange(0.5, 1.0, 0.05)
|
numpy.arange
|
import numpy as np
import pytest
import xarray as xr
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import (
mean_absolute_error,
mean_squared_error,
median_absolute_error as sklearn_med_abs,
r2_score,
)
from xskillscore.core.deterministic import (
mae,
mape,
median_absolute_error,
mse,
pearson_r,
pearson_r_p_value,
r2,
rmse,
smape,
spearman_r,
spearman_r_p_value,
)
@pytest.fixture
def a():
time = xr.cftime_range('2000-01-01', '2000-01-03', freq='D')
da = xr.DataArray(np.random.rand(len(time)), dims=['time'], coords=[time])
return da
@pytest.fixture
def b(a):
b = a.copy()
b.values = np.random.rand(b.shape[0])
return b
def test_pearsonr_same_as_scipy(a, b):
"""Tests that pearson r correlation and pvalue is same as computed from
scipy."""
xs_corr = pearson_r(a, b, 'time')
xs_p = pearson_r_p_value(a, b, 'time')
scipy_corr, scipy_p = pearsonr(a, b)
assert np.allclose(xs_corr, scipy_corr)
assert np.allclose(xs_p, scipy_p)
def test_r2_same_as_sklearn(a, b):
"""Tests that r2 is same as computed from sklearn."""
xs_r2 = r2(a, b, 'time')
sklearn_r2 = r2_score(a, b)
assert np.allclose(xs_r2, sklearn_r2)
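# Illustrative addition (not in the original test file): the error metrics
# imported above follow the same call pattern and can be checked against
# sklearn in the same way.
def test_mae_same_as_sklearn(a, b):
    """Tests that mae is the same as sklearn's mean_absolute_error."""
    xs_mae = mae(a, b, 'time')
    sklearn_mae = mean_absolute_error(a, b)
    assert np.allclose(xs_mae, sklearn_mae)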
def test_spearmanr_same_as_scipy(a, b):
"""Tests that spearman r correlation and pvalue is same as computed from
scipy."""
xs_corr = spearman_r(a, b, 'time')
xs_p = spearman_r_p_value(a, b, 'time')
scipy_corr, scipy_p = spearmanr(a, b)
assert np.allclose(xs_corr, scipy_corr)
assert
|
np.allclose(xs_p, scipy_p)
|
numpy.allclose
|
'''
Route script used for running Control-based continuation.
'''
# import required packages
import numpy as np
import scipy.linalg as la
import math
import dtLib.nonlinearcbc.nonlinearcbc2 as cbc2
from flask import render_template, request, redirect, Response, url_for, flash, send_file
from dtApp import app
from dtApp import date
import plotly
import plotly.graph_objs as go
import json
from plotly.subplots import make_subplots
from scipy.integrate import odeint
from pathlib import Path
@app.route('/nonlinearcbc', methods=['GET', 'POST']) #@app.route('/bristolcbc', methods=['GET', 'POST'])
def bristolcbc():
# define input data
pars_file = Path('dtApp/dtData/bris_pars.npy')
if pars_file.is_file():
# file exists
pars = np.load('dtApp/dtData/bris_pars.npy')
m1, m2, m3 = pars[0], pars[1], pars[2]
b1, b2, b3 = pars[3], pars[4], pars[5]
s1, s2, s3 = pars[6], pars[7], pars[8]
s13 = pars[9]
else:
m1, m2, m3 = 0.2, 0.2, 0.2 #masses
s1, s2, s3, s13 = 200.0, 200.0, 200.0, 200.0 #stiffnesses
b1, b2, b3 = 0.1, 0.1, 0.1 #viscous damping
sweep_file = Path('dtApp/dtData/sweep_pars.npy')
if sweep_file.is_file():
# file exists
sweep_pars = np.load('dtApp/dtData/sweep_pars.npy')
else:
sweep_pars = np.array([1.0, 50.0, 1.0, 80.0, 0.945])
#Mass, damping and stiffness matrices
MM = np.array([[m1, 0.0, 0.0], [0.0, m2, 0.0], [0.0, 0.0, m3]])
BB = np.array([[b1+b2, -b2, 0.0], [-b2, b2+b3, -b3], [0.0, -b3, b3]])
SS = np.array([[s1+s2, -s2, 0.0], [-s2, s2+s3, -s3], [0.0, -s3, s3]])
Minv = la.inv(MM)
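# Aside (assumption, not used by the route below): MM, BB and SS define a
# 3-DOF mass-damper-spring chain, so the undamped natural frequencies could
# be checked from the generalized eigenvalue problem SS @ phi = w**2 * MM @ phi,
# e.g. with scipy.linalg already imported as `la`:
#
#     # w_sq = np.sort(la.eigvals(SS, MM).real)
#     # natural_freqs_hz = np.sqrt(w_sq) / (2 * np.pi)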
IC = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Minfreq = sweep_pars[0]
Maxfreq = sweep_pars[1]
Freq_list_def = np.linspace(Minfreq, Maxfreq, 200)
Min_exc_amp = sweep_pars[2]
Max_exc_amp = sweep_pars[3]
Exc_amp_list_def = np.linspace(Min_exc_amp, Max_exc_amp, 40)
Freq_list_up = np.empty(shape=(0))
Freq_list_down = np.empty(shape=(0))
Amplist_up = np.empty(shape=(0))
Amplist_down = np.empty(shape=(0))
Amplist_up2 = np.empty(shape=(0))
Amplist_down2 = np.empty(shape=(0))
Amplist_up3 = np.empty(shape=(0))
Amplist_down3 = np.empty(shape=(0))
Exc_amp_list_incr = np.empty(shape=(0))
Exc_amp_list_decr = np.empty(shape=(0))
Amplist_incr = np.empty(shape=(0))
Amplist_decr = np.empty(shape=(0))
Amplist_incr2 = np.empty(shape=(0))
Amplist_decr2 = np.empty(shape=(0))
Amplist_incr3 = np.empty(shape=(0))
Amplist_decr3 = np.empty(shape=(0))
branch_cont = np.empty(shape=(0, 4))
IC = np.zeros(6)
status = np.array([1.0])
np.save('dtApp/dtData/CBC/Freq_list_def.npy', Freq_list_def)
np.save('dtApp/dtData/CBC/Freq_list_up.npy', Freq_list_up)
np.save('dtApp/dtData/CBC/Freq_list_down.npy', Freq_list_down)
np.save('dtApp/dtData/CBC/Amplist_up.npy', Amplist_up)
np.save('dtApp/dtData/CBC/Amplist_down.npy', Amplist_down)
np.save('dtApp/dtData/CBC/Amplist_up2.npy', Amplist_up2)
np.save('dtApp/dtData/CBC/Amplist_down2.npy', Amplist_down2)
np.save('dtApp/dtData/CBC/Amplist_up3.npy', Amplist_up3)
np.save('dtApp/dtData/CBC/Amplist_down3.npy', Amplist_down3)
np.save('dtApp/dtData/CBC/Exc_amp_list_def.npy', Exc_amp_list_def)
np.save('dtApp/dtData/CBC/Exc_amp_list_incr.npy', Exc_amp_list_incr)
np.save('dtApp/dtData/CBC/Exc_amp_list_decr.npy', Exc_amp_list_decr)
np.save('dtApp/dtData/CBC/Amplist_incr.npy', Amplist_incr)
np.save('dtApp/dtData/CBC/Amplist_decr.npy', Amplist_decr)
np.save('dtApp/dtData/CBC/Amplist_incr2.npy', Amplist_incr2)
np.save('dtApp/dtData/CBC/Amplist_decr2.npy', Amplist_decr2)
np.save('dtApp/dtData/CBC/Amplist_incr3.npy', Amplist_incr3)
np.save('dtApp/dtData/CBC/Amplist_decr3.npy', Amplist_decr3)
np.save('dtApp/dtData/CBC/branch_cont.npy', branch_cont)
np.save('dtApp/dtData/CBC/IC.npy', IC)
np.save('dtApp/dtData/CBC/mode.npy', status)
np.save('dtApp/dtData/CBC/Exc_amp_list_incr_cbc.npy', Exc_amp_list_incr)
np.save('dtApp/dtData/CBC/Exc_amp_list_decr_cbc.npy', Exc_amp_list_decr)
np.save('dtApp/dtData/CBC/Amplist_incr_cbc.npy', Amplist_incr)
np.save('dtApp/dtData/CBC/Amplist_decr_cbc.npy', Amplist_decr)
graph1 = cbc_dataplot(Freq_list_up, Freq_list_down, Amplist_up, Amplist_down, Exc_amp_list_incr, Exc_amp_list_decr, Amplist_incr, Amplist_decr, branch_cont, Exc_amp_list_incr, Exc_amp_list_decr, Amplist_incr, Amplist_decr)
return render_template("nonlinearcbc.html", plot=graph1,date=date)
@app.route("/par_submit", methods=['GET', 'POST'])
def par_submit():
if request.method=='POST':
req = request.form
# print(req)
m1 = float(req.get("m1"))
m2 = float(req.get("m2"))
m3 = float(req.get("m3"))
s1 = float(req.get("s1"))
s2 = float(req.get("s2"))
s3 = float(req.get("s3"))
b1 = float(req.get("b1"))
b2 = float(req.get("b2"))
b3 = float(req.get("b3"))
s13 = float(req.get("s13"))
F0 = float(req.get("F0"))
om0 = float(req.get("om0"))
kp = float(req.get("kp"))
kd = float(req.get("kd"))
om_min = float(req.get("om_min"))
om_max = float(req.get("om_max"))
F_min = float(req.get("F_min"))
F_max = float(req.get("F_max"))
A_max = float(req.get("A_max"))
fsw_per = int(req.get("fsw_per"))
asw_per = int(req.get("asw_per"))
cbc_per = int(req.get("cbc_per"))
cbc_maxiter = int(req.get("cbc_maxiter"))
cbc_etol = float(req.get("cbc_etol"))
pars = np.array([m1, m2, m3, b1, b2, b3, s1, s2, s3, s13, F0, om0, kp, kd])
np.save('dtApp/dtData/CBC/pars.npy', pars)
sweep_pars = np.array([om_min, om_max, F_min, F_max, A_max])
np.save('dtApp/dtData/CBC/sweep_pars.npy', sweep_pars)
np.save('dtApp/dtData/CBC/fsw_per.npy', fsw_per)
np.save('dtApp/dtData/CBC/asw_per.npy', asw_per)
np.save('dtApp/dtData/CBC/cbc_per.npy', cbc_per)
np.save('dtApp/dtData/CBC/cbc_maxiter.npy', cbc_maxiter)
np.save('dtApp/dtData/CBC/cbc_etol.npy', cbc_etol)
Freq_list_def = np.load('dtApp/dtData/CBC/Freq_list_def.npy')
Freq_list_up = np.load('dtApp/dtData/CBC/Freq_list_up.npy')
Freq_list_down = np.load('dtApp/dtData/CBC/Freq_list_down.npy')
Amplist_up = np.load('dtApp/dtData/CBC/Amplist_up.npy')
Amplist_down = np.load('dtApp/dtData/CBC/Amplist_down.npy')
Exc_amp_list_def = np.load('dtApp/dtData/CBC/Exc_amp_list_def.npy')
Exc_amp_list_incr = np.load('dtApp/dtData/CBC/Exc_amp_list_incr.npy')
Exc_amp_list_decr = np.load('dtApp/dtData/CBC/Exc_amp_list_decr.npy')
Amplist_incr = np.load('dtApp/dtData/CBC/Amplist_incr.npy')
Amplist_decr = np.load('dtApp/dtData/CBC/Amplist_decr.npy')
branch_cont = np.load('dtApp/dtData/CBC/branch_cont.npy')
IC = np.load('dtApp/dtData/CBC/IC.npy')
Exc_amp_list_incr_cbc = np.load('dtApp/dtData/CBC/Exc_amp_list_incr_cbc.npy')
Exc_amp_list_decr_cbc = np.load('dtApp/dtData/CBC/Exc_amp_list_decr_cbc.npy')
Amplist_incr_cbc = np.load('dtApp/dtData/CBC/Amplist_incr_cbc.npy')
Amplist_decr_cbc = np.load('dtApp/dtData/CBC/Amplist_decr_cbc.npy')
Minfreq = sweep_pars[0]
Maxfreq = sweep_pars[1]
Freq_list_def = np.linspace(Minfreq, Maxfreq, 200)
Min_exc_amp = sweep_pars[2]
Max_exc_amp = sweep_pars[3]
Exc_amp_list_def = np.linspace(Min_exc_amp, Max_exc_amp, 40)
np.save('dtApp/dtData/CBC/Freq_list_def.npy', Freq_list_def)
np.save('dtApp/dtData/CBC/Exc_amp_list_def.npy', Exc_amp_list_def)
graph1 = cbc_dataplot(Freq_list_up, Freq_list_down, Amplist_up, Amplist_down, Exc_amp_list_incr, Exc_amp_list_decr, Amplist_incr, Amplist_decr, branch_cont, Exc_amp_list_incr_cbc, Exc_amp_list_decr_cbc, Amplist_incr_cbc, Amplist_decr_cbc)
return render_template("nonlinearcbc2.html", plot=graph1, m1=m1, m2=m2, m3=m3, s1=s1, s2=s2, s3=s3, b1=b1, b2=b2, b3=b3, s13=s13, F0=F0, om0=om0, kp=kp, kd=kd, fsw_per=fsw_per, asw_per=asw_per, cbc_per=cbc_per, cbc_maxiter=cbc_maxiter, cbc_etol=cbc_etol, om_min=om_min, om_max=om_max, F_min=F_min, F_max=F_max, A_max=A_max,date=date)
@app.route('/download')
def downloadFile ():
#For windows you need to use drive name [ex: F:/Example.pdf]
path = "dtApp/dtData/CBC/data_download.txt"
path_d = "dtData/CBC/data_download.txt"
Freq_list_def = np.load('dtApp/dtData/CBC/Freq_list_def.npy')
Freq_list_up = np.load('dtApp/dtData/CBC/Freq_list_up.npy')
Freq_list_down =
|
np.load('dtApp/dtData/CBC/Freq_list_down.npy')
|
numpy.load
|
import copy
import os
from functools import partial
import joblib
import numpy as np
import optuna
import pandas as pd
import lightgbm as lgbm
from .enums import ProblemType
from .logger import logger
from .metrics import Metrics
from .params import get_params
optuna.logging.set_verbosity(optuna.logging.INFO)
def reduce_memory_usage(df, verbose=True):
# NOTE: Original author of this function is unknown
# if you know the *original author*, please let me know.
numerics = ["int8", "int16", "int32", "int64", "float16", "float32", "float64"]
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max <
|
np.finfo(np.float16)
|
numpy.finfo
|
"""
First created on Mon Aug 13 10:01:03 2018
Module used for analysis of the image created from Zernike analysis
Versions:
Oct 31, 2019: 0.22 -> 0.22b introduced verbosity
Mar 10, 2020: 0.22b -> 0.23 if std too small, disregard error calculation
Apr 01, 2020: 0.23 -> 0.24 added options to create_basic_comparison_plot
Apr 29, 2020: 0.24 -> 0.24a added check for image for both side of defocus in create_solution
Jun 17, 2020: 0.24a -> 0.24b cleaned the STAMPS_FOLDER specification
Jun 25, 2020: 0.24b -> 0.25 improved create_res_data
Jul 03, 2020: 0.25 -> 0.26 included multi analysis
Jul 15, 2020: 0.26 -> 0.26a modified to include PSF_DIRECTORY
Sep 08, 2020: 0.26a -> 0.26b small changed around create_chains functions
Dec 07, 2020: 0.26b -> 0.26c added dataset=6
Feb 04, 2021: 0.26c -> 0.26d finalAr_Feb2020.pkl to finalAr_Feb2020
Feb 25, 2021: 0.26d -> 0.26e different folder for dataset=6 for tiger
Mar 10, 2021: 0.26e -> 0.26f added mask options for create_basic_comparison_plot
Mar 24, 2021: 0.26f -> 0.26g updated create_res_data and find_centroid
Apr 02, 2021: 0.26g -> 0.26h added option to save in create_basic_comparison_plot
Apr 21, 2021: 0.26h -> 0.26i expanded support for Tiger
Jul 26, 2021: 0.26i -> 0.26j changed default directory on loca, to point to Saturn_USA
Sep 28. 2021: 0.26j -> 0.26k modified parameters in plot_1D_residual
Nov 20. 2021: 0.26k -> 0.26l Hilo modifications
@author: <NAME>
@contact: <EMAIL>
@web: www.ncaplar.com
"""
########################################
# standard library imports
from __future__ import absolute_import, division, print_function
import os
# import time
# import sys
# import math
import socket
# os.environ["MKL_NUM_THREADS"] = "1"
# os.environ["NUMEXPR_NUM_THREADS"] = "1"
# os.environ["OMP_NUM_THREADS"] = "1"
import numpy as np
# print(np.__config__)
# from multiprocessing import current_process
# from functools import lru_cache
# from tqdm import tqdm
# import pyfftw
# import pandas as pd
########################################
# Related third party imports
# none at the moment
########################################
# Local application/library specific imports
# galsim
import galsim
# astropy
# import astropy
# import astropy.convolution
# from astropy.convolution import Gaussian2DKernel
from astropy.stats import bootstrap
# scipy and skimage
import scipy.misc
# import skimage.transform
import scipy.optimize as optimize
# from scipy.ndimage.filters import gaussian_filter
# pickle
import pickle
# lmfit
# import lmfit
# matplotlib
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# pandas
import pandas as pd
# needed for resizing routines
from typing import Tuple, Iterable
np.set_printoptions(suppress=True)
np.seterr(divide='ignore', invalid='ignore')
galsim.GSParams.maximum_fft_size = 12000
########################################
__all__ = ['Zernike_Analysis', 'Zernike_result_analysis', 'create_mask', 'resize', 'create_res_data']
__version__ = "0.26l"
############################################################
# name your directory where you want to have files!
if socket.gethostname() == 'IapetusUSA':
PSF_DIRECTORY = '/Volumes/Saturn_USA/PFS/'
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
PSF_DIRECTORY = '/work/ncaplar/'
else:
PSF_DIRECTORY = '/tigress/ncaplar/'
############################################################
TESTING_FOLDER = PSF_DIRECTORY + 'Testing/'
TESTING_PUPIL_IMAGES_FOLDER = TESTING_FOLDER + 'Pupil_Images/'
TESTING_WAVEFRONT_IMAGES_FOLDER = TESTING_FOLDER + 'Wavefront_Images/'
TESTING_FINAL_IMAGES_FOLDER = TESTING_FOLDER + 'Final_Images/'
class Zernike_Analysis(object):
"""Class for analysing results of the cluster run
"""
def __init__(self, date, obs, single_number, eps, arc=None, dataset=None,
multi_var=False, list_of_defocuses=None, verbosity=1):
"""!
@param[in] date date
@param[in] obs observation
@param[in] single_number single number determining which spot we are analyzing
@param[in] eps analysis parameter
@param[in] arc arc-lamp used
@param[in] dataset dataset number
@param[in] multi_var whether this is a multi-image analysis
@param[in] list_of_defocuses defocus positions being analyzed
"""
############
# initializing
###########
if arc is None:
arc = ''
self.date = date
self.obs = obs
self.single_number = single_number
self.eps = eps
self.arc = arc
self.multi_var = multi_var
self.list_of_defocuses = list_of_defocuses
method = 'P'
self.method = method
self.verbosity = verbosity
#############
# where are poststamps of spots located
if dataset == 0:
STAMPS_FOLDER = PSF_DIRECTORY+"Data_Nov_14/Stamps_cleaned/"
if dataset == 1:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Feb_5/Stamps_cleaned/"
if dataset == 2:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_May_28/Stamps_cleaned/"
if dataset == 3:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Jun_25/Stamps_cleaned/"
if dataset == 4 or dataset == 5:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Aug_14/Stamps_cleaned/"
if dataset == 6:
if socket.gethostname() == 'IapetusUSA':
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Nov_20_2020/Stamps_cleaned/"
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
STAMPS_FOLDER = '/work/ncaplar/ReducedData/Data_Nov_20/Stamps_cleaned/'
else:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_Nov_20/Stamps_cleaned/"
if dataset == 7:
STAMPS_FOLDER = PSF_DIRECTORY+"ReducedData/Data_May_21_2021/Stamps_cleaned/"
if dataset == 8:
if socket.gethostname() == 'IapetusUSA':
STAMPS_FOLDER = '/Volumes/Saturn_USA/PFS/'+"ReducedData/Data_May_21/Stamps_cleaned/"
elif socket.gethostname() == 'pfsa-usr01-gb.subaru.nao.ac.jp' or \
socket.gethostname() == 'pfsa-usr02-gb.subaru.nao.ac.jp':
STAMPS_FOLDER = '/work/ncaplar/ReducedData/Data_May_25_2021/Stamps_cleaned/'
else:
STAMPS_FOLDER = '/tigress/ncaplar/ReducedData/Data_May_25_2021/Stamps_cleaned/'
print('STAMPS_FOLDER: '+str(STAMPS_FOLDER))
# which observation numbers are associated with each dataset
if dataset == 0:
if arc is not None:
if arc == "HgAr":
single_number_focus = 8603
elif arc == "Ne":
single_number_focus = 8693
if dataset == 1:
# F/3.4 stop
if arc is not None:
if arc == "HgAr":
single_number_focus = 11748
obs_possibilites = np.array([11796, 11790, 11784, 11778, 11772, 11766, 11760, 11754,
11748, 11748, 11694, 11700, 11706, 11712, 11718, 11724,
11730, 11736])
elif arc == "Ne":
single_number_focus = 11748+607
obs_possibilites = np.array([12403, 12397, 12391, 12385, 12379, 12373,
12367, 12361, 12355, 12355, 12349, 12343,
12337, 12331, 12325, 12319, 12313, 12307])
if dataset == 2:
# F/2.8 stop
if arc is not None:
if arc == "HgAr":
single_number_focus = 17017+54
obs_possibilites = np.array([17023, 17023+6, 17023+12, 17023+18, 17023+24, 17023+30,
17023+36, 17023+42, -99, 17023+48, 17023+54, 17023+60,
17023+66, 17023+72, 17023+78, 17023+84, 17023+90, 17023+96,
17023+48])
if arc == "Ne":
single_number_focus = 16292
obs_possibilites = np.array([16238+6, 16238+12, 16238+18,
16238+24, 16238+30, 16238+36,
16238+42, 16238+48, -99, 16238+54,
16238+60, 16238+66, 16238+72,
16238+78, 16238+84, 16238+90,
16238+96, 16238+102, 16238+54])
if arc == "Kr":
single_number_focus = 17310+54
obs_possibilites = np.array([17310+6, 17310+12, 17310+18,
17310+24, 17310+30, 17310+36,
17310+42, 17310+48, -99, 17310+54,
17310+60, 17310+66, 17310+72,
17310+78, 17310+84, 17310+90,
17310+96, 17310+102, 17310+54])
if dataset == 3:
# F/2.5 stop
if arc is not None:
if arc == "HgAr":
single_number_focus = 19238+54
obs_possibilites = np.array([19238, 19238+6, 19238+12,
19238+18, 19238+24, 19238+30,
19238+36, 19238+42, -99, 19238+48,
19238+54, 19238+60, 19238+66,
19238+72, 19238+78, 19238+84,
19238+90, 19238+96, 19238+48])
elif arc == "Ne":
single_number_focus = 19472
obs_possibilites = np.array([19472+6, 19472+12, 19472+18,
19472+24, 19472+30, 19472+36,
19472+42, 19472+48, -99, 19472+54,
19472+60, 19472+66, 19472+72,
19472+78, 19472+84, 19472+90,
19472+96, 19472+102, 19472+54])
if dataset == 4:
# F/2.8 stop, July LAM data, full defocus
if arc is not None:
if arc == "HgAr":
single_number_focus = 21346+54
obs_possibilites = np.array([21346+6, 21346+12, 21346+18,
21346+24, 21346+30, 21346+36,
21346+42, 21346+48, -99, 21346+54,
21346+60, 21346+66, 21346+72,
21346+78, 21346+84, 21346+90,
21346+96, 21346+102, 21346+48])
if arc == "Ne":
single_number_focus = 21550+54
obs_possibilites = np.array([21550+6, 21550+12, 21550+18,
21550+24, 21550+30, 21550+36,
21550+42, 21550+48, -99, 21550+54,
21550+60, 21550+66, 21550+72,
21550+78, 21550+84, 21550+90,
21550+96, 21550+102, 21550+54])
if str(arc) == "Kr":
single_number_focus = 21754+54
obs_possibilites = np.array([21754+6, 21754+12, 21754+18,
21754+24, 21754+30, 21754+36,
21754+42, 21754+48, -99, 21754+54,
21754+60, 21754+66, 21754+72,
21754+78, 21754+84, 21754+90,
21754+96, 21754+102, 21754+54])
if dataset == 5:
# F/2.8 stop, July LAM data, fine defocus
if arc == 'HgAr':
obs_possibilites = np.arange(21280, 21280+11*6, 6)
if arc == 'Ne':
obs_possibilites = np.arange(21484, 21484+11*6, 6)
if arc == 'Kr':
obs_possibilites = np.arange(21688, 21688+11*6, 6)
if dataset == 6:
if arc == 'Ar':
single_number_focus = 34341+48
obs_possibilites = np.array([34341, 34341+6, 34341+12,
34341+18, 34341+24, 34341+30,
34341+36, 34341+42, 34341+48, 34341+48,
34341+54, 34341+60, 34341+66,
34341+72, 34341+78, 34341+84,
34341+90, 34341+96, 21346+48])
if arc == 'Ne':
single_number_focus = 34217+48
obs_possibilites = np.array([34217, 34217+6, 34217+12,
34217+18, 34217+24, 34217+30,
34217+36, 34217+42, 34217+48, 34217+48,
34217+54, 34217+60, 34217+66,
34217+72, 34217+78, 34217+84,
34217+90, 34217+96, 34217+48])
if arc == 'Kr':
single_number_focus = 34561+48
obs_possibilites = np.array([34561, 34561+6, 34561+12,
34561+18, 34561+24, 34561+30,
34561+36, 34561+42, 34561+48, 34561+48,
34561+54, 34561+60, 34561+66,
34561+72, 34561+78, 34561+84,
34561+90, 34561+96, 34561+48])
if dataset == 7:
# if str(arc) == "Ar":
# single_number_focus=34341+48
if str(arc) == "Ne":
single_number_focus = 27677
if multi_var is True:
obs_multi = 27719
obs_possibilites = np.array([27713, -999, 27683,
-999, -999, -999, -999,
-999, 27677, -999,
-999, -999, -999,
-999, -999, 27698,
-999, 27719, -999])
if dataset == 8:
if arc == 'Ar':
single_number_focus = 51485+8*12
obs_possibilites = np.array([51485, 51485+12, 51485+2*12,
51485+3*12, 51485+4*12, 51485+5*12,
51485+6*12, 51485+7*12, 51485+8*12,
52085+8*12, 51485+9*12, 51485+10*12,
51485+11*12, 51485+12*12, 51485+13*12,
51485+14*12, 51485+15*12, 51485+16*12, 51485+8*12])
if arc == 'Ne':
single_number_focus = 59655+8*12
obs_possibilites = np.array([59655, 59655+12, 59655+2*12,
59655+3*12, 59655+4*12, 59655+5*12,
59655+6*12, 59655+7*12, 59655+8*12,
52085+8*12, 59655+9*12, 59655+10*12,
59655+11*12, 59655+12*12, 59655+13*12,
59655+14*12, 59655+15*12, 59655+16*12, 59655+8*12])
if arc == 'Kr':
single_number_focus = 52085+8*12
obs_possibilites = np.array([52085, 52085+12, 52085+2*12,
52085+3*12, 52085+4*12, 52085+5*12,
52085+6*12, 52085+7*12, 52085+8*12,
52085+8*12, 52085+9*12, 52085+10*12,
52085+11*12, 52085+12*12, 52085+13*12,
52085+14*12, 52085+15*12, 52085+16*12, 52085+8*12])
# elif str(arc)=="Kr":
# single_number_focus=34561+48
# if multi ??
if multi_var is True and dataset < 7:
obs_multi = single_number_focus + 48
# if multi ??
if multi_var is True and dataset == 8:
obs_multi = single_number_focus + 96
if multi_var is True:
self.obs_multi = obs_multi
obs_single = obs
self.obs_single = obs_single
label = ['m4', 'm35', 'm3',
'm25', 'm2', 'm15',
'm1', 'm05', '0', '0d',
'p05', 'p1', 'p15',
'p2', 'p25', 'p3',
'p35', 'p4', '0p']
label_fine_defocus = ['m05ff', 'm04ff', 'm03ff',
'm02ff', 'm01ff', '0ff',
'p01ff', 'p02ff', 'p03ff',
'p04ff', 'p05ff']
if type(obs) == str:
labelInput = obs
obs = obs_possibilites[label.index(labelInput)]
obs_int = int(obs)
if dataset in [0, 1, 2, 3, 4, 6, 7]:
labelInput = label[list(obs_possibilites).index(obs_int)]
if dataset in [5]:
labelInput = label_fine_defocus[list(obs_possibilites).index(obs_int)]
if multi_var is True:
if self.verbosity == 1:
print('labelInput: ' + str(labelInput))
print('self.single_number: '+str(self.single_number))
index_of_single_image_in_list_of_images = self.list_of_defocuses.index(labelInput)
self.index_of_single_image_in_list_of_images = index_of_single_image_in_list_of_images
list_of_obs = []
if multi_var is True:
for labelInput in self.list_of_defocuses:
if dataset in [0, 1, 2, 3, 4, 6, 7, 8]:
obs_single = obs_possibilites[label.index(labelInput)]
if dataset in [5]:
obs_single = obs_possibilites[label_fine_defocus.index(labelInput)]
list_of_obs.append(obs_single)
else:
list_of_obs.append(obs_single)
##########################
# import data
##########################
if multi_var is True:
list_of_sci_images = []
list_of_mask_images = []
list_of_var_images = []
if self.verbosity == 1:
print('list_of_defocuses: ' + str(self.list_of_defocuses))
print('list_of_obs: ' + str(list_of_obs))
# for obs_v in list_of_obs:
# if obs_v>0:
# sci_image =np.load(STAMPS_FOLDER+'sci'+str(obs_v)+str(single_number)+
# str(arc)+'_Stacked.npy')
# mask_image =np.load(STAMPS_FOLDER+'mask'+str(obs_v)+str(single_number)+
# str(arc)+'_Stacked.npy')
# var_image =np.load(STAMPS_FOLDER+'var'+str(obs_v)+str(single_number)+
# str(arc)+'_Stacked.npy')
# else:
# # if the image is not avaliable (has obs_v negative) make some dummy images
# sci_image=np.ones((20,20))
# mask_image=np.ones((20,20))
# var_image=np.ones((20,20))
for obs_v in list_of_obs:
try:
sci_image = np.load(STAMPS_FOLDER+'sci'+str(obs_v)+
str(single_number)+str(arc)+'_Stacked.npy')
mask_image = np.load(STAMPS_FOLDER+'mask'+str(obs_v)+
str(single_number)+str(arc)+'_Stacked.npy')
var_image = np.load(STAMPS_FOLDER+'var'+str(obs_v)+
str(single_number)+str(arc)+'_Stacked.npy')
except Exception:
# if the image is not available (has obs_v negative) make some dummy images
sci_image = np.ones((20,20))
mask_image = np.ones((20,20))
var_image = np.ones((20,20))
list_of_sci_images.append(sci_image)
list_of_mask_images.append(mask_image)
list_of_var_images.append(var_image)
sci_image =np.load(STAMPS_FOLDER+'sci'+str(obs)+str(single_number)+str(arc)+'_Stacked.npy')
mask_image =np.load(STAMPS_FOLDER+'mask'+str(obs)+str(single_number)+str(arc)+'_Stacked.npy')
var_image =np.load(STAMPS_FOLDER+'var'+str(obs)+str(single_number)+str(arc)+'_Stacked.npy')
try:
sci_image_focus_large =np.load(STAMPS_FOLDER+'sci'+str(single_number_focus)+str(single_number)+str(arc)+'_Stacked_large.npy')
var_image_focus_large =np.load(STAMPS_FOLDER+'var'+str(single_number_focus)+str(single_number)+str(arc)+'_Stacked_large.npy')
except Exception:
pass
self.list_of_sci_images=list_of_sci_images
self.list_of_mask_images=list_of_mask_images
self.list_of_var_images=list_of_var_images
self.sci_image=sci_image
self.var_image=var_image
self.mask_image=mask_image
self.STAMPS_FOLDER=STAMPS_FOLDER
if dataset==1:
if arc=="HgAr":
finalArc=finalHgAr_Feb2019
elif arc=="Ne":
finalArc=finalNe_Feb2019
else:
print("Not recognized arc-line")
if dataset==2:
with open(PSF_DIRECTORY+'ReducedData/Data_May_28/Dataframes/finalNe_May2019.pkl', 'rb') as f:
finalNe_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_May_28/Dataframes/finalHgAr_May2019.pkl', 'rb') as f:
finalHgAr_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_May_28/Dataframes/finalKr_May2019.pkl', 'rb') as f:
finalKr_May2019=pickle.load(f)
if arc=="HgAr":
finalArc=finalHgAr_May2019
elif arc=="Ne":
finalArc=finalNe_May2019
elif arc=="Kr":
finalArc=finalKr_May2019
else:
print("Not recognized arc-line")
if dataset==3:
with open(PSF_DIRECTORY+'ReducedData/Data_Jun_25/Dataframes/finalNe_May2019.pkl', 'rb') as f:
finalNe_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Jun_25/Dataframes/finalHgAr_May2019.pkl', 'rb') as f:
finalHgAr_May2019=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Jun_25/Dataframes/finalKr_May2019.pkl', 'rb') as f:
finalKr_May2019=pickle.load(f)
if arc=="HgAr":
finalArc=finalHgAr_May2019
elif arc=="Ne":
finalArc=finalNe_May2019
else:
print("Not recognized arc-line")
if dataset==4 or dataset==5:
with open(PSF_DIRECTORY+'ReducedData/Data_Aug_14/Dataframes/finalHgAr_Feb2020', 'rb') as f:
print(f)
finalHgAr_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Aug_14/Dataframes/finalNe_Feb2020', 'rb') as f:
finalNe_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Aug_14/Dataframes/finalKr_Feb2020', 'rb') as f:
finalKr_Feb2020_dataset=pickle.load(f)
if arc=="HgAr":
finalArc=finalHgAr_Feb2020_dataset
elif arc=="Ne":
finalArc=finalNe_Feb2020_dataset
elif arc=="Kr":
finalArc=finalKr_Feb2020_dataset
else:
print("Not recognized arc-line")
if dataset==6 or dataset==7:
if socket.gethostname()=='IapetusUSA':
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalHgAr_Feb2020', 'rb') as f:
finalHgAr_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalNe_Feb2020', 'rb') as f:
finalNe_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalKr_Feb2020', 'rb') as f:
finalKr_Feb2020_dataset=pickle.load(f)
with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20_2020/Dataframes/finalAr_Feb2020', 'rb') as f:
finalAr_Feb2020_dataset=pickle.load(f)
else:
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalHgAr_Feb2020', 'rb') as f:
# finalHgAr_Feb2020_dataset=pickle.load(f)
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalNe_Feb2020', 'rb') as f:
# finalNe_Feb2020_dataset=pickle.load(f)
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalKr_Feb2020', 'rb') as f:
# finalKr_Feb2020_dataset=pickle.load(f)
#with open(PSF_DIRECTORY+'ReducedData/Data_Nov_20/Dataframes/finalAr_Feb2020', 'rb') as f:
# finalAr_Feb2020_dataset=pickle.load(f)
finalHgAr_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalHgAr_Feb2020',
allow_pickle=True)
finalNe_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalNe_Feb2020',
allow_pickle=True)
finalKr_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalKr_Feb2020',
allow_pickle=True)
finalAr_Feb2020_dataset = np.load(PSF_DIRECTORY
+ 'ReducedData/Data_Nov_20/Dataframes/finalAr_Feb2020',
allow_pickle=True)
if arc=="HgAr":
finalArc=finalHgAr_Feb2020_dataset
elif arc=="Ne":
finalArc=finalNe_Feb2020_dataset
elif arc=="Kr":
finalArc=finalKr_Feb2020_dataset
elif arc=="Ar":
finalArc=finalAr_Feb2020_dataset
else:
print("Not recognized arc-line")
if dataset==8:
if socket.gethostname()=='IapetusUSA':
with open('/Volumes/Saturn_USA/PFS/'+'ReducedData/Data_May_21/DataFrames/finalNe_Jul2021', 'rb') as f:
finalArc=pickle.load(f)
with open('/Volumes/Saturn_USA/PFS/'+'ReducedData/Data_May_21/DataFrames/finalKr_Jul2021', 'rb') as f:
finalArc=pickle.load(f)
with open('/Volumes/Saturn_USA/PFS/'+'ReducedData/Data_May_21/DataFrames/finalAr_Jul2021', 'rb') as f:
finalArc=pickle.load(f)
else:
finalNe_Feb2020_dataset=np.load(PSF_DIRECTORY+'ReducedData/Data_May_25_2021/Dataframes/finalNe_Jul2021')
finalKr_Feb2020_dataset=np.load(PSF_DIRECTORY+'ReducedData/Data_May_25_2021/Dataframes/finalKr_Jul2021')
finalAr_Feb2020_dataset=np.load(PSF_DIRECTORY+'ReducedData/Data_May_25_2021/Dataframes/finalAr_Jul2021')
if arc=="Ne":
finalArc=finalNe_Feb2020_dataset
elif arc=="Kr":
finalArc=finalKr_Feb2020_dataset
elif arc=="Ar":
finalArc=finalAr_Feb2020_dataset
else:
print("Not recognized arc-line")
##########################
# import column names
##########################
"""
columns=['z4','z5','z6','z7','z8','z9','z10','z11',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'radiometricEffect','radiometricExponent',
'x_ilum','y_ilum',
'x_fiber','y_fiber','effective_ilum_radius','frd_sigma','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_radius','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
columns22=['z4','z5','z6','z7','z8','z9','z10','z11',
'z12','z13','z14','z15','z16','z17','z18','z19','z20','z21','z22',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'radiometricEffect','radiometricExponent','x_ilum','y_ilum',
'x_fiber','y_fiber','effective_radius_illumination',
'frd_sigma','frd_lorentz_factor','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
"""
columns=['z4','z5','z6','z7','z8','z9','z10','z11',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'wide_0','wide_23','wide_43','misalign',
'x_fiber','y_fiber','effective_ilum_radius','frd_sigma','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_radius','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
columns22=['z4','z5','z6','z7','z8','z9','z10','z11',
'z12','z13','z14','z15','z16','z17','z18','z19','z20','z21','z22',
'hscFrac','strutFrac','dxFocal','dyFocal','slitFrac','slitFrac_dy',
'wide_0','wide_23','wide_43','misalign',
'x_fiber','y_fiber','effective_radius_illumination',
'frd_sigma','frd_lorentz_factor','det_vert','slitHolder_frac_dx',
'grating_lines','scattering_slope','scattering_amplitude',
'pixel_effect','fiber_r','flux']
columns22_analysis=columns22+['chi2','chi2max']
self.columns=columns
self.columns22=columns22
self.columns22_analysis=columns22_analysis
##########################
# where are results from Tiger placed
##########################
############################################################
# name your directory where you want to have files!
if socket.gethostname()=='IapetusUSA':
RESULT_FOLDER=PSF_DIRECTORY+'TigerAnalysis/ResultsFromTiger/'+date+'/'
if os.path.exists(RESULT_FOLDER):
pass
else:
RESULT_FOLDER='/Volumes/My Passport for Mac/Old_Files/PFS/TigerAnalysis/ResultsFromTiger/'+date+'/'
if os.path.exists(RESULT_FOLDER):
pass
else:
RESULT_FOLDER='/Volumes/Saturn_USA/PFS/TigerAnalysis/ResultsFromTiger/'+date+'/'
else:
# if the analysis is done on Tiger
RESULT_FOLDER='/tigress/ncaplar/Results/'
self.RESULT_FOLDER=RESULT_FOLDER
############################################################
IMAGES_FOLDER=PSF_DIRECTORY+'/Images/'+date+'/'
if not os.path.exists(IMAGES_FOLDER):
os.makedirs(IMAGES_FOLDER)
self.IMAGES_FOLDER=IMAGES_FOLDER
#print('finalArc[close].loc[int(single_number)]'+str(finalArc['close'].loc[int(single_number)]))
if finalArc['close'].loc[int(single_number)]=='1' or finalArc['close'].loc[int(single_number)]==1:
double_sources=False
else:
double_sources=True
#print('double_sources'+str(double_sources))
self.double_sources=double_sources
double_sources_positions_ratios=finalArc.loc[int(single_number)][['second_offset','second_ratio']].values
self.double_sources_positions_ratios=double_sources_positions_ratios
if self.verbosity==1:
print('analyzing label: '+str(obs))
print('double_sources_positions_ratios for this spot is: '+str(double_sources_positions_ratios))
def return_double_sources(self):
return self.double_sources,self.double_sources_positions_ratios
def return_lists_of_images(self):
assert self.multi_var==True
return self.list_of_sci_images,self.list_of_var_images,self.list_of_mask_images
def return_index_of_single_image_in_list_of_images(self):
return self.index_of_single_image_in_list_of_images
def return_columns(self):
return self.columns,self.columns22,self.columns22_analysis
def create_list_of_var_or_ln_sums(self,sigma_offset=0):
"""
Gives the log-likelihood expected for a model with reduced chi**2 = 1.
"""
list_of_var_sums=[]
for i in range(len(self.list_of_var_images)):
# taking from create_chi_2_almost function in LN_PFS_single
mask_image=self.list_of_mask_images[i]
var_image=self.list_of_var_images[i]
# array that has True for values which are good and False for bad values
inverted_mask=~mask_image.astype(bool)
#
var_image_masked=var_image*inverted_mask
var_image_masked_without_nan = var_image_masked.ravel()[var_image_masked.ravel()>0]
var_sum=-(1/2)*(len(var_image_masked_without_nan)*sigma_offset+np.sum(np.log(2*np.pi*var_image_masked_without_nan)))
list_of_var_sums.append(var_sum)
array_of_var_sums=np.array(list_of_var_sums)
return array_of_var_sums
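# Aside on the formula above, stated here for clarity: for Gaussian noise the
# log-likelihood of an image is
#
#     ln L = -1/2 * sum_i [ (data_i - model_i)**2 / var_i + ln(2*pi*var_i) ]
#
# For a model with reduced chi**2 = 1 the first term contributes one unit per
# unmasked pixel, which is what `sigma_offset` injects: sigma_offset=0 returns
# only the variance-dependent normalization, sigma_offset=1 the full expected
# log-likelihood at chi**2 = 1.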
def create_likelihood(self):
if self.multi_var is True:
self.obs = self.obs_multi
# self.len_of_chains()
# Swarm1
# likechain_Swarm1=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+
# '_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Swarm1.npy')
likechain_Swarm1 = self.likechain_Swarm1
like_min_swarm1 = []
for i in range(likechain_Swarm1.shape[0]):
like_min_swarm1.append(np.min(np.abs(likechain_Swarm1[i])))
#
if self.chain_Emcee2 is not None:
# Emcee1
# likechain_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+
# str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+
# str(self.arc)+'Emcee2.npy')
likechain_Emcee1 = self.likechain_Emcee1
like_min_Emcee1 = []
for i in range(likechain_Emcee1.shape[1]):
like_min_Emcee1.append(np.min(np.abs(likechain_Emcee1[:, i])))
# Swarm2
likechain_Swarm2 = np.load(self.RESULT_FOLDER + 'likechain' + str(self.date) + '_Single_' +
str(self.method) + '_'+str(self.obs) + str(self.single_number) +
str(self.eps) + str(self.arc)+'Swarm2.npy')
likechain_Swarm2 = self.likechain_Swarm2
like_min_swarm2 = []
for i in range(likechain_Swarm2.shape[0]):
like_min_swarm2.append(np.min(np.abs(likechain_Swarm2[i])))
# Emcee 2
# chain_Emcee3=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+
# '_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
# likechain_Emcee3=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+
# str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
chain_Emcee2 = self.chain_Emcee2
likechain_Emcee2 = self.likechain_Emcee2
# get chain number 0, which has the lowest temperature
# if len(likechain_Emcee3)<=4:
# likechain0_Emcee3=likechain_Emcee3[0]
# chain0_Emcee3=chain_Emcee3[0]
# else:
# likechain0_Emcee3=likechain_Emcee3
# chain0_Emcee3=chain_Emcee3
# check the shape of the chain (number of walkers, number of steps, number of parameters)
if self.verbosity == 1:
print('(number of walkers, number of steps, number of parameters for Emcee): ' +
str(chain_Emcee2.shape))
# see the best chain
minchain = chain_Emcee2[np.abs(likechain_Emcee2) == np.min(np.abs(likechain_Emcee2))][0]
# print(minchain)
self.minchain = minchain
like_min_Emcee2 = []
for i in range(likechain_Emcee2.shape[1]):
like_min_Emcee2.append(np.min(np.abs(likechain_Emcee2[:, i])))
like_min = like_min_swarm1 + like_min_Emcee1+like_min_swarm2 + like_min_Emcee2
else:
# see the best chain
minchain = self.chain_Swarm1[np.abs(self.likechain_Swarm1) ==
np.min(np.abs(self.likechain_Swarm1))][0]
# print(minchain)
self.minchain = minchain
like_min = like_min_swarm1
list_of_var_sums = self.create_list_of_var_or_ln_sums(0)
# print('list_of_var_sums: '+str(list_of_var_sums))
array_of_var_sum = np.array(list_of_var_sums)
max_of_array_of_var_sum = np.max(array_of_var_sum)
renormalization_of_var_sum = array_of_var_sum/max_of_array_of_var_sum
zero_sigma_ln = np.mean(list_of_var_sums/renormalization_of_var_sum)
self.zero_sigma_ln = zero_sigma_ln
list_of_var_sums_1 = self.create_list_of_var_or_ln_sums(1)
one_sigma_ln = np.mean(list_of_var_sums_1/renormalization_of_var_sum)
self.one_sigma_ln = one_sigma_ln
# print(len(like_min))
if self.verbosity == 1:
print('minimal likelihood is: '+str(np.min(like_min)))
min_like_min = np.min(like_min)
self.min_like_min = min_like_min
# chi2 = (np.array(like_min)*(2)-np.sum(np.log(2*np.pi*self.var_image)))/(self.sci_image.shape[0])**2
# min_chi2 = -(min_like_min+zero_sigma_ln)/(one_sigma_ln-zero_sigma_ln)
# print('average chi2 reduced is: ' + str(min_chi2))
return minchain, like_min
def len_of_chains(self):
if self.multi_var==True:
self.obs=self.obs_multi
self.create_chains_Emcee_1()
self.create_chains_Emcee_2()
self.create_chains_swarm_1()
self.create_chains_swarm_2()
# (number of walkers, number of steps, number of parameters) for Emcee
# (number of steps, number of walkers, number of parameters) for Swarm
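        # Illustrative example (shapes assumed, not taken from a real run): with 64 walkers,
        # 300 Emcee steps, 30 Swarm steps and 42 parameters one would have
        #   self.chain_Emcee2.shape == (64, 300, 42)  -> number of steps read from shape[1]
        #   self.chain_Swarm1.shape == (30, 64, 42)   -> number of steps read via len(...)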
if self.chain_Emcee2 is None:
print(self.chain_Swarm1.shape)
return [len(self.chain_Swarm1),0,0,0]
else:
print(self.chain_Swarm1.shape,self.chain_Emcee2.shape,self.chain_Swarm2.shape,self.chain_Emcee3.shape)
return [len(self.chain_Swarm1),(self.chain_Emcee2).shape[1],len(self.chain_Swarm2),(self.chain_Emcee3).shape[1]]
def create_chains(self):
#chain_Emcee1=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee1.npy')
#likechain_Emcee1=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee1.npy')
        # get chain number 0, which has the lowest temperature
#likechain0_Emcee1=likechain_Emcee1[0]
#chain0_Emcee1=chain_Emcee1[0]
#chain_Emcee2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee2.npy')
#likechain_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+'Emcee2.npy')
#likechain0_Emcee2=likechain_Emcee2[0]
#chain0_Emcee2=chain_Emcee2[0]
if self.multi_var==True:
self.obs=self.obs_multi
chain_Emcee3=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
likechain_Emcee3=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
        # get chain number 0, which has the lowest temperature
likechain0_Emcee3=likechain_Emcee3
chain0_Emcee3=chain_Emcee3
self.chain0_Emcee3=chain0_Emcee3
self.likechain0_Emcee3=likechain0_Emcee3
return chain0_Emcee3,likechain0_Emcee3
def create_chains_Emcee_1(self):
"""
        get chain and likelihood chain for the first run of Emcee
        unfortunately the file name is ``Emcee2'', for historical reasons
Returns
-------
chain0_Emcee1 : chain
likechain0_Emcee1 : likelihood chain
"""
if self.multi_var==True:
self.obs=self.obs_multi
try:
chain_Emcee1=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee2.npy')
likechain_Emcee1=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee2.npy')
self.chain_Emcee1=chain_Emcee1
self.likechain_Emcee1=likechain_Emcee1
except:
self.chain_Emcee1=None
self.likechain_Emcee1=None
return self.chain_Emcee1,self.likechain_Emcee1
def create_chains_Emcee_2(self):
"""
get chain and likelihood chain for the second run of Emcee
        unfortunately the file name is ``Emcee3'', for historical reasons
"""
if self.multi_var==True:
self.obs=self.obs_multi
try:
chain_Emcee2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
likechain_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
self.chain_Emcee2=chain_Emcee2
self.likechain_Emcee2=likechain_Emcee2
except:
self.chain_Emcee2=None
self.likechain_Emcee2=None
return self.chain_Emcee2,self.likechain_Emcee2
def create_Emcee2_stack(self):
if self.multi_var==True:
self.obs=self.obs_multi
chain0_Emcee2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
likechain0_Emcee2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Emcee3.npy')
for i in range(chain0_Emcee2.shape[1]):
if i==0:
chain0_Emcee2_reshaped=chain0_Emcee2[:,0]
likechain0_Emcee2_reshaped=likechain0_Emcee2[:,0]
else:
chain0_Emcee2_reshaped=np.vstack((chain0_Emcee2_reshaped,chain0_Emcee2[:,i]))
likechain0_Emcee2_reshaped=np.vstack((likechain0_Emcee2_reshaped,likechain0_Emcee2[:,i]))
chain0_stack=chain0_Emcee2_reshaped
likechain0_stack=likechain0_Emcee2_reshaped.ravel()
likechain0_stack=likechain0_stack-np.max(likechain0_stack)
return chain0_stack,likechain0_stack
def create_chains_swarm_1(self):
"""get chain and likelihood chain from the swarm analysis
"""
if self.multi_var is True:
self.obs = self.obs_multi
try:
chain_Swarm1 = np.load(self.RESULT_FOLDER+'chain' + str(self.date) + '_Single_' +
str(self.method) + '_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc)+'Swarm1.npy')
likechain_Swarm1 = np.load(self.RESULT_FOLDER + 'likechain' + str(self.date) + '_Single_' +
str(self.method) + '_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc)+'Swarm1.npy')
print('create_chains_swarm_1: Swarm1 and likechainSwarm1 found')
print('Path searched was: ' + str(self.RESULT_FOLDER+'chain' + str(self.date) + '_Single_' +
str(self.method) + '_' + str(self.obs) +
str(self.single_number) + str(self.eps) + str(self.arc) +
'Swarm1.npy'))
        except:  # noqa
            print('Swarm1 or likechainSwarm1 not found')
            print('Path searched for chain was: ' +
                  str(self.RESULT_FOLDER + 'chain' + str(self.date) +
                      '_Single_' + str(self.method)+'_' + str(self.obs) +
                      str(self.single_number) + str(self.eps) + str(self.arc) + 'Swarm1.npy'))
            print('Path searched for likechain was: ' +
                  str(self.RESULT_FOLDER + 'likechain' + str(self.date) +
                      '_Single_' + str(self.method)+'_' + str(self.obs) +
                      str(self.single_number) + str(self.eps) + str(self.arc) + 'Swarm1.npy'))
            # fall back to None so the attribute assignments below do not raise a NameError
            chain_Swarm1 = None
            likechain_Swarm1 = None
self.chain_Swarm1 = chain_Swarm1
self.likechain_Swarm1 = likechain_Swarm1
return chain_Swarm1, likechain_Swarm1
def create_chains_swarm_2(self):
"""
get chain and likelihood chain for the second run of cosmoHammer optimizer
"""
if self.multi_var==True:
self.obs=self.obs_multi
try:
chain_Swarm2=np.load(self.RESULT_FOLDER+'chain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Swarm2.npy')
likechain_Swarm2=np.load(self.RESULT_FOLDER+'likechain'+str(self.date)+'_Single_'+str(self.method)+'_'+str(self.obs)+\
str(self.single_number)+str(self.eps)+str(self.arc)+'Swarm2.npy')
self.chain_Swarm2=chain_Swarm2
self.likechain_Swarm2=likechain_Swarm2
except:
self.chain_Swarm2=None
self.likechain_Swarm2=None
return self.chain_Swarm2,self.likechain_Swarm2
def create_allparameters_single(self,mm,array_of_polyfit_1_parameterizations,zmax=None):
"""
copied from multi
        transforms linear fits of the parametrizations as a function of defocus into a form acceptable for creating single images
        workhorse function used by create_list_of_allparameters
        @param mm [float] defocus of the slit
        @param array_of_polyfit_1_parameterizations parameters describing the linear fit for the parameters as a function of focus
        @param zmax largest Zernike used
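
        Illustrative sketch (values assumed, not taken from real data): for zmax=11
        the first 8 entries of ``array_of_polyfit_1_parameterizations`` are pairs of
        linear-fit coefficients that are evaluated at the defocus ``mm`` via
        ``self.value_at_defocus``, while for the remaining "global" parameters only
        the second element of each pair is copied into the output:

            allparameters_single = self.create_allparameters_single(
                mm=1.5, array_of_polyfit_1_parameterizations=fits, zmax=11)
            # allparameters_single[:8] -> Zernike values evaluated at defocus mm
            # allparameters_single[8:] -> element [1] of each remaining parametrization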
"""
        if zmax is None:
zmax=11
#for single case, up to z11
if zmax==11:
z_parametrizations=array_of_polyfit_1_parameterizations[:8]
g_parametrizations=array_of_polyfit_1_parameterizations[8:]
allparameters_proposal_single=np.zeros((8+len(g_parametrizations)))
for i in range(0,8,1):
allparameters_proposal_single[i]=self.value_at_defocus(mm,z_parametrizations[i][0],z_parametrizations[i][1])
for i in range(len(g_parametrizations)):
allparameters_proposal_single[i+8]=g_parametrizations[i][1]
if zmax==22:
z_parametrizations=array_of_polyfit_1_parameterizations[:19]
g_parametrizations=array_of_polyfit_1_parameterizations[19:]
allparameters_proposal_single=np.zeros((19+len(g_parametrizations)))
for i in range(0,19,1):
#print(str([i,mm,z_parametrizations[i]]))
allparameters_proposal_single[i]=self.value_at_defocus(mm,z_parametrizations[i][0],z_parametrizations[i][1])
for i in range(len(g_parametrizations)):
allparameters_proposal_single[19+i]=g_parametrizations[i][1]
return allparameters_proposal_single
def entrance_exit_pupil_plot(self):
ilum=np.load(TESTING_PUPIL_IMAGES_FOLDER+'ilum.npy')
radiometricEffectArray=np.load(TESTING_PUPIL_IMAGES_FOLDER+'radiometricEffectArray.npy')
ilum_radiometric=np.load(TESTING_PUPIL_IMAGES_FOLDER+'ilum_radiometric.npy')
plt.figure(figsize=(30,8))
plt.subplot(131)
plt.imshow(ilum,origin='lower',vmax=1,vmin=0)
plt.title('entrance pupil')
plt.colorbar()
plt.subplot(132)
plt.title('ent->exit pupil')
plt.imshow(radiometricEffectArray,origin='lower',vmax=1,vmin=0)
plt.colorbar()
plt.subplot(133)
plt.title('exit pupil')
plt.imshow(ilum_radiometric,origin='lower',vmax=1,vmin=0)
plt.colorbar()
def wavefront_plot(self):
wf_full = np.load(TESTING_WAVEFRONT_IMAGES_FOLDER + 'wf_full.npy')
plt.figure(figsize=(36, 6))
plt.subplot(141)
plt.imshow(wf_full)
plt.colorbar()
plt.subplot(142)
plt.imshow(np.real(np.exp(2j*np.pi * wf_full/800)))
plt.colorbar()
plt.subplot(143)
plt.imshow(np.imag(np.exp(2j*np.pi * wf_full/800)))
plt.colorbar()
def illumination_wavefront_plot(self,return_Images=False):
ilum=np.load(TESTING_PUPIL_IMAGES_FOLDER+'ilum.npy')
wf_full=np.load(TESTING_WAVEFRONT_IMAGES_FOLDER+'wf_full.npy')
wf_full_fake_0=np.load(TESTING_WAVEFRONT_IMAGES_FOLDER+'wf_full_fake_0.npy')
midpoint=int(len(ilum)/2)
ilum_zoom=ilum[int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4),int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4)]
plt.figure(figsize=(28,8))
plt.subplot(131)
plt.imshow(ilum_zoom,origin='lower',vmax=1,vmin=0)
plt.title('illumination of the pupil',fontsize=25)
plt.subplot(132)
ilum_1=np.copy(ilum)
ilum_1[ilum_1>0.01]=1
wavefront=ilum_1*wf_full
wavefront=wavefront/800
wavefront_zoom=wavefront[int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4),int(midpoint-len(ilum)/4):\
int(midpoint+len(ilum)/4)]
plt.imshow(wavefront_zoom,cmap=plt.get_cmap('bwr'),vmax=np.max(np.abs(wavefront))*0.75,vmin=-np.max(np.abs(wavefront))*0.75)
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('wavefront [units of waves]',fontsize=25)
plt.subplot(133)
ilum_1=np.copy(ilum)
ilum_1[ilum_1>0.01]=1
wavefront=ilum_1*wf_full_fake_0
wavefront=wavefront/800
wavefront_0_zoom=wavefront[int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4),int(midpoint-len(ilum)/4):int(midpoint+len(ilum)/4)]
plt.imshow(wavefront_0_zoom,cmap=plt.get_cmap('bwr'),vmax=np.max(np.abs(wavefront))*0.75,vmin=-np.max(np.abs(wavefront))*0.75)
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('wavefront w.o. defocus [u. of waves]',fontsize=25)
if return_Images==True:
return ilum_zoom,wavefront_zoom,wavefront_0_zoom
def wavefront_gradient_plot(self):
wf_full=np.load(TESTING_WAVEFRONT_IMAGES_FOLDER+'wf_full.npy')
plt.figure(figsize=(30,8))
plt.subplot(131)
vgrad = np.gradient(wf_full)
fulgrad = np.sqrt(vgrad[0]**2 + vgrad[1]**2)
plt.title('gradient (magnitude)')
plt.imshow(fulgrad,cmap=plt.get_cmap('hot'), vmin = np.amin(fulgrad),vmax = np.amax(fulgrad))
plt.colorbar()
plt.subplot(132)
x, y = range(0, len(wf_full)), range(0,len(wf_full))
xi, yi = np.meshgrid(x, y)
plt.title('gradient (direction)')
plt.streamplot(xi, yi, vgrad[0], vgrad[1])
plt.subplot(133)
laplace_of_wf = scipy.ndimage.filters.laplace(wf_full)
plt.title('Laplacian')
plt.imshow(laplace_of_wf,cmap=plt.get_cmap('hot'), vmin = -1,vmax = 1)
plt.colorbar()
def create_basic_data_image(self,return_Images=False):
sci_image=self.sci_image
var_image=self.var_image
mask_image=self.mask_image
plt.figure(figsize=(30,8))
plt.subplot(131)
plt.imshow(sci_image,norm=LogNorm(vmin=1,vmax=np.max(sci_image)),origin='lower')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('sci_image')
plt.subplot(132)
plt.imshow(var_image,norm=LogNorm(vmin=1,vmax=np.max(sci_image)),origin='lower')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('var_image')
plt.subplot(133)
plt.imshow(sci_image,norm=LogNorm(vmin=1,vmax=np.max(sci_image)),origin='lower')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
plt.imshow(mask_image,origin='lower',vmin=0,vmax=np.max(mask_image),alpha=0.2)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('sci+mask_image')
if return_Images==True:
return sci_image,var_image,mask_image
def create_fitting_evolution_plot(self):
minchain, like_min = self.create_likelihood()
len_of_chains = self.len_of_chains()
# chain0_Emcee3,likechain0_Emcee3=self.create_chains()
# size=self.chain_swarm1.shape[1]
matplotlib.rcParams.update({'font.size': 18})
plt.figure(figsize=(24, 12))
plt.subplot(211)
plt.plot(np.linspace(0, len(like_min)-1, len(like_min)), like_min, 'blue', ls='-', marker='o')
plt.ylabel('likelihood')
plt.xlabel('steps')
plt.axvline(np.sum(len_of_chains[:1])+0.5, ls='--')
if np.min(like_min) < -self.zero_sigma_ln:
plt.ylim(np.min(like_min) * 0.95, 1.05 * np.max(like_min))
else:
plt.ylim(-self.zero_sigma_ln, 1.05 * np.max(like_min))
plt.axhline(self.min_like_min, ls='--')
# plt.axhline(-self.one_sigma_ln, ls='--', color='black')
plt.subplot(212)
plt.plot(np.linspace(0, len(like_min)-1, len(like_min)), np.log10(like_min),
'blue', ls='-', marker='o')
plt.ylabel('log10(likelihood)')
plt.xlabel('steps')
plt.axvline(np.sum(len_of_chains[:1])+0.5, ls='--')
def create_basic_comparison_plot(self, custom_model_image=None, custom_mask=None,
custom_sci_image=None, custom_var_image=None,
use_max_chi_scaling=False, use_max_flux_scaling=False,
show_flux_mask=False, show_impact_pixels_mask=False,
save=False, multi_background_factor=3):
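        """Compare a model PSF image against the observed science image.

        Summary added for clarity (inferred from the body below): draws a 2x2 panel
        figure with the model, the data, the raw residual and the chi map, optionally
        overlaying a flux mask and/or a mask of the most impactful pixels, and prints
        several reduced chi**2-style summary statistics.
        """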
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled = np.load(TESTING_FINAL_IMAGES_FOLDER +
'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus = optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus = custom_model_image
if custom_sci_image is None:
sci_image = self.sci_image
else:
sci_image = custom_sci_image
if custom_var_image is None:
var_image = self.var_image
else:
var_image = custom_var_image
mean_value_of_background_via_var = np.mean([np.median(var_image[0]),
np.median(var_image[-1]),
np.median(var_image[:, 0]),
np.median(var_image[:, -1])]) *\
multi_background_factor
mean_value_of_background_via_sci = np.mean([np.median(sci_image[0]),
np.median(sci_image[-1]),
np.median(sci_image[:, 0]),
np.median(sci_image[:, -1])]) *\
multi_background_factor
mean_value_of_background = np.max([mean_value_of_background_via_var,
mean_value_of_background_via_sci])
print(str(multi_background_factor) + 'x mean_value_of_background via sci is estimated to be: ' +
str(mean_value_of_background))
if type(show_flux_mask) == bool:
flux_mask = sci_image > (mean_value_of_background)
else:
flux_mask = sci_image > (show_flux_mask)
show_flux_mask = True
size = sci_image.shape[0]
# if size==40:
# dithering=2
# else:
# dithering=1
# dithering=1
if size == 20:
x_center = find_centroid_of_flux(res_iapetus)[0]
else:
x_center = (size/2)
left_limit = np.round(x_center-3.5)+0.5
right_limit = np.round(x_center+3.5)-0.5
chi2_image = (sci_image-res_iapetus)**2/((1)*var_image)
if show_impact_pixels_mask is True:
mask_most_impactful_pixels = np.zeros(sci_image.shape)
mask_most_impactful_pixels[chi2_image > (np.quantile(chi2_image[flux_mask].ravel(), 0.99))] = 1
value_of_chi2_1 = np.sum(chi2_image[flux_mask].ravel()
[chi2_image[flux_mask].ravel() >
np.quantile(chi2_image[flux_mask].ravel(), 0.99)])
total_chi2 = np.sum(chi2_image[flux_mask].ravel())
print('fraction of chi2 due to 1% of pixels: ' + str(value_of_chi2_1/total_chi2))
plt.figure(figsize=(20, 20))
plt.subplot(221)
plt.imshow(res_iapetus, origin='lower', vmax=np.max(np.abs(sci_image)))
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='white')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='white')
plt.colorbar(fraction=0.046, pad=0.04)
if show_flux_mask is True:
plt.imshow(flux_mask, origin='lower', alpha=0.4)
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, origin='lower', alpha=0.35, cmap='magma')
plt.title('Model')
plt.grid(False)
plt.subplot(222)
plt.imshow(sci_image, origin='lower', vmax=np.max(np.abs(sci_image)))
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='white')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='white')
plt.colorbar(fraction=0.046, pad=0.04)
if show_flux_mask is True:
plt.imshow(flux_mask, alpha=0.4, origin='lower',)
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, alpha=0.35, cmap='magma', origin='lower',)
plt.title('Data')
plt.grid(False)
plt.subplot(223)
if use_max_flux_scaling is False:
plt.imshow(sci_image-res_iapetus, origin='lower', cmap='bwr',
vmin=-np.max(np.abs(sci_image))/20, vmax=np.max(np.abs(sci_image))/20)
else:
max_flux = np.max(np.abs(sci_image-res_iapetus))
plt.imshow((sci_image-res_iapetus), origin='lower', cmap='bwr',
                       vmin=-max_flux*0.75, vmax=max_flux*0.75)
plt.colorbar(fraction=0.046, pad=0.04)
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='black')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='black')
if show_flux_mask is True:
plt.imshow(flux_mask, alpha=0.55, vmin=0, vmax=1, origin='lower',)
if custom_mask is None:
pass
else:
if np.sum(custom_mask) == 0:
alpha_value = 0
else:
alpha_value = 0.25
plt.imshow(custom_mask, origin='lower', alpha=alpha_value)
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, alpha=0.55, cmap='magma', origin='lower',)
plt.title('Residual (data - model)')
plt.grid(False)
plt.subplot(224)
if use_max_chi_scaling is False:
plt.imshow((sci_image-res_iapetus)/np.sqrt(var_image),
origin='lower', cmap='bwr', vmax=5, vmin=-5)
else:
max_chi = np.max(np.abs((sci_image-res_iapetus)/np.sqrt(var_image))) * 0.75
plt.imshow((sci_image-res_iapetus)/np.sqrt(var_image), origin='lower',
                       cmap='bwr', vmin=-max_chi, vmax=max_chi)
plt.plot(np.ones(len(sci_image))*(left_limit), np.array(range(len(sci_image))), '--', color='black')
plt.plot(np.ones(len(sci_image))*(right_limit), np.array(range(len(sci_image))), '--', color='black')
plt.colorbar(fraction=0.046, pad=0.04)
plt.title('chi map')
plt.tight_layout(pad=0.0, w_pad=1.8, h_pad=-10.0)
if show_flux_mask is True:
plt.imshow(flux_mask, alpha=0.55, origin='lower')
if show_impact_pixels_mask is True:
plt.imshow(mask_most_impactful_pixels, alpha=0.55, cmap='magma', origin='lower')
chi2_max_reduced = np.sum((res_iapetus)**2/((var_image.shape[0]*var_image.shape[1])*var_image))
chi2_reduced = np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image))
chi_max_reduced = np.sum(np.abs(res_iapetus)**1/((var_image.shape[0]*var_image.shape[1]) *
np.sqrt(var_image)))
chi_reduced = np.sum(np.abs(res_iapetus-sci_image)**1/((var_image.shape[0]*var_image.shape[1]) *
np.sqrt(var_image)))
print('---------------------')
print('chi**2 max reduced is: '+str(chi2_max_reduced))
print('chi**2 reduced is: '+str(chi2_reduced) + ' for log improvement: ' +
str(np.log10(chi2_reduced/chi2_max_reduced)))
print('chi max reduced is: '+str(chi_max_reduced))
print('chi reduced is: '+str(chi_reduced) + ' for log improvement: ' +
str(np.log10(chi_reduced/chi_max_reduced)))
print('---------------------')
if custom_mask is None:
pass
else:
custom_mask = ~custom_mask.astype('bool')
print('chi**2 reduced within custom mask area is: ' +
str(np.mean((res_iapetus[custom_mask]-sci_image[custom_mask])**2/(var_image[custom_mask]))))
chi2_max_flux_reduced = np.sum((res_iapetus[flux_mask])**2/(len(var_image[flux_mask]) *
var_image[flux_mask]))
chi2_flux_reduced = np.mean((res_iapetus[flux_mask]-sci_image[flux_mask])**2/(var_image[flux_mask]))
chi_max_flux_reduced = np.sum(np.abs(res_iapetus[flux_mask])**1/(len(var_image[flux_mask]) *
np.sqrt(var_image)[flux_mask]))
chi_flux_reduced = np.mean(np.abs(res_iapetus[flux_mask] -
sci_image[flux_mask])**1/np.sqrt(var_image[flux_mask]))
print('---------------------')
print('chi**2 max reduced within flux mask area is: ' +
str(chi2_max_flux_reduced))
print('chi**2 reduced within flux mask area is: ' +
str(chi2_flux_reduced) + ' for log improvement: ' +
str(np.log10(chi2_flux_reduced/chi2_max_flux_reduced)))
print('chi max reduced within flux mask area is: ' +
str(chi_max_flux_reduced))
print('chi reduced within flux mask area is: ' +
str(chi_flux_reduced) + ' for log improvement: ' +
str(np.log10(chi_flux_reduced/chi_max_flux_reduced)))
print('---------------------')
print('Abs of residual divided by total flux is: ' +
str(np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))))
print('Abs of residual divided by largest value of a flux in the image is: ' +
str(np.max(np.abs((res_iapetus-sci_image)/np.max(res_iapetus)))))
if save is not False:
plt.savefig('/Users/nevencaplar/Documents/PFS/Images/Jan2921/Spot_figures/spot_'+save)
plt.clf()
def create_basic_comparison_plot_log(self,custom_model_image=None,custom_mask=None,custom_sci_image=None,custom_var_image=None,use_max_chi_scaling=False,\
show_flux_mask=False):
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus=custom_model_image
if custom_sci_image is None:
sci_image=self.sci_image
else:
sci_image=custom_sci_image
if custom_var_image is None:
var_image=self.var_image
else:
var_image=custom_var_image
mean_value_of_background_via_var=np.mean([np.median(var_image[0]),np.median(var_image[-1]),\
np.median(var_image[:,0]),np.median(var_image[:,-1])])*3
mean_value_of_background_via_sci=np.mean([np.median(sci_image[0]),np.median(sci_image[-1]),\
np.median(sci_image[:,0]),np.median(sci_image[:,-1])])*3
mean_value_of_background=np.max([mean_value_of_background_via_var,mean_value_of_background_via_sci])
flux_mask=sci_image>(mean_value_of_background)
size=sci_image.shape[0]
if size==40:
dithering=2
else:
dithering=1
if size==20:
x_center=find_centroid_of_flux(res_iapetus)[0]
else:
x_center=(size/2)
left_limit=np.round(x_center-3.5)+0.5
right_limit=np.round(x_center+3.5)-0.5
chi2_image=(sci_image-res_iapetus)**2/((1)*var_image)
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(res_iapetus,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('Model')
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.grid(False)
plt.subplot(222)
plt.imshow(sci_image,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('Data')
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.grid(False)
plt.subplot(223)
plt.imshow(np.abs(sci_image-res_iapetus),origin='lower',vmax=np.max(np.abs(sci_image))/20,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
        plt.title('abs(Residual (data - model))')
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.grid(False)
plt.subplot(224)
plt.imshow(chi2_image,origin='lower',vmin=1,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
if show_flux_mask==True:
plt.imshow(flux_mask,alpha=0.55)
plt.title('chi**2 map')
print('chi**2 max reduced is: '+str(np.sum((res_iapetus)**2/((var_image.shape[0]*var_image.shape[1])*var_image))))
np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))
plt.tight_layout(pad=0.0, w_pad=1.8, h_pad=-7.0)
print('chi**2 reduced is: '+str(np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image))))
print('Abs of residual divided by total flux is: '+str(np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))))
print('Abs of residual divided by largest value of a flux in the image is: '+str(np.max(np.abs((res_iapetus-sci_image)/np.max(res_iapetus)))))
def create_basic_comparison_plot_log_artifical(self,custom_model_image=None,custom_mask=None,custom_sci_image=None,custom_var_image=None,use_max_chi_scaling=False):
# need to update for multivar
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus=custom_model_image
noise=self.create_artificial_noise(custom_model_image=custom_model_image,custom_var_image=custom_var_image)
if custom_sci_image is None:
sci_image=self.sci_image
else:
sci_image=custom_sci_image
if custom_var_image is None:
var_image=self.var_image
else:
var_image=custom_var_image
size=sci_image.shape[0]
if size==40:
dithering=2
else:
dithering=1
if size==20:
x_center=find_centroid_of_flux(res_iapetus)[0]
else:
x_center=(size/2)
left_limit=np.round(x_center-3.5)+0.5
right_limit=np.round(x_center+3.5)-0.5
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(res_iapetus+noise,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
        plt.title('Model with artificial noise')
plt.grid(False)
plt.subplot(222)
plt.imshow(sci_image,origin='lower',vmin=1,vmax=np.max(np.abs(sci_image)),norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('Data')
plt.grid(False)
plt.subplot(223)
plt.imshow(np.abs(res_iapetus-sci_image),origin='lower',vmax=np.max(np.abs(sci_image))/20,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('abs(Residual (model - data))')
plt.grid(False)
plt.subplot(224)
plt.imshow((res_iapetus-sci_image)**2/((1)*var_image),origin='lower',vmin=1,norm=LogNorm())
plt.plot(np.ones(len(sci_image))*(left_limit),np.array(range(len(sci_image))),'--',color='white')
plt.plot(np.ones(len(sci_image))*(right_limit),np.array(range(len(sci_image))),'--',color='white')
cbar=plt.colorbar(fraction=0.046, pad=0.04)
cbar.set_ticks([10,10**2,10**3,10**4,10**5])
plt.title('chi**2 map')
print(np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image)))
np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))
plt.tight_layout(pad=0.0, w_pad=1.8, h_pad=-7.0)
print('chi**2 reduced is: '+str(np.sum((res_iapetus-sci_image)**2/((var_image.shape[0]*var_image.shape[1])*var_image))))
print('Abs of residual divided by total flux is: '+str(np.sum(np.abs((res_iapetus-sci_image)))/np.sum((res_iapetus))))
print('Abs of residual divided by largest value of a flux in the image is: '+str(np.max(np.abs((res_iapetus-sci_image)/np.max(res_iapetus)))))
def create_artificial_noise(self, custom_model_image=None,custom_var_image=None):
if custom_var_image is None:
var_image=self.var_image
else:
var_image=custom_var_image
if custom_model_image is None:
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
else:
res_iapetus=custom_model_image
artifical_noise=np.zeros_like(res_iapetus)
artifical_noise=np.array(artifical_noise)
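        # Draw one Gaussian sample per pixel with standard deviation sqrt(var + 40);
        # the +40 offset is taken here as an extra noise floor in counts (assumption).
        # For a square image this is equivalent to the vectorized form
        #   np.random.randn(*var_image.shape) * np.sqrt(var_image + 40)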
for i in range(len(artifical_noise)):
for j in range(len(artifical_noise)):
artifical_noise[i,j]=np.random.randn()*np.sqrt(var_image[i,j]+40)
return artifical_noise
def create_cut_plots(self):
var_image=self.var_image
artifical_noise=self.create_artificial_noise()
sci_image=self.sci_image
optPsf_cut_fiber_convolved_downsampled=np.load(TESTING_FINAL_IMAGES_FOLDER+'optPsf_cut_fiber_convolved_downsampled.npy')
res_iapetus=optPsf_cut_fiber_convolved_downsampled
mid_point_of_sci_image=int(sci_image.shape[0]/2)
plt.figure(figsize=(25,10))
plt.subplot(121)
plt.title('horizontal direction')
plt.plot(np.array(range(len(res_iapetus))),np.log10(res_iapetus[mid_point_of_sci_image]),'blue',linestyle='--',label='model')
plt.plot(np.array(range(len(res_iapetus))),np.log10(np.abs(sci_image[mid_point_of_sci_image])),'orange',linestyle='--',label='data')
plt.plot(np.array(range(len(res_iapetus))),np.ones(len(res_iapetus))*np.log10(np.max(sci_image[:,mid_point_of_sci_image])*(1/2)),'--',color='black')
plt.legend(fontsize=25)
plt.subplot(122)
plt.title('wavelength direction')
plt.plot(np.array(range(len(res_iapetus))),np.log10(res_iapetus[:,mid_point_of_sci_image]),'blue',linestyle='--',label='model')
plt.plot(np.array(range(len(res_iapetus))),np.log10(np.abs(sci_image[:,mid_point_of_sci_image])),'orange',linestyle='--',label='data')
plt.plot(np.array(range(len(res_iapetus))),np.ones(len(res_iapetus))*np.log10(np.max(sci_image[:,mid_point_of_sci_image])*(1/2)),'--',color='black')
plt.legend(fontsize=20)
plt.figure(figsize=(30,10))
plt.subplot(121)
plt.title('horizontal direction, with noise')
plt.plot(np.array(range(len(res_iapetus))),np.log10(res_iapetus[mid_point_of_sci_image]+artifical_noise[mid_point_of_sci_image]),'blue',linestyle='--',label='model')
plt.plot(np.array(range(len(res_iapetus))),np.log10(
|
np.abs(sci_image[mid_point_of_sci_image])
|
numpy.abs
|
import numpy as np
import pandas as pd
import sys, os, pdb
from .jindlib import JindLib
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import umap
import matplotlib.pyplot as plt
import multiprocessing
from functools import partial
import pickle
import seaborn as sns
class JindVis:
def __init__(self, mat, labels, libobj, direc):
self.class2num = libobj.class2num
self.num2class = libobj.num2class
self.reduce_method = libobj.reduce_method
self.model = None
self.preprocess = False
self.dir = direc
self.obj = libobj
os.system('mkdir -p {}'.format(self.dir))
self.gene_names = libobj.gene_names
self.classes = libobj.classes
self.shortclasses = [i[:3] + "-" + i[-3:] for i in self.classes]
self.n_classes = libobj.n_classes
libobj.path = self.dir
if libobj.val_stats is None:
print("The library object doesn't have validation stats")
sys.exit()
self.val_stats = libobj.val_stats
self.mat = mat
self.labels = labels
self.embeddings = {}
def evaluate(self, mat, labels):
predictions = self.obj.evaluate(mat, labels)
return predictions
def setup(self, test=False):
self.y_pred = self.obj.predict(self.mat, test=test)
self.y_true = np.array([self.class2num[i] for i in self.labels])
# Freeze the predictions and labels
self.y_pred.flags.writeable = False
self.y_true.flags.writeable = False
print("Setup Complete")
def display_mean_prob(self):
probs_train = self.val_stats['pred']
y_train = self.val_stats['true']
probs_test = self.y_pred
y_test = self.y_true
# Using for Loop
for klass in range(self.n_classes):
self.plot_prob(probs_train, y_train, probs_test, y_test, klass)
# Using multiple cores
# pool = multiprocessing.Pool(processes=6)
# func = partial(self.plot_prob, probs_train, y_train, probs_test, y_test)
# pool.map(func, (i for i in range(self.n_classes)))
def plot_prob(self, probs_train, y_train, probs_test, y_test, klass):
factor = 1 + self.n_classes//10
fig = plt.figure(figsize=(6*factor, 6))
indices = np.argmax(probs_train, axis=1)==klass
probs = probs_train[indices]
y_klass = y_train[indices]
class_name = self.classes[klass]
plt.subplot(2, 2, 1)
if len(indices) != 0:
probs_TP = probs[y_klass==klass]
if len(probs_TP) != 0:
mean = np.mean(probs_TP, axis=0)
std = np.std(probs_TP, axis=0)
plt.bar(np.arange(0, len(mean)), mean, yerr=std)
plt.xticks(np.arange(0, len(self.shortclasses), 1.0), labels=self.shortclasses, rotation=60, ha='right')
plt.xlabel("Class")
plt.ylabel("Probability")
plt.title("Val TP Frac {:.4f}".format(len(probs_TP)/(len(probs)+1e-8)))
plt.subplot(2, 2, 2)
probs_FP = probs[y_klass!=klass]
if len(probs_FP) != 0:
mean = np.mean(probs_FP, axis=0)
std = np.std(probs_FP, axis=0)
plt.bar(np.arange(0, len(mean)), mean, yerr=std)
plt.xticks(np.arange(0, len(self.shortclasses), 1.0), labels=self.shortclasses, rotation=60, ha='right')
plt.xlabel("Class")
plt.ylabel("Probability")
plt.title("Val FP Frac {:.4f}".format(len(probs_FP)/(len(probs)+1e-8)))
indices = np.argmax(probs_test, axis=1)==klass
plt.subplot(2, 2, 3)
if len(indices) != 0:
probs = probs_test[indices]
y_klass = y_test[indices]
probs_TP = probs[y_klass==klass]
if len(probs_TP) != 0:
mean = np.mean(probs_TP, axis=0)
std = np.std(probs_TP, axis=0)
plt.bar(np.arange(0, len(mean)), mean, yerr=std)
plt.xticks(np.arange(0, len(self.shortclasses), 1.0), labels=self.shortclasses, rotation=60, ha='right')
plt.xlabel("Class")
plt.ylabel("Probability")
plt.title("Test TP Frac {:.4f}".format(len(probs_TP)/(len(probs)+1e-8)))
plt.subplot(2, 2, 4)
probs_FP = probs[y_klass!=klass]
if len(probs_FP) != 0:
mean = np.mean(probs_FP, axis=0)
std =
|
np.std(probs_FP, axis=0)
|
numpy.std
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 22:04:28 2021
@author: DELL
"""
import torch.utils.data as D
from torchvision import transforms as T
import gdal
import random
import numpy as np
import torch
DEVICE = 'cuda:0' if torch.cuda.is_available() else 'cpu'
# Read the image pixel matrix
# fileName: path to the image
def imgread(fileName, addNDVI=False):
dataset = gdal.Open(fileName)
width = dataset.RasterXSize
height = dataset.RasterYSize
data = dataset.ReadAsArray(0, 0, width, height)
    # Only for images, since the label is single-channel
if(len(data.shape) == 3):
        # Add the normalized difference vegetation index (NDVI) feature
if(addNDVI):
nir, r = data[3], data[0]
ndvi = (nir - r) / (nir + r + 0.00001) * 1.0
            # Keep consistent with the other bands: scale to 0-255; the later ToTensor divides by 255 for normalization
            # Over the whole training set the NDVI minimum is 0 and the maximum is large but rare, so we use 25, roughly the 98th percentile
ndvi = (ndvi - 0) / (25 - 0) * 255
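            # e.g. a raw NDVI value of 12.5 (half of the assumed 98th-percentile maximum of 25)
            # maps to 12.5 / 25 * 255 = 127.5 before clipping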
ndvi = np.clip(ndvi, 0, 255)
data_add_ndvi =
|
np.zeros((5, 256, 256), np.uint8)
|
numpy.zeros
|
#! /bin/python
import json
from PIL import Image
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
base_path = "../../../../data/vgg_2000/"
def load_json(filename='data.json'):
with open(filename, 'r') as outfile:
data = json.load(outfile)
return data
def write_to_json(data, filename='data.json'):
with open(filename, 'w') as outfile:
json.dump(data, outfile)
class GANDL(Dataset):
def __init__(self, tuple_set, spk_feats):
self.tuple_set = tuple_set
self.voice_embeds = spk_feats
def __len__(self):
return len(self.tuple_set)
def __getitem__(self, index):
utt_id, face_path, label = self.tuple_set[index][0], self.tuple_set[index][1], self.tuple_set[index][2]
voice_embed = torch.from_numpy(
|
np.array(self.voice_embeds[utt_id])
|
numpy.array
|
"""
Notes
-----
Important attributes of continuous (order > 0) :class:`Field` and
:class:`SurfaceField` instances:
- `vertex_remap` : `econn[:, :n_vertex] = vertex_remap[conn]`
- `vertex_remap_i` : `conn = vertex_remap_i[econn[:, :n_vertex]]`
where `conn` is the mesh vertex connectivity, `econn` is the
region-local field connectivity.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import output, get_default, assert_
from sfepy.base.base import Struct
from sfepy.discrete.common.fields import parse_shape, Field
from sfepy.discrete.fem.mesh import Mesh
from sfepy.discrete.fem.meshio import convert_complex_output
from sfepy.discrete.fem.utils import (extend_cell_data, prepare_remap,
invert_remap, get_min_value)
from sfepy.discrete.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.discrete.fem.poly_spaces import PolySpace
from sfepy.discrete.fem.fe_surface import FESurface
from sfepy.discrete.integrals import Integral
from sfepy.discrete.fem.linearizer import (get_eval_dofs, get_eval_coors,
create_output)
import six
def set_mesh_coors(domain, fields, coors, update_fields=False, actual=False,
clear_all=True, extra_dofs=False):
if actual:
if not hasattr(domain.mesh, 'coors_act'):
domain.mesh.coors_act = nm.zeros_like(domain.mesh.coors)
domain.mesh.coors_act[:] = coors[:domain.mesh.n_nod]
else:
domain.cmesh.coors[:] = coors[:domain.mesh.n_nod]
if update_fields:
for field in six.itervalues(fields):
field.set_coors(coors, extra_dofs=extra_dofs)
field.clear_mappings(clear_all=clear_all)
def eval_nodal_coors(coors, mesh_coors, region, poly_space, geom_poly_space,
econn, only_extra=True):
"""
Compute coordinates of nodes corresponding to `poly_space`, given
mesh coordinates and `geom_poly_space`.
"""
if only_extra:
iex = (poly_space.nts[:,0] > 0).nonzero()[0]
if iex.shape[0] == 0: return
qp_coors = poly_space.node_coors[iex, :]
econn = econn[:, iex].copy()
else:
qp_coors = poly_space.node_coors
##
# Evaluate geometry interpolation base functions in (extra) nodes.
bf = geom_poly_space.eval_base(qp_coors)
bf = bf[:,0,:].copy()
##
# Evaluate extra coordinates with 'bf'.
cmesh = region.domain.cmesh
conn = cmesh.get_incident(0, region.cells, region.tdim)
conn.shape = (econn.shape[0], -1)
ecoors = nm.dot(bf, mesh_coors[conn])
coors[econn] = nm.swapaxes(ecoors, 0, 1)
def _interp_to_faces(vertex_vals, bfs, faces):
dim = vertex_vals.shape[1]
n_face = faces.shape[0]
n_qp = bfs.shape[0]
faces_vals = nm.zeros((n_face, n_qp, dim), nm.float64)
for ii, face in enumerate(faces):
vals = vertex_vals[face,:dim]
faces_vals[ii,:,:] = nm.dot(bfs[:,0,:], vals)
return(faces_vals)
def get_eval_expression(expression,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None):
"""
Get the function for evaluating an expression given a list of elements,
and reference element coordinates.
"""
from sfepy.discrete.evaluate import eval_in_els_and_qp
def _eval(iels, coors):
val = eval_in_els_and_qp(expression, iels, coors,
fields, materials, variables,
functions=functions, mode=mode,
term_mode=term_mode,
extra_args=extra_args, verbose=verbose,
kwargs=kwargs)
return val[..., 0]
return _eval
def create_expression_output(expression, name, primary_field_name,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None,
min_level=0, max_level=1, eps=1e-4):
"""
Create output mesh and data for the expression using the adaptive
linearizer.
Parameters
----------
expression : str
The expression to evaluate.
name : str
The name of the data.
primary_field_name : str
The name of field that defines the element groups and polynomial
spaces.
fields : dict
The dictionary of fields used in `variables`.
materials : Materials instance
The materials used in the expression.
variables : Variables instance
The variables used in the expression.
functions : Functions instance, optional
The user functions for materials etc.
mode : one of 'eval', 'el_avg', 'qp'
The evaluation mode - 'qp' requests the values in quadrature points,
'el_avg' element averages and 'eval' means integration over
each term region.
term_mode : str
The term call mode - some terms support different call modes
and depending on the call mode different values are
returned.
extra_args : dict, optional
Extra arguments to be passed to terms in the expression.
verbose : bool
If False, reduce verbosity.
kwargs : dict, optional
The variables (dictionary of (variable name) : (Variable
instance)) to be used in the expression.
min_level : int
The minimum required level of mesh refinement.
max_level : int
The maximum level of mesh refinement.
eps : float
The relative tolerance parameter of mesh adaptivity.
Returns
-------
out : dict
The output dictionary.
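
    Examples
    --------
    A schematic call (the expression string and names below are placeholders,
    not taken from a concrete problem definition)::

        out = create_expression_output('ev_grad.i.Omega(u)', 'grad_u', 'u_field',
                                        fields, materials, variables,
                                        mode='qp', min_level=0, max_level=2)
        # out['grad_u'] is a Struct holding the vertex data and the linearized mesh.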
"""
field = fields[primary_field_name]
vertex_coors = field.coors[:field.n_vertex_dof, :]
ps = field.poly_space
gps = field.gel.poly_space
vertex_conn = field.econn[:, :field.gel.n_vertex]
eval_dofs = get_eval_expression(expression,
fields, materials, variables,
functions=functions,
mode=mode, extra_args=extra_args,
verbose=verbose, kwargs=kwargs)
eval_coors = get_eval_coors(vertex_coors, vertex_conn, gps)
(level, coors, conn,
vdofs, mat_ids) = create_output(eval_dofs, eval_coors,
vertex_conn.shape[0], ps,
min_level=min_level,
max_level=max_level, eps=eps)
mesh = Mesh.from_data('linearized_mesh', coors, None, [conn], [mat_ids],
field.domain.mesh.descs)
out = {}
out[name] = Struct(name='output_data', mode='vertex',
data=vdofs, var_name=name, dofs=None,
mesh=mesh, level=level)
out = convert_complex_output(out)
return out
class FEField(Field):
"""
Base class for finite element fields.
Notes
-----
- interps and hence node_descs are per region (must have single
geometry!)
Field shape information:
- ``shape`` - the shape of the base functions in a point
- ``n_components`` - the number of DOFs per FE node
- ``val_shape`` - the shape of field value (the product of DOFs and
base functions) in a point
"""
def __init__(self, name, dtype, shape, region, approx_order=1):
"""
Create a finite element field.
Parameters
----------
name : str
The field name.
dtype : numpy.dtype
The field data type: float64 or complex128.
shape : int/tuple/str
The field shape: 1 or (1,) or 'scalar', space dimension (2, or (2,)
or 3 or (3,)) or 'vector', or a tuple. The field shape determines
the shape of the FE base functions and is related to the number of
components of variables and to the DOF per node count, depending
on the field kind.
region : Region
The region where the field is defined.
approx_order : int or tuple
The FE approximation order. The tuple form is (order, has_bubble),
e.g. (1, True) means order 1 with a bubble function.
Notes
-----
Assumes one cell type for the whole region!
"""
shape = parse_shape(shape, region.domain.shape.dim)
if not self._check_region(region):
raise ValueError('unsuitable region for field %s! (%s)' %
(name, region.name))
Struct.__init__(self, name=name, dtype=dtype, shape=shape,
region=region)
self.domain = self.region.domain
self._set_approx_order(approx_order)
self._setup_geometry()
self._setup_kind()
self._setup_shape()
self.surface_data = {}
self.point_data = {}
self.ori = None
self._create_interpolant()
self._setup_global_base()
self.setup_coors()
self.clear_mappings(clear_all=True)
self.clear_qp_base()
self.basis_transform = None
self.econn0 = None
self.unused_dofs = None
self.stored_subs = None
def _set_approx_order(self, approx_order):
"""
Set a uniform approximation order.
"""
if isinstance(approx_order, tuple):
self.approx_order = approx_order[0]
self.force_bubble = approx_order[1]
else:
self.approx_order = approx_order
self.force_bubble = False
def get_true_order(self):
"""
Get the true approximation order depending on the reference
element geometry.
For example, for P1 (linear) approximation the true order is 1,
while for Q1 (bilinear) approximation in 2D the true order is 2.
"""
gel = self.gel
if (gel.dim + 1) == gel.n_vertex:
order = self.approx_order
else:
order = gel.dim * self.approx_order
if self.force_bubble:
bubble_order = gel.dim + 1
order = max(order, bubble_order)
return order
def is_higher_order(self):
"""
Return True, if the field's approximation order is greater than one.
"""
return self.force_bubble or (self.approx_order > 1)
def _setup_global_base(self):
"""
Setup global DOF/base functions, their indices and connectivity of the
field. Called methods implemented in subclasses.
"""
self._setup_facet_orientations()
self._init_econn()
self.n_vertex_dof, self.vertex_remap = self._setup_vertex_dofs()
self.vertex_remap_i = invert_remap(self.vertex_remap)
aux = self._setup_edge_dofs()
self.n_edge_dof, self.edge_dofs, self.edge_remap = aux
aux = self._setup_face_dofs()
self.n_face_dof, self.face_dofs, self.face_remap = aux
aux = self._setup_bubble_dofs()
self.n_bubble_dof, self.bubble_dofs, self.bubble_remap = aux
self.n_nod = self.n_vertex_dof + self.n_edge_dof \
+ self.n_face_dof + self.n_bubble_dof
self._setup_esurface()
def _setup_esurface(self):
"""
Setup extended surface entities (edges in 2D, faces in 3D),
i.e. indices of surface entities into the extended connectivity.
"""
node_desc = self.node_desc
gel = self.gel
self.efaces = gel.get_surface_entities().copy()
nd = node_desc.edge
if nd is not None:
efs = []
for eof in gel.get_edges_per_face():
efs.append(nm.concatenate([nd[ie] for ie in eof]))
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.efaces = nm.hstack((self.efaces, efs))
efs = node_desc.face
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.efaces = nm.hstack((self.efaces, efs))
if gel.dim == 3:
self.eedges = gel.edges.copy()
efs = node_desc.edge
if efs is not None:
efs = nm.array(efs).squeeze()
if efs.ndim < 2:
efs = efs[:,nm.newaxis]
self.eedges = nm.hstack((self.eedges, efs))
def set_coors(self, coors, extra_dofs=False):
"""
Set coordinates of field nodes.
"""
# Mesh vertex nodes.
if self.n_vertex_dof:
indx = self.vertex_remap_i
self.coors[:self.n_vertex_dof] = nm.take(coors,
indx.astype(nm.int32),
axis=0)
n_ex_dof = self.n_bubble_dof + self.n_edge_dof + self.n_face_dof
# extra nodes
if n_ex_dof:
if extra_dofs:
if self.n_nod != coors.shape[0]:
raise NotImplementedError
self.coors[:] = coors
else:
gps = self.gel.poly_space
ps = self.poly_space
eval_nodal_coors(self.coors, coors, self.region,
ps, gps, self.econn)
def setup_coors(self):
"""
Setup coordinates of field nodes.
"""
mesh = self.domain.mesh
self.coors = nm.empty((self.n_nod, mesh.dim), nm.float64)
self.set_coors(mesh.coors)
def get_vertices(self):
"""
Return indices of vertices belonging to the field region.
"""
return self.vertex_remap_i
def _get_facet_dofs(self, rfacets, remap, dofs):
facets = remap[rfacets]
return dofs[facets[facets >= 0]].ravel()
def get_data_shape(self, integral, integration='volume', region_name=None):
"""
Get element data dimensions.
Parameters
----------
integral : Integral instance
The integral describing used numerical quadrature.
integration : 'volume', 'surface', 'surface_extra', 'point' or 'custom'
The term integration type.
region_name : str
The name of the region of the integral.
Returns
-------
data_shape : 4 ints
The `(n_el, n_qp, dim, n_en)` for volume shape kind,
`(n_fa, n_qp, dim, n_fn)` for surface shape kind and
`(n_nod, 0, 0, 1)` for point shape kind.
Notes
-----
- `n_el`, `n_fa` = number of elements/facets
- `n_qp` = number of quadrature points per element/facet
- `dim` = spatial dimension
- `n_en`, `n_fn` = number of element/facet nodes
- `n_nod` = number of element nodes
"""
region = self.domain.regions[region_name]
shape = region.shape
dim = region.dim
if integration in ('surface', 'surface_extra'):
sd = self.surface_data[region_name]
# This works also for surface fields.
key = sd.face_type
weights = self.get_qp(key, integral).weights
n_qp = weights.shape[0]
if integration == 'surface':
data_shape = (sd.n_fa, n_qp, dim, sd.n_fp)
else:
data_shape = (sd.n_fa, n_qp, dim, self.econn.shape[1])
elif integration in ('volume', 'custom'):
_, weights = integral.get_qp(self.gel.name)
n_qp = weights.shape[0]
data_shape = (shape.n_cell, n_qp, dim, self.econn.shape[1])
elif integration == 'point':
dofs = self.get_dofs_in_region(region, merge=True)
data_shape = (dofs.shape[0], 0, 0, 1)
else:
raise NotImplementedError('unsupported integration! (%s)'
% integration)
return data_shape
def get_dofs_in_region(self, region, merge=True):
"""
Return indices of DOFs that belong to the given region and group.
"""
node_desc = self.node_desc
dofs = []
vdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.vertex is not None:
vdofs = self.vertex_remap[region.vertices]
vdofs = vdofs[vdofs >= 0]
dofs.append(vdofs)
edofs = nm.empty((0,), dtype=nm.int32)
if node_desc.edge is not None:
edofs = self._get_facet_dofs(region.edges,
self.edge_remap,
self.edge_dofs)
dofs.append(edofs)
fdofs = nm.empty((0,), dtype=nm.int32)
if node_desc.face is not None:
fdofs = self._get_facet_dofs(region.faces,
self.face_remap,
self.face_dofs)
dofs.append(fdofs)
bdofs = nm.empty((0,), dtype=nm.int32)
if (node_desc.bubble is not None) and region.has_cells():
els = self.bubble_remap[region.cells]
bdofs = self.bubble_dofs[els[els >= 0]].ravel()
dofs.append(bdofs)
if merge:
dofs = nm.concatenate(dofs)
return dofs
def clear_qp_base(self):
"""
Remove cached quadrature points and base functions.
"""
self.qp_coors = {}
self.bf = {}
def get_qp(self, key, integral):
"""
Get quadrature points and weights corresponding to the given key
and integral. The key is 'v' or 's#', where # is the number of
face vertices.
"""
qpkey = (integral.order, key)
if qpkey not in self.qp_coors:
if (key[0] == 's') and not self.is_surface:
dim = self.gel.dim - 1
n_fp = self.gel.surface_facet.n_vertex
geometry = '%d_%d' % (dim, n_fp)
else:
geometry = self.gel.name
vals, weights = integral.get_qp(geometry)
self.qp_coors[qpkey] = Struct(vals=vals, weights=weights)
return self.qp_coors[qpkey]
def substitute_dofs(self, subs, restore=False):
"""
Perform facet DOF substitutions according to `subs`.
Modifies `self.econn` in-place and sets `self.econn0`,
`self.unused_dofs` and `self.basis_transform`.
"""
if restore and (self.stored_subs is not None):
self.econn0 = self.econn
self.econn, self.unused_dofs, basis_transform = self.stored_subs
else:
if subs is None:
self.econn0 = self.econn
return
else:
self.econn0 = self.econn.copy()
self._substitute_dofs(subs)
self.unused_dofs =
|
nm.setdiff1d(self.econn0, self.econn)
|
numpy.setdiff1d
|
"""
Mutual Information Least-Dependent Component Analysis (MILCA)
Reference: <NAME>.; <NAME>.; <NAME>.; <NAME>.
Phys. Rev. E, 2004, 70, 066123.
https://doi.org/10.1103/PhysRevE.70.066123
https://arxiv.org/abs/physics/0405044
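
Minimal usage sketch (data shape and parameter values are illustrative; only the
constructor and ``fit`` signatures defined below are assumed, and the body of
``fit`` continues beyond this excerpt):

    import numpy as np
    X = np.random.randn(1000, 4)                    # 1000 samples of 4 mixed signals
    ica = MILCA(n_components=4, n_neighbours=10, n_jobs=1)
    ica.fit(X)                                      # whitening, then the MI-based search per the reference above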
"""
# Author: <NAME> (<EMAIL>)
# License: BSD 3 clause
import numpy as np
import scipy as sp
from sklearn.feature_selection import mutual_info_ as mi
from joblib import Parallel, delayed
class MILCA:
def __init__(self, n_components=None, n_neighbours=None, n_angles = None, smoothing_band = None, n_jobs=None):
self.n_components = n_components
self.n_jobs = n_jobs
self.n_neighbours = n_neighbours
self.n_angles = n_angles
self.smoothing_band = smoothing_band
self.parallelizer = Parallel(n_jobs = self.n_jobs)
def fit(self, X, y=None):
if self.n_components is None:
self.n_components = X.shape[1]
if self.n_neighbours is None:
self.n_neighbours = 10
if self.n_angles is None:
self.n_angles = 128
if self.smoothing_band is None:
self.smoothing_band = int(self.n_angles/4)
d, E = np.linalg.eigh(np.cov(X.T))
indx = np.argsort(d)[::-1][:self.n_components]
d, E = d[indx], E[:, indx]
D =
|
np.diag(d)
|
numpy.diag
|
from time import time
import numpy as np
import copy
from multiprocessing import Process, Queue
from .mp_corr3_err import mp_corr
# from scipy.optimize import leastsq
from collections.abc import Iterable
from ..misc.progressbar import progress
from .xpcsmethods import ttc_to_g2, bin_multitau
import pdb
def rebin(a, newshape):
"""Rebin an array to a new shape."""
assert len(a.shape) == len(newshape)
slices = [slice(0, old, float(old) / new) for old, new in zip(a.shape, newshape)]
coordinates = np.mgrid[slices]
indices = coordinates.astype("i")
return a[tuple(indices)]
def errfunc(pa, xdata, ydata):
"""Fit function for fitting the variance of the two-time correlation function."""
return (pa[0] + pa[1] * xdata - ydata) / np.sqrt(ydata * 1e-8)
def avr(saxs, ctr=-1, mask=None):
"""Old version of normalization function"""
dim1, dim2 = np.shape(saxs)
if mask is None:
mask = np.ones((dim1, dim2))
saxs = saxs * mask
if ctr == -1:
return np.ones((dim1, dim2)) * np.mean(saxs)
cx, cy = ctr
[X, Y] = np.mgrid[1 - cy : dim1 + 1 - cy, 1 - cx : dim2 + 1 - cx]
q = np.round(np.sqrt(X ** 2 + Y ** 2)).astype(np.int64)
q = q.ravel()
mask = mask.flatten()
saxs = saxs.flatten()
qm = list(range(int(q.max() + 1)))
qr = list(range(len(qm)))
for i in qm:
qr[i] = []
for i in range(len(q)):
if mask[i]:
qr[q[i]].append(i)
while [] in qr:
qr.remove([])
for i in qr:
saxs[i] = np.mean(saxs[i])
return saxs.reshape(dim1, dim2)
def avr_better(saxs, ctr, mask):
"""Return an average saxs image for normalization of images."""
cx, cy = ctr
dim1, dim2 = np.shape(saxs)
[X, Y] = np.mgrid[1 - cy : dim1 + 1 - cy, 1 - cx : dim2 + 1 - cx]
q = np.float32(np.sqrt(X ** 2 + Y ** 2))
n = np.int16(q + 0.5)
q[mask == 0] = 0
n[mask == 0] = 0
max_n = n.max() + 1
mean_saxs = np.zeros(max_n + 1, np.float32)
new_saxs = np.zeros_like(saxs, np.float32)
for i in range(max_n):
ind = np.where((n == i) & (mask == 1))
if ind[0].size:
mean_saxs[i] = np.mean(saxs[ind])
for i in range(dim1):
for j in range(dim2):
if q[i, j] > 0:
par = int(q[i, j])
f1 = q[i, j] - par
if mean_saxs[par + 1] > 0 and mean_saxs[par] > 0:
new_saxs[i, j] = mean_saxs[par + 1] * f1 + mean_saxs[par] * (1 - f1)
if mean_saxs[par + 1] > 0 and mean_saxs[par] == 0:
new_saxs[i, j] = mean_saxs[par + 1]
if mean_saxs[par + 1] == 0 and mean_saxs[par] > 0:
new_saxs[i, j] = mean_saxs[par]
return new_saxs
def get_norm_saxs(saxs, qroi, qsec, ctr, mask, verbose=False):
"""Get an average SAXS for normalization"""
if verbose:
print("Start computing SAXS for normalization.")
dim2, dim1 = np.shape(saxs)
saxs_img = saxs.copy()
ctr = (ctr[0] - qsec[1], ctr[1] - qsec[0])
saxs_img = saxs_img * mask
saxs_img = avr_better(saxs_img, ctr, mask)
saxs_imgc = np.ones((dim2, dim1))
saxs_img[saxs_img == 0] = 1.0
for i in range(len(qroi)):
q0 = qroi[i][0] - qsec[0]
q1 = qroi[i][1] - qsec[1]
saxs_imgc[q0, q1] = np.mean(saxs_img[q0, q1]) / saxs_img[q0, q1]
# saxs_imgc[np.where(np.isinf(saxs_imgc))] = 1.0
if verbose:
print("Done")
print("Shape of saxs_img:", np.shape(saxs_img))
print("Sum of saxs_img:", np.sum(saxs_img))
return saxs_imgc
def calc_twotime_cf(ttdata, tt_max_images=5000, crossttdata=None):
"""Calculate two-time correlation function of a large data set.
Args:
ttdata (list): list of arrays each of shape (nimages, npixels).
"""
def trc(matr):
"""Calculate the two-time correlation function."""
meanmatr = np.mean(matr, axis=1)
meanmatr[meanmatr <= 0] = 1.0
tmp, lenmatr = np.shape(matr)
meanmatr.shape = 1, tmp
trcm = np.dot(matr, matr.T) / lenmatr / np.dot(meanmatr.T, meanmatr)
return trcm
def crosstrc(matr1, matr2):
"""Calculate the cross-two-time correlation function."""
meanmatr1 = np.mean(matr1, axis=1)
meanmatr1[meanmatr1 <= 0] = 1.0
meanmatr2 = np.mean(matr2, axis=1)
meanmatr2[meanmatr2 <= 0] = 1.0
tmp, lenmatr = np.shape(matr1)
meanmatr1 = meanmatr1.reshape(1, tmp)
meanmatr2 = meanmatr2.reshape(1, tmp)
trcm = np.dot(matr1, matr2.T) / lenmatr / np.dot(meanmatr1.T, meanmatr2)
return trcm
def vartrc(ttc):
"""Calculate the variance of the two-time correlation function."""
# pc0 = [1.0, 0.1]
n, tmp = np.shape(ttc)
vtmp = []
for it in range(1, n - 1):
# ydata=diag(ttc,it)
# xdata=arange(1,len(ydata)+1)
# p1,success=leastsq(errfuncc,pc0,args=(xdata,ydata))
# vtmp.append(var(ydata/(p1[0]+p1[1]*xdata)))
vtmp.append(np.var(np.diag(ttc, it)))
return vtmp
def recurf(ll):
"""Helper function used for calculating the two-time correlation function."""
# global l, y, v
y[ll + 1].append((y[ll][0] + y[ll][1]) * 0.5)
y[ll] = []
v[ll + 1].append(vartrc(y[ll + 1][-1]))
if l[ll + 1] == 1:
recurf(ll + 1)
else:
l[ll + 1] += 1
l[ll] = 0
crossdata_avail = False if crossttdata is None else True
nbins = len(ttdata)
output_ttc = []
output_z = []
for ibin in range(nbins):
data = ttdata[ibin]
if crossdata_avail:
cdata = crossttdata[ibin]
nf, lind = data.shape
if nf > tt_max_images:
ttchunk = nf // tt_max_images
nfnew = ttchunk * tt_max_images
print(
"Reducing two-time correlation data from "
"{} to {} images by rebinning.".format(nf, tt_max_images)
)
data = np.mean(data[:nfnew].reshape(ttchunk, -1, lind, order="F"), 0)
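            # e.g. (values assumed) nf=12000 with tt_max_images=5000 gives ttchunk=2,
            # so nfnew=10000 frames are kept and, thanks to order='F', every pair of
            # consecutive frames is averaged down to 5000 frames.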
lind2 = lind // 16
l = np.zeros(5)
y = []
v = []
for i in range(5):
y.append([])
v.append([])
ib = 0
for i in range(16):
ie = ib + lind2
if not crossdata_avail:
y[0].append(trc(data[:, ib:ie]))
else:
c1 = crosstrc(data[:, ib:ie], cdata[:, ib:ie])
c2 = crosstrc(cdata[:, ib:ie], data[:, ib:ie])
y[0].append((c1 + c2) / 2.0)
v[0].append(vartrc(y[0][-1]))
if l[0] == 1:
recurf(0)
else:
l[0] += 1
ib += lind2
vm = []
for i in range(4, -1, -1):
vm.append(np.mean(v[i], 0))
vm = np.array(vm)
del data
del v
ttcf = y[4][-1]
dia1 = np.mean(np.diag(ttcf, 1))
t = np.arange(np.shape(ttcf)[0])
ttcf[t, t] = dia1
N = np.array([1, 2, 4, 8, 16]) / float(lind)
z = vm.T / N
# p0=[0,1]
# it=range(len(ttcf[1:,0]))
# p1=zeros((len(ttcf[1:,0]),len(p0)+1))
# p1[:,0]=(asfarray(it)+1.0)*dt
# xdata=ttcf[0,:]
# for i in it:
# ydata=ttcf[i+1,:]
# p1[i,1:], success = leastsq(errfunc, p0, args=(xdata,ydata))
output_ttc.append(ttcf)
output_z.append(z)
return output_ttc, output_z
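# Hedged usage sketch for calc_twotime_cf on synthetic data: one q-bin with
# 50 frames of 160 pixels each. The pixel axis is split into 16 sub-blocks
# internally, so npixels should be comfortably larger than 16.
import numpy as np
rng = np.random.default_rng(0)
synthetic = rng.poisson(5.0, size=(50, 160)).astype(float)
ttc_list, z_list = calc_twotime_cf([synthetic])
print(ttc_list[0].shape)   # (50, 50) two-time correlation matrix
print(z_list[0].shape)     # variance estimates per sub-block averaging level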
def norm_qdata(chunk, trace_slice, q0, q1, lin_mask, norm):
if norm == "symmetric_bins":
normfactor = trace_slice
normfactor[normfactor == 0] = 1.0
data_loop = chunk[:, q0, q1] / normfactor[:, None]
elif norm == "symmetric_frame":
data_loop = (
chunk[:, q0, q1]
/ np.mean(chunk[:, lin_mask[0], lin_mask[1]], axis=1)[:, None]
)
elif norm == "symmetric":
data_loop = chunk[:, q0, q1]
elif norm == "corrcoef":
tmp_mat = chunk[:, q0, q1] / trace_slice[:, None]
data_loop = (tmp_mat - tmp_mat.mean(-1)[:, None]) / np.sqrt(
np.var(tmp_mat, -1)
)[:, None]
elif norm == "corrcoef_frame":
tmp_mat = (
chunk[:, q0, q1]
/ np.mean(chunk[:, lin_mask[0], lin_mask[1]], axis=1)[:, None]
)
data_loop = (tmp_mat - tmp_mat.mean(-1)[:, None]) / np.sqrt(
np.var(tmp_mat, -1)
)[:, None]
else:
raise ValueError(f"{norm} is not a valid normalization method.")
return data_loop
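# Hedged sketch of norm_qdata with the "corrcoef" option: the pixels of one
# q-bin are divided by the per-frame trace and then standardized per frame.
# Shapes here are assumptions consistent with the indexing inside the helper.
import numpy as np
rng = np.random.default_rng(1)
frames = rng.poisson(10.0, size=(20, 8, 8)).astype(float)
q0, q1 = np.where(np.ones((8, 8), bool))     # use every pixel as one q-bin
trace = frames[:, q0, q1].mean(axis=1)       # per-frame mean intensity
normed = norm_qdata(frames, trace, q0, q1, (q0, q1), "corrcoef")
print(normed.shape, normed.mean(), normed.std())  # (20, 64), ~0 mean, ~1 std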
#######################
# ---MAIN FUNCTION--- #
#######################
def pyxpcs(
data,
qroi=None,
dt=1.0,
qv=None,
saxs=None,
mask=None,
crossdata=None,
ctr=(0, 0),
twotime_par=None,
qsec=(0, 0),
norm="symmetric",
nprocs=1,
time_spacing=None,
verbose=True,
chn=16,
tt_max_images=5000,
use_multitau="auto",
rebin_g2="auto",
):
"""Calculate g2 correlation functions.
Args:
data (np.ndarray, dict): If data is an array, time is the first
dimension followed by x and y. A dictionary is passed by Xana
containing multiprocessing queues.
dt (float, iterable): The spacing of the time between frames. In
case of unequally spaced data, a 1D vector must be passed.
"""
time0 = time()
crossdata_avail = False
if isinstance(data, np.ndarray):
if data.ndim == 1:
# handling 1D data
data = data.reshape(-1, 1, 1)
qroi = [
(np.array([0]), np.array([0])),
]
nf, *dim = np.shape(data)
def get_chunk():
return (0, data)
if crossdata is not None:
assert crossdata.shape == data.shape
crossdata_avail = True
use_multitau = False
elif isinstance(data, dict):
use_mp = True # make sure that multiprocessing is available
nf = data["nimages"]
dim = data["dim"]
def get_chunk():
return data["dataQ"].get()
else:
raise ValueError(f"Cannot process data of type {type(data)}")
# q-bins
lqv = len(qroi)
rlqv = range(lqv)
if qv is None:
qv = np.arange(lqv)
# check time spacing
if isinstance(dt, (float, int)) and not isinstance(time_spacing, Iterable):
equally_spaced = True
tvec = np.arange(1, nf + 1) * dt
elif isinstance(time_spacing, Iterable):
if len(np.unique(np.diff(time_spacing))) == 1:
equally_spaced = True
else:
equally_spaced = False
tvec = np.asarray(time_spacing) * dt
nprocs = 1
print("Switching off multiprocessing due to " "unequally spaced lag times.")
assert nf == len(tvec), "Time vector does not match data shape"
else:
raise ValueError(
"Time axis variable `time_spacing` must not be of "
f"type {type(time_spacing)}"
)
# computation modes
if use_multitau == "auto":
use_multitau = True if equally_spaced else False
if use_multitau is False:
if twotime_par is None:
twotime_par = np.arange(lqv)
print(
"With multitau being disabled, TTCs are required for the g2 "
"calculation;\nhowever, the twotime_par argument was not "
"provided.\nDefault is to calculate the TTC for each q-bin."
)
use_mp = True if nprocs > 1 and use_multitau else False
if rebin_g2 == "auto":
rebin_g2 = True if equally_spaced and not use_multitau else False
if verbose:
print("Number of images is:", nf)
print("shape of image section is:", dim)
if not isinstance(mask, np.ndarray):
mask =
np.ones(dim, "int8")
numpy.ones
#--> This code was developed using Anaconda3 installed as
#--> administrator and with PATH option selected during the install so
#--> that python can be used from a Windows command line. NOTE: to
#--> get command line arguments passed, which is essential for this
#--> code, you need to edit the registry to make
# Computer\HKEY_CLASSES_ROOT\Applications\python.exe\shell\open\command
# "C:\ProgramData\Anaconda3\python.exe" "%1" %*
# Thanks to
# https://stackoverflow.com/questions/29540541/executable-python-script-not-take-sys-argv-in-windows
# Alternately, you can just call python (with the full path to python
# if needed) and then specify the full path to the module and then the
# module's arguments
import importlib
import sys
import os
import socket
import time
import subprocess
import argparse
import json
import numpy as np
from scipy import signal
from astropy import log
from astropy import wcs
from astropy.io import fits
from astropy import units as u
from astropy.time import Time, TimeDelta
if sys.platform == 'win32':
# --> also check out pythonnet
try:
import win32com.client
except:
log.info('You are missing the win32com.client. This should be in the Anaconda package. MaxIm/telescope control will not work.')
else:
# http://timgolden.me.uk/pywin32-docs/html/com/win32com/HTML/QuickStartClientCom.html
# Use makepy.py -i to poke around in what might be useful
try:
# 'ASCOM Master Interfaces for .NET and COM' constants.
# Use example: win32com.client.constants.shutterOpen
win32com.client.gencache.EnsureModule('{76618F90-032F-4424-A680-802467A55742}', 0, 1, 0)
except:
log.info('ASCOM does not seem to be installed. MaxIm/telescope control will not work.')
else:
try:
# MaxIm constants. The long string is the GUID of MaxIm found by makepy.py
win32com.client.gencache.EnsureModule('{B4955EC7-F7F2-11D2-AA9C-444553540000}', 0, 1, 0)
except:
log.info('MaxIm not found. MaxIm/telescope control will not work.')
else:
log.info('You are not on a Windows system. The MaxIm/telescope control features of this package will not work unless you are on a Windows system.')
import define as D
# --> these are things that eventually I would want to store in a
# --> configuration file
# --> CHANGE ME BACK TO 1s(or 7s) and filter 0 (0.7s or 0.3 on
# --> Vega filter 1 works for day)
default_exptime = 1
default_filt = 0
default_cent_tol = 5 # Pixels
default_guider_exptime = 1 # change back to 1 for night, 0.2 for day
# In principle, it is possible to use only MaxIm guiding stuff for
# this, alleviating the need for us to connect directly to the
# telescope. In practice, with a GEM and for setting DEC conveniently
# in the guider, MaxImControl really needs to be connected to the scope.
# Since unexpected behavior may result if there is not a hard failure
# when a GEM is not connected, indicate here whether or not you want
# that hard failure
telescope_must_be_connectable = True
# These are necessary for GEMs because MaxIm does not reveal the
# contents of the Camera Control -> Guide Tab -> Settings dialog box
# -> Advanced Tab -> Guider Motor Control radio buttons to the
# scripting interface. As explained in guider_motor_reverse_setup,
# when using ACP or otherwise not having MaxIm connected to the
# telescope, we need to manage motor reversal ourselves
guider_motor_control_reverseX = True
guider_motor_control_reverseY = False
# Misalignment in deg
guider_cal_astrometry_max_misalignment = 10
horizon_limit = 8.5
# --> I may improve this location or the technique of message passing
hostname = socket.gethostname()
if hostname == "snipe" or hostname == "byted":
raw_data_root = '/data/io/IoIO/raw'
elif hostname == "greyhound" or hostname == "gigabyte":
# --> This doesn't work. I need UNC?
#raw_data_root = '//snipe/data/io/IoIO/raw'
raw_data_root = r'\\snipe\data\io\IoIO\raw'
default_telescope = 'ScopeSim.Telescope'
elif socket.gethostname() == "IoIO1U1":
raw_data_root = r'C:\Users\PLANETARY SCIENCE\Desktop\IoIO\data'
# --> Eventually, it would be nice to have this in a chooser
default_telescope = 'AstroPhysicsV2.Telescope'
# For weather synchronization with ACP
ACPUtil = 'ACP.Util'
default_guide_box_command_file = os.path.join(raw_data_root, 'GuideBoxCommand.txt')
default_guide_box_log_file = os.path.join(raw_data_root, 'GuideBoxLog.txt')
run_level_main_astrometry = os.path.join(
raw_data_root, '2021-04_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2021-03_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-09_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-03_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2019-04_Astrometry/Main_Astrometry_East_of_Pier.fit')
#raw_data_root, '2019-04_Astrometry/Main_Astrometry_West_of_Pier.fit')
#raw_data_root, '2019-02_Astrometry/PinPointSolutionEastofPier.fit')
#raw_data_root, '2019-02_Astrometry/PinPointSolutionWestofPier.fit')
#raw_data_root, '2018-04_Astrometry/PinPointSolutionEastofPier.fit')
# --> Currently only guider WestofPier (looking east) works properly,
# --> which might indicate that calculations need to be made with true
# --> north of CCD aligned with true north button on mount. Although
# --> pier flip doesn't affect N/S because tube rolls over too, E/W is
# --> affected
run_level_guider_astrometry = os.path.join(
raw_data_root, '2021-04_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2021-03_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-09_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2020-03_Astrometry/Guider_Astrometry_East_of_Pier.fit')
#raw_data_root, '2019-04_Astrometry/Guider_Astrometry_West_of_Pier.fit')
#raw_data_root, '2019-02_Astrometry/GuiderPinPointSolutionWestofPier.fit')
#raw_data_root, '2019-02_Astrometry/GuiderPinPointSolutionEastofPier.fit')
#raw_data_root, '2018-04_Astrometry/GuiderPinPointSolutionWestofPier.fit')
#raw_data_root, '2018-01_Astrometry//GuiderPinPointSolutionEastofPier.fit')
def angle_norm(angle, maxang):
"""Normalize an angle to run up to maxang degrees"""
angle += 360
angle %= 360
if angle > maxang: # handles 180 case
angle -= 360
return angle
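# Quick sanity check of angle_norm (a sketch, not part of the original code):
# maxang=180 folds angles into (-180, 180], maxang=360 folds into [0, 360).
assert angle_norm(350, 180) == -10
assert angle_norm(-10, 360) == 350
assert angle_norm(190, 180) == -170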
def iter_linfit(x, y, max_resid=None):
"""Performs least squares linear fit iteratively to discard bad points
If you actually know the statistical weights on the points,
just use polyfit directly.
"""
# Let polyfit report errors in x and y
coefs = np.polyfit(x, y, 1)
# We are done if we have just two points
if len(x) == 2:
return coefs
# Our first fit may be significantly pulled off by bad
# point(s), particularly if the number of points is small.
# Construct a repeat until loop the Python way with
# while... break to iterate to squeeze bad points out with
# low weights
last_redchi2 = None
iterations = 1
while True:
# Calculate weights roughly based on chi**2, but not going
# to infinity
yfit = x * coefs[0] + coefs[1]
resid = (y - yfit)
if np.all(resid == 0):  # exact fit; nothing left to reweight
break
# Add 1 to avoid divide by zero error
resid2 = resid**2 + 1
# Use the residual as the variance + do the algebra
redchi2 = np.sum(1/(resid2))
coefs = np.polyfit(x, y, 1, w=1/resid2)
# Converge to a reasonable epsilon
if last_redchi2 and last_redchi2 - redchi2 < np.finfo(float).eps*10:
break
last_redchi2 = redchi2
iterations += 1
# The next level of cleanliness is to exclude any points above
# max_resid from the fit (if specified)
if max_resid is not None:
goodc = np.where(np.abs(resid) < max_resid)
# Where returns a tuple of arrays!
if len(goodc[0]) >= 2:
coefs = iter_linfit(x[goodc], y[goodc])
return coefs
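# Hedged usage sketch for iter_linfit: a clean line with one gross outlier.
# With max_resid set, the outlier is excluded on the final pass and the
# recovered (slope, intercept) should be close to (2, 1).
import numpy as np
x = np.arange(20, dtype=float)
y = 2.0 * x + 1.0
y[7] += 50.0                                 # inject a bad point
print("slope, intercept:", iter_linfit(x, y, max_resid=5.0))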
# I am either phasing this out or I could potentially make it work
# with __enter__ and __exit__ for a context manager
def get_HDUList(HDUList_im_or_fname):
"""Returns an astropy.fits.HDUList given a filename, image or
HDUList. If you have a set of HDUs, you'll need to put them
together into an HDUList yourself, since this can't guess how
to do that"""
if isinstance(HDUList_im_or_fname, fits.HDUList):
return HDUList_im_or_fname
elif isinstance(HDUList_im_or_fname, str):
return fits.open(HDUList_im_or_fname)
elif isinstance(HDUList_im_or_fname, np.ndarray):
hdu = fits.PrimaryHDU(HDUList_im_or_fname)
return fits.HDUList(hdu)
else:
raise ValueError('Not a valid input for HDUList_im_or_fname; expecting fits.HDUList, string, or np.ndarray')
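# Hedged sketch of get_HDUList: it accepts an ndarray, a filename, or an
# existing HDUList and always hands back an HDUList.
import numpy as np
from astropy.io import fits
hdul = get_HDUList(np.zeros((4, 4)))         # wraps a bare array
print(isinstance(hdul, fits.HDUList))        # True
print(get_HDUList(hdul) is hdul)             # True: passed straight through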
def pier_flip_astrometry(header_in):
"""Adjust FITS astrometry CD* keywords to emulate a pier flip (rotate FOV 180 deg)
header_in : input FITS header
return value : copy of header_in with CD* keywords adjusted"""
header = header_in.copy()
header['CDELT1'] *= -1
header['CDELT2'] *= -1
header['CD1_1'] *= -1
header['CD1_2'] *= -1
header['CD2_1'] *= -1
header['CD2_2'] *= -1
if header.get('PIERSIDE'):
if header['PIERSIDE'] == 'EAST':
header['PIERSIDE'] = 'WEST'
else:
header['PIERSIDE'] = 'EAST'
header['FLIPAPPL'] = (True, 'Artificially flipped pier side')
header['HISTORY'] = 'Artificially flipped pier side, modified CD* and PIERSIDE'
return header
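# Hedged sketch of pier_flip_astrometry on a minimal header: every CD*/CDELT*
# term changes sign and PIERSIDE toggles, emulating the 180 degree field
# rotation of a GEM pier flip. The header values here are made up.
from astropy.io import fits
hdr = fits.Header()
hdr['CDELT1'], hdr['CDELT2'] = -1e-4, 1e-4
hdr['CD1_1'], hdr['CD1_2'] = -1e-4, 0.0
hdr['CD2_1'], hdr['CD2_2'] = 0.0, 1e-4
hdr['PIERSIDE'] = 'EAST'
flipped = pier_flip_astrometry(hdr)
print(flipped['CD1_1'], flipped['PIERSIDE'])  # 0.0001 WEST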
# --> Really what I think I want is a PGData for all of the center and
# --> rate stuff. That will clean up the ObsData property and
# --> __init__
class ObsData():
"""Base class for observations, enabling object centering, etc.
This is intended to work in an active observing setting, so
generally an image array will be received, the desired properties
will be calculated from it and those properties will be read by
the calling code.
"""
def __init__(self,
HDUList_im_or_fname=None,
desired_center=None,
recalculate=False,
readnoise=5):
if HDUList_im_or_fname is None:
raise ValueError('No HDUList_im_or_fname provided')
self.recalculate = recalculate
self.readnoise = readnoise
# Set up our basic FITS image info
self.header = None
self._binning = None
self._subframe_origin = None
self._HDU_unbinned = None
self._we_opened_file = None
# Keep property for later use/speedy access
self._hist_of_im = None
self._back_level = None
# These are in pixels
self._obj_center = None
self._desired_center = desired_center
if self._desired_center is not None:
self._desired_center = np.asarray(self._desired_center)
# --> Work with these
self.obj_center_err = np.asarray((1.,1.))
self.desired_center_tolerance = np.asarray((5.,5.))
# 0 -- 10 scale indicating quality of obj_center and
# desired_center calculations
self.quality = 0
# astropy time object for calc_flex_pix_rate
self.TRateChange = None
self.Tmidpoint = None
# Amount of guide box motion since first observation
# units=main camera pixels
self.total_flex_dpix = None
# one-time motion, just before exposure
self.delta_pix = None
# Make the guts of __init__ methods that can be overridden
# --> Here is where I would make the division between ObsData
# and PGData. PGData would init the rates and stuff + read
# the ObsData. The ObsData would have a cleanup method that
# otherwise would not be called
# Read our image
self.read_im(HDUList_im_or_fname)
# Populate our object
self.populate_obj()
self.cleanup()
def populate_obj(self):
"""Calculate quantities that will be stored long-term in object"""
# Note that if MaxIm is not configured to write IRAF-complient
# keywords, IMAGETYP gets a little longer and is capitalized
# http://diffractionlimited.com/wp-content/uploads/2016/11/sbfitsext_1r0.pdf
kwd = self.header['IMAGETYP'].upper()
if 'DARK' in kwd or 'BIAS' in kwd or 'FLAT' in kwd:
raise ValueError('Not able to process IMAGETYP = ' + self.header['IMAGETYP'])
# Do our work & leave the results in the property
self.obj_center
self.desired_center
# --> CHANGE ME BACK
self._desired_center = np.asarray((1100, 1150))
def cleanup(self):
"""Close open file, deference large arrays"""
if self._we_opened_file:
self.close_fits()
del self.HDUList
del self._HDU_unbinned
def read_im(self, HDUList_im_or_fname=None):
"""Populate ObsData with HDUList and associated info"""
self.HDUList = get_HDUList(HDUList_im_or_fname)
# Store the original shape of our image so we can do
# coordinate calculations without it
self.oshape = np.asarray(self.HDUList[0].data.shape)
if isinstance(HDUList_im_or_fname, np.ndarray):
# We don't have any metadata
return self.HDUList
# All other options should have HDUList already populated with
# stuff we need. Copy stuff into our local property as needed
if isinstance(HDUList_im_or_fname, str):
self._we_opened_file = True
# Store the header in our object. This is just a
# reference at first, but after HDUList is deleted, this
# becomes the only copy
# https://stackoverflow.com/questions/22069727/python-garbage-collector-behavior-on-compound-objects
self.header = self.HDUList[0].header
# Calculate an astropy Time object for the midpoint of the
# observation for ease of time delta calculations.
# Account for darktime, if available
try:
exptime = self.header.get('DARKTIME')
if exptime is None:
exptime = self.header['EXPTIME']
# Use units to help with astropy.time calculations
exptime *= u.s
self.Tmidpoint = (Time(self.header['DATE-OBS'],
format='fits')
+ exptime/2)
except:
log.warning('Cannot read DARKTIME and/or EXPTIME keywords from FITS header')
try:
# Note Astropy Pythonic transpose Y, X order
self._binning = (self.header['YBINNING'],
self.header['XBINNING'])
self._binning = np.asarray(self._binning)
# This is in binned coordinates
self._subframe_origin = (self.header['YORGSUBF'],
self.header['XORGSUBF'])
self._subframe_origin = np.asarray(self._subframe_origin)
except:
log.warning('Could not read binning or subframe origin from image header. Did you pass a valid MaxIm-recorded image and header? Assuming binning = 1, subframe_origin = 0,0')
self._binning = np.asarray((1,1))
self._subframe_origin = (0,0)
if self.recalculate == True:
# We don't want to use values stored in the file, this
# forces recalculate
return self.HDUList
try:
cx = self.header['OBJ_CR0']
cy = self.header['OBJ_CR1']
self._obj_center = np.asarray((cy, cx))
dx = self.header['DES_CR0']
dy = self.header['DES_CR1']
self._desired_center = np.asarray((dy, dx))
except:
# It was worth a try
pass
return self.HDUList
def unbinned(self, coords):
"""Returns coords referenced to full CCD given internally stored binning/subim info"""
coords = np.asarray(coords)
return np.asarray(self._binning * coords + self._subframe_origin)
def binned(self, coords):
"""Assuming coords are referenced to full CCD, return location in binned coordinates relative to the subframe origin"""
coords = np.asarray(coords)
return np.asarray((coords - self._subframe_origin) / self._binning)
def im_unbinned(self, a):
"""Returns an unbinned version of a. a must be same shape
as the primary HDU image
"""
assert a.shape == self.HDUList[0].data.shape
# Don't bother if we are already unbinned
if np.sum(self._binning) == 2:
return a
newshape = self._binning * a.shape
# From http://scipy-cookbook.readthedocs.io/items/Rebinning.html
assert len(a.shape) == len(newshape)
slices = [ slice(0,old, float(old)/new)
for old,new in zip(a.shape,newshape) ]
coordinates = np.mgrid[slices]
indices = coordinates.astype('i') #choose the biggest smaller integer index
unbinned = a[tuple(indices)]
# Check to see if we need to make a larger array into which to
# plop unbinned array
if np.sum(self._subframe_origin) > 0:
# Note subframe origin reads in binned pixels
origin = self.unbinned(self._subframe_origin)
full_unbinned = np.zeros(origin + unbinned.shape)
full_unbinned[origin[0]:, origin[1]:] = unbinned
unbinned = full_unbinned
return unbinned
@property
def HDU_unbinned(self):
"""Returns an unbinned version of the primary HDU image or the primary HDU image if it is not binned.
"""
if self._HDU_unbinned is not None:
return self._HDU_unbinned
self._HDU_unbinned = self.im_unbinned(self.HDUList[0].data)
return self._HDU_unbinned
def close_fits(self):
if self.HDUList.fileinfo(0) is not None:
self.HDUList.close()
self._we_opened_file = None
def imshow(self, im=None):
if im is None:
im = self.HDUList[0].data
plt.imshow(im)
plt.show()
@property
def obj_center(self):
"""Returns pixel coordinate of the brightests object in the image in
UNBINNED Y, X coordinates. Does basic median filtering to get
rid of cosmic rays. It is assumed this will be overridden
with better object finders, such as one that uses PinPoint
astrometry.
"""
if self._obj_center is not None:
return self._obj_center
# Take the median to get rid of cosmic rays
im = self.HDUList[0].data
im = signal.medfilt(im, kernel_size=3)
im_center = np.unravel_index(np.argmax(im), im.shape)
# Pretty-print our object center before we unbin
log.debug('Object center (X, Y; binned) = ' + str(im_center[::-1]))
self._obj_center = self.unbinned(im_center)
# Set quality just above the border, since we haven't done
# much work on this
self.quality = 6
self.header['OBJ_CR0'] = (self._obj_center[1], 'Object center X')
self.header['OBJ_CR1'] = (self._obj_center[0], 'Object center Y')
self.header['QUALITY'] = (self.quality, 'Quality on 0-10 scale of center determination')
return self._obj_center
@property
def desired_center(self):
"""If desired_center hasn't been explicitly set, this returns the
geometric center of image. NOTE: The return order of indices
is astropy FITS Pythonic: Y, X
"""
if self._desired_center is not None:
return self._desired_center
im = self.HDUList[0].data
im_center = np.asarray(im.shape)/2
self._desired_center = self.unbinned(im_center)
self.header['DES_CR0'] = (self._desired_center[1], 'Desired center X')
self.header['DES_CR1'] = (self._desired_center[0], 'Desired center Y')
return self._desired_center
# Allow user to move desired center around
@desired_center.setter
def desired_center(self, value):
self._desired_center = value
# --> I don't think I need these
## World coordinates may be calculated by of some subclasses.
## Worst case scenario, we calculate them with MaxImControl.scope_wcs
## when we need them
#@property
#def w_obj_center(self):
# """World coordinates of object center"""
# return self._w_obj_center
#
#@w_obj_center.setter
#def w_obj_center(self, value):
# self._w_obj_center = value
#
#@property
#def w_desired_center(self):
# """World coordinates of object center"""
# return self._w_desired_center
#
#@w_desired_center.setter
#def w_desired_center(self, value):
# self._w_desired_center = value
#
#@property
#def dra_ddec(self):
# if self._dra_ddec is not None:
# return self._dra_ddec
# # This will raise its own error if the world coordinates have
# # not been calculated
# self._dra_ddec = self.w_obj_center - self.w_desired_center
# return self._dra_ddec
#Daniel
if True:
class MakeList():
def __init__(self, mlist=None):
if mlist is None:
self._mlist = []
else:
if isinstance(mlist, list):
self._mlist = mlist
else:
raise TypeError('Input must be a list.')
def append(self, item):
self._mlist.append(item)
#Daniel
class FakeWeather():
def __init__(self):
self.Safe = True
class MaxImControl():
"""Controls MaxIm DL via ActiveX/COM events.
Notes:
MaxIm camera, guide camera, and telescope must be set up properly
first (e.g. you have used the setup for interactive observations).
Even so, the first time this is run, keep an eye out for MaxIm
dialogs, as this program will hang until they are answered. To
fix this, a watchdog timer would need to be used.
Technical note for downstream object use: we don't have access to
the MaxIm CCDCamera.ImageArray, but we do have access to similar
information (and FITS keys) in the Document object. The CCDCamera
object is linked to the actual last image read, where the Document
object is linked to the currently active window. This means the
calling routine could potentially expect the last image read in
but instead get the image currently under focus by the user. The
solution to this is to (carefully) use notify events to interrupt
MaxIm precisely when the event you expect happens (e.g. exposure
or guide image acquired). Then you are sure the Document object
has the info you expect. Beware that while you have control,
MaxIm is stuck and bad things may happen, like the guider might
get lost, etc. If your program is going to take a long time to
work with the information it just got, figure out a way to do so
asynchronously
"""
def __init__(self,
main_astrometry=None,
guider_astrometry=None,
default_filt=default_filt):
if sys.platform != 'win32':
raise EnvironmentError('Can only control camera and telescope from Windows platform')
self.main_astrometry = main_astrometry
if self.main_astrometry is None:
self.main_astrometry = run_level_main_astrometry
if isinstance(self.main_astrometry, str):
with fits.open(self.main_astrometry) as HDUList:
self.main_astrometry = HDUList[0].header
self.guider_astrometry = guider_astrometry
if self.guider_astrometry is None:
self.guider_astrometry = run_level_guider_astrometry
if isinstance(self.guider_astrometry, str):
with fits.open(self.guider_astrometry) as HDUList:
self.guider_astrometry = HDUList[0].header
self.default_filt = default_filt
self.alignment_mode = None
self.guider_cal_pierside = None
self.pier_flip_on_side = None
# Pattern this after Telescope.GuideRateRightAscension and
# Telescope.GuideRateDeclination, which are the telescope
# guide rates in deg/s, assuming they can be read from the
# telescope. Here we can store them as an np.array
self.guide_rates = None # degrees/s
self.guider_exptime = None
self.guider_commanded_running = None
# --> Eventually make this some sort of configurable
# --> thing, since not all filter wheels need it
self.main_filt_change_time = 10 # seconds it takes to guarantee filter change
# Don't move the guide box too fast
self.guide_box_steps_per_pix = 2
self.guider_settle_cycle = 5
self.guider_settle_tolerance = 0.5
self.loop_sleep_time = 0.2
self.guider_max_settle_time = 120 # seconds
# --> Kind of a hack to have this here. Eventually want to
# --> integrate MaxImControl object with ACP better
self.ACPUtil = None
self.weather_server = None
# Create containers for all of the objects that can be
# returned by MaxIm. We'll only populate them when we need
# them. Some of these we may never use or write code for
self.Application = None
self.CCDCamera = None
self.Document = None
self.Telescope = None
# This helps with logic that I don't use yet
self.telescope_connectable = None
# This helps PrecisionGuide dance around ACP controlling the
# focuser directly
self.focuser_previously_connected = None
self.previous_guider_filter = None
self.original_GuiderAutoPierFlip = None
# There is no convenient way to get the FITS header from MaxIm
# unless we write the file and read it in. Instead allow for
# getting a selection of FITS keys to pass around in a
# standard astropy fits HDUList
self.FITS_keys = None
self.HDUList = None
self.required_FITS_keys = ('DATE-OBS', 'EXPTIME', 'EXPOSURE', 'XBINNING', 'YBINNING', 'XORGSUBF', 'YORGSUBF', 'FILTER', 'IMAGETYP', 'OBJECT')
# We can use the CCDCamera.GuiderMaxMove[XY] property for an
# indication of how long it is safe to press the guider
# movement buttons, but "Max" is not very much -- 0.1 - 3.0s
# according to MaxIm documentation, so be liberal with this
self.guider_max_move_multiplier = 20
# --> Too little motion seems to freeze the system, at
# least sometimes
self.horizon_limit_value = horizon_limit
self.max_guide_num_steps = 8
self.connect()
if self.telescope_connectable:
self.alignment_mode = self.Telescope.AlignmentMode
else:
# --> Eventually this warning might go away as we might
# --> use some property of our own to track the mount type
log.error("Mount is not connected -- did you specify one in setup [currently the software source code]? If you have a German equatorial mount (GEM), this software will likely not work properly upon pier flips [because code has not yet been written to let you specify the mode of your telescope on the fly]. Other mount types will work OK, but you should keep track of the Scope Dec. box in MaxIm's Guide tab.")
# This sets self.pier_flip_on_side and makes sure the guider
# astrometry is aligned with the guider cal
self.guider_motor_reverse_setup()
# Now line up our main astrometry with the guider in case it
# was recorded on the opposite side of a GEM flip
main_astrometry_pierside = self.main_astrometry.get('PIERSIDE')
if self.alignment_mode == win32com.client.constants.algGermanPolar:
if main_astrometry_pierside is None:
raise EnvironmentError('Connected to GEM mount yet no PIERSIDE was recorded in main astrometry FITS header. Was MaxIm connected to the telescope when the astrometry was recorded?')
if main_astrometry_pierside != self.guider_cal_pierside:
self.main_astrometry \
= pier_flip_astrometry(self.main_astrometry)
self.check_guider_speeds()
self.previous_GuiderReverseX = self.CCDCamera.GuiderReverseX
self.previous_GuiderReverseY = self.CCDCamera.GuiderReverseY
def __enter__(self):
return(self)
def __exit__(self, exception_type, exception_value, traceback):
# --> Try to get telescope disconnected properly so APCC can
# --> exit without having to kill it
if self.telescope_connectable:
self.Telescope.Connected = False
# This kills the link to the CCD camera unless
# self.CCDCamera.DisableAutoShutdown = True by someone. ACP
# sets this to True but FocusMax does not. See notes in
# IoIO.notebk about C-c dance if you want to have
# CCDCamera.DisableAutoShutdown = False and not kill camera
# link
if self.CCDCamera:
self.guider_stop()
# --> consider putting this back to 0 instead of previous
# --> and really this should be in the coronagraph
# --> subclass instead of here, but that is a project for
# --> a later date
self.CCDCamera.GuiderFilter = self.previous_guider_filter
#self.CCDCamera.LinkEnabled = False
# Put the MaxIm focuser connection back to its previous state
self.Application.FocuserConnected = self.focuser_previously_connected
self.CCDCamera.GuiderAutoPierFlip = self.original_GuiderAutoPierFlip
# --> Not sure if I need to do these or if they mess it up worse
self.Application = None
self.CCDCamera = None
self.Telescope = None
def connect(self):
"""Link to weather safety monitor, telescope, CCD camera(s), filter wheels, etc."""
try:
self.ACPUtil = win32com.client.Dispatch(ACPUtil)
self.weather_server = self.ACPUtil.Weather
test = self.ACPUtil.Weather.Safe
except Exception as e:
log.error('Received the following error: ' + str(e))
log.warning('Seems to be an ACP weather server problem, forging ahead with no weather protection!')
self.weather_server = FakeWeather()
# MaxIm can connect to the telescope and use things like
# pier side to automatically adjust guiding calculations,
# but it doesn't make the telescope pier side available to
# the user. That means we need to connect separately for
# our calculations. Furthermore, ACP doesn't like to have
# MaxIm connected to the telescope while guiding (except
# through the ASCOM guide ports or relays), so we need to
# do everything out-of-band
self.getTelescope()
if self.telescope_connectable:
self.Telescope.Connected = True
if self.Telescope.Connected == False:
raise EnvironmentError('Link to telescope failed. Is the power on to the mount?')
self.getApplication()
## --> ACP doesn't like MaxIm being connected to the
## --> telescope. We will have to use the property of
## --> telescope and copy over to appropriate places in
## --> MaxIm, as if we were operating by hand
#self.Application.TelescopeConnected = True
#if self.Application.TelescopeConnected == False:
# raise EnvironmentError('MaxIm link to telescope failed. Is the power on to the mount?')
# --> ACP doesn't like MaxIm being connected to the focuser,
# but some of my IoIO things do need that for the time being
# --> This is really at the wrong level -- I should have
# ACP_IPT_Na_R take care of this as an object
self.focuser_previously_connected = self.Application.FocuserConnected
self.getCCDCamera()
self.CCDCamera.LinkEnabled = True
if self.CCDCamera.LinkEnabled == False:
raise EnvironmentError('Link to camera hardware failed. Is the power on to the CCD (including any connection hardware such as USB hubs)?')
# Let the guider filter and AutoPierFlip be put back to
# previous states after we use it
self.previous_guider_filter = self.CCDCamera.GuiderFilter
self.original_GuiderAutoPierFlip = self.CCDCamera.GuiderAutoPierFlip
# Keep CCD link up after script exits (thanks to Daniel!)
self.CCDCamera.DisableAutoShutdown = True
def getTelescope(self):
if self.Telescope is not None:
return
try:
self.Telescope = win32com.client.Dispatch(default_telescope)
self.telescope_connectable = True
except:
if telescope_must_be_connectable:
raise EnvironmentError('Error instantiating telescope control object ' + default_telescope + '. Is the telescope on and installed?')
else:
log.warning('Not able to connect to telescope. Some features like auto pier flip for German equatorial mounts (GEMs) and automatic declination compensation for RA motions will not be available. --> eventually make some sort of menu to select mount type or grep that from ACP config')
self.telescope_connectable = False
else:
# Catch any other weird errors
assert isinstance(self.Telescope, win32com.client.CDispatch)
def getApplication(self):
if self.Application is not None:
return True
try:
self.Application = win32com.client.Dispatch("MaxIm.Application")
except:
raise EnvironmentError('Error creating MaxIM application object. Is MaxIM installed?')
# Catch any other weird errors
assert isinstance(self.Application, win32com.client.CDispatch)
def getCCDCamera(self):
if self.CCDCamera is not None:
return True
try:
self.CCDCamera = win32com.client.Dispatch("MaxIm.CCDCamera")
#win32com.client.WithEvents(self.CCDCamera,
# self.CCDCameraEventHandler)
except:
raise EnvironmentError('Error creating CCDCamera object. Is there a CCD Camera set up in MaxIm?')
# Catch any other weird errors
assert isinstance(self.CCDCamera, win32com.client.CDispatch)
# --> This is an event handler that doesn't work
class CCDCameraEventHandler():
#"""This hopefully magically receives the names of events from the client"""
# https://vlasenkov.blogspot.ru/2017/03/python-win32com-multithreading.html
def CCDCamera_Notify(self, event_code):
log.debug('Received event_code = ' + str(event_code))
def getDocument(self):
"""Gets the document object of the last CCD camera exposure"""
#"""Gets the document object of the current window"""
# The CurrentDocument object gets refreshed when new images
# are taken, so all we need is to make sure we are connected
# to begin with
# NOTE: the Application.CurrentDocument is not what we
# want, since that depends on which window has focus.
if self.Document is not None:
return True
self.getCCDCamera()
try:
self.Document = self.CCDCamera.Document
except:
raise EnvironmentError('Error retrieving document object')
#self.getApplication()
#try:
# self.Document = self.Application.CurrentDocument
#except:
# raise EnvironmentError('Error retrieving document object')
# Catch any other weird errors
assert isinstance(self.Document, win32com.client.CDispatch)
def guider_motor_reverse_setup(self):
"""Set up property for guiding and moving the telescope with guider
slews.
Details: Set up property so guiding works regardless of
whether or not MaxIm is connected to the telescope and
regardless of how the guider was calibrated. Also set up
property so we can use use MaxIm's control of the guider
inputs of the mount to do small motions to center our target.
To briefly review how autoguiding works, MaxIm measures the
current X and Y position of the guide star and calculates
the number of pixels it needs to move to get to the desired
guide position. Of course, MaxIm doesn't physically move
pixels in the camera, it moves the telescope. So there
needs to be some sort of calibration that goes between the
two. GuiderXSpeed, GuiderYSpeed, and GuiderAngle is that
calibration.
MaxIm calibrates the guider by finding the brightest star in
the FOV. The +X and +Y buttons are operated such that the
star is moved in an "L" shape. First in the +-X direction
and then in the +/-Y direction. It is up to the user to
connect the X and Y leads that MaxIm is operating to the RA
and DEC leads of the mount. Generally +X is considered +RA,
which is E, or *left* as you look at the sky with no camera
mirroring or rotation effects and the +Y lead is north.
Because it is the mount that moves E and N, the star, which
is fixed on the sky, appears to move W and S. MaxIm, of
course, knows this and applies the appropriate minus sign
when calculating the GuiderXSpeed and GuiderYSpeed
quantities. These are the speed in guider camera pixels per
second the mount moves when X and Y are pressed. Note some
confusion may arise because MaxIm pixel coordinates (0,0)
are at the *top* left of the image, which is the up/down
mirror from normal Cartesian coordinates. Thus, if N is up
as displayed by MaxIm, +N ends up being -Y in MaxIm
GuiderYSpeed. A final quantity is necessary: GuiderAngle,
the angle CCW from N of the +Y telescope motion direction.
Whew!
Now add the complication of a GEM.
A GEM effectively operates in two modes because it has to
carry the telescope on one side or the other of the pier to
be able to view all of the sky
https://ascom-standards.org/Help/Platform/html/P_ASCOM_DeviceInterface_ITelescopeV3_SideOfPier.htm
has a discussion of how this is somewhat awkwardly
implemented in ASCOM. Add to this the complication that
different GEM mounts do different things to their coordinate
systems when the mount is on the two different sides. RA is
generally left alone, since on either side of the pier, the
RA axis still has to rotate toward the west. But DEC
conventions vary. When the mount flips and points the
telscope at the same place on the sky (issues of
counterweight up aside), RA flips 180 and DEC flips 180,
resulting in the telescope tube *rotating* 180. Some
mounts, like my Astro-Physics 1100, leave the DEC motors
connected in the same sense on both sides of the mount.
This results in somewhat counter-intuitive behavior of the
telescope tube when you press N. When near the equator, on
one side of the mount (ASCOM pierWest, looking east), N
moves you torward N, on the other (e.g. when I am in Park
4), it moves you toward S. Other mounts (Paramount, I
think) define the preferred N goes N side as ASCOM pierEast.
Still other mounts (PlaneWave, I think) flip N and S when
you flip the mount, so N always moves the tube N.
No matter what the mount does with N/S, one thing never
changes on pier flip: the tube rotates 180 degrees such that
if N was up before, it is now down. And if E was left in
the camera, it is now right. When doing absolute
astrometry, this matters a lot, but when guiding, it gets a
little simpler. On mounts that don't reverse the N/S motor
sense on flip (e.g. Astro-Physics, Paramount), because both
astrometric N/S and motion N/S have flipped, the guider can
blithly press the same N or S button to get the star to move
the expected direction in camera Y coordinates. X is not so
lucky. RA has really flipped in the camera but not on the
mount. Someone needs to keep track of that flip. As
described below, MaxIm has that capacity both for
interactive and scripted use.
For interactive use, MaxIm uses the Pier Flip box in the
Camera->Guide tab together with the Guider Motor Control On
Pier Flip radio buttons in the Guide -> Settings -> Advanced
tab to let the user fix guider pier flip issues. These
controls tell MaxIm to swap the RA and/or DEC connections on
pier flip. For mounts that properly report their pier flip
state via ASCOM, the Auto Pier Flip box can be checked,
which basically keeps track of checking the Pier Flip box
for you. For scripting use, the GuiderReverseX and
GuiderReverseY property can be used. More on those later.
The problem with the "Pier Flip" nomenclature is, of course,
that it is not absolute. "Pier Flip" relative to what? At
some level it doesn't matter. If you calibrated the guider on
one side of the pier and then do a pier flip, then you need to
tell the guider you have pier flipped. Fiddle with the Pier
Flip radio buttons in the Guider Settings Advanced tab until
guiding works and you are done. This also works for
AutoPierFlip, but it happens to have an absolute:
MaxIm "Pier Flip" is ASCOM pierWest looking east (through the
pole). MaxIm normal (no pier flip) is pierEast looking west
So if your mount reports its pierside state properly via
ASCOM, it is probably best to do your first guider
calibration on pierEast looking west (normal). Then enable
MaxIm's AutoPierFlip, do a pier flip, guide, and poke the
radio buttons in Guider Settings Advanced until guiding
works. Once you have those radio buttons set, you can
recalibrate the guider on any side of the mount because
MaxIm will automatically reverse the motor connections as
per your specifications as it is doing the calibration.
Sure you will get GuiderAngle of +180 on opposite sides of
the mount, but if you take that into consideration with the
guider speeds, everything will end up looking like it is
calibrated relative to the normal ASCOM pointing state.
Whew!
Now enter ACP.
It is not clearly stated why but
http://acp.dc3.com/RotatedGuiding.pdf explains that ACP has
chosen the opposite of the mount from MaxIm to act as the
guider calibration standard. ACP users are instructed to
put the mount on pierWest looking east, turn AutoPierFlip
off, and calibrate. This is unfortunate, since it
completely breaks MaxIm guiding. Furthermore, MaxIm
profiles can't be used to fully change between ACP guiding
mode and MaxIm guiding mode since the profile doesn't
preserve the AutoPierFlip state button state.
Since the ACP rotated guiding document states that the ACP
pierWest guider calibration standard is not going to change,
we need to deal with it here. We also want to make it
convenient to be used in normal MaxIm mode. Fortunately, we
can use the hints summarized below to let us figure out which
mode we are in. We can then use the GuiderReverseX and
GuiderReverseY property to manually reverse the sense of the
guider motor connections.
Before describing how we can figure out what side of the mount
the guider was calibrated on and what mode of guiding we are
in, we need to describe the other thing we do with this
software: move the mount so that in the main camera, our
target is centered where we want it. The main camera pixel
coordinates of these quantities are stored in the obj_center
and desired_center property of the ObsData object or its
subclass (e.g. CorObsData). Using the main camera PinPoint
sample astrometry solution, run_level_main_astrometry, we can
translate these pixel coordinates into absolute astrometric
coordinates and figure out how far to move the mount to get
our target centered.
There are several ways we can move the mount: enter in new
coordinates, push mount motion buttons with ASCOM direct, or
push mount motion buttons with whatever MaxIm uses for the
guider. Since we the user has already figured out how to
connect MaxIm to the mount to make the guider work and there
is an easy way to command MaxIm to use that connection via the
scripting interface (CCDCamera.GuiderMove()), we will do the
latter. The problem with this is that motor reverse commands
effect these motor control commands. So if we want to move
the telescope in astrometric coordinate space, we need to keep
track of the motor flips and undo them when we command such moves. --> Alternately,
we could translate our astrometric coordinates into guider
pixel space, apply a guider speed and angle coordinate
translation and the flips would take care of themselves.
Whew!
Now we can write down the various cases of what mode we are
in, how the guider was calibrated, how pier flipping is
tracked and how we need to track that pier flipping to reverse
its effect when we want to use the MaxIm guider motion commands.
(1) MaxIm guiding mode:
Telescope connected to MaxIm
AutoPierFlip enabled
if cal pierEast, directions match astrometry rotated to pierEast
[Guider motor connections can be grepped in this configuration]
if cal pierWest, E/W and/or N/S will be flipped,
depending on Guider Motor Control state
MaxIm will manage all pier flip for the guider
Observing pierWest, unflip E/W and/or N/S for guider moves
(1a) MaxIm guiding mode but scope not connected for some reason
We have to manage all pier flip stuff for the guider via
GuiderReverseX and GuiderReverseY
Observing pierWest, unflip E/W and/or N/S for guider moves
(2) ACP guiding mode:
telescope should not be connected
AutoPierFlip probably disabled
Astrometry directions flipped to pierWest should match
guider cal directions
We have to manage all pier flip stuff for the guider via
GuiderReverseX and GuiderReverseY
Observing pierEast, unflip E/W and/or N/S for guider moves
The MaxIm scripting interface does not provide any property
for the Guider Motor Control radio buttons, so we duplicate
them here as global variables
(guider_motor_control_reverse[XY]), though there is one case
when we can grep them out of the guider cal. If this gets
closely integrated with ACP, we might also be able to get
these from its configuration options.
"""
# The guider astrometry calibration gives us absolute knowledge
# of which directions are which in the guide camera, assuming
# the images were recorded with MaxIm connected to the telescope
# and the telescope reports pier side. The side on which guider
# calibration is done is not noted by MaxIm. However, we can
# compare the GuiderAngle and the north angle in the guider
# astrometry and use the guider astrometry PIERSIDE to tell us
# exactly which side the guider was calibrated on. With the
# guider astrometry lined up with the guider calibration, we can
# see which way the cal directions point to figure out whether
# or not MaxIm was doing any motor reversing while the guider
# calibration was being done
# This will be None if the mount is not a GEM or was not
# connected when astrometry was recorded. Save it for later...
guider_astrometry_pierside = self.guider_astrometry.get('PIERSIDE')
# Find the angle of N in the astrometry image relative to
# MaxIm's N = -Y convention. This is very confusing because
# for MaxIm, in a nominal N up, E left configuration, N is
# toward -Y, since MaxIm plots Y increasing down. In the FITS
# standard, if the cardinal directions are aligned to pixel X,
# Y, increasing Y means moving N. Ultimately, which direction
# N is plotted doesn't really matter: the user tweaks things
# so N is up for them and the FITS CDELT will adjust. If
# plotted in Cartesian coordinates (e.g. IDL, IRAF), the user
# will align N up so CDELT2 is positive. For MaxIm and other
# (0,0) top left oriented displays the user will align N up so
# CDELT2 is negative. But here it matters! We need to
# emulate a MaxIm N vector with the FITS standard so we can
# compare our astrometry image to MaxIm's guider calibration.
# Start with the regular FITS WCS standard, using a
# healthy-size angle to avoid numeric problems
dp = self.scope_wcs((0, 10.),
to_pix=True,
astrometry=self.guider_astrometry,
absolute=True,
delta=True)
# Now negate Y to align with MaxIm's -Y = up standard, keeping
# in mind that scope_wcs returns Y,X.
dp[0] *= -1
# arctan2 takes y, x and uses the normal math angle definition
# (+X axis = 0 deg)
aang = np.degrees(np.arctan2(dp[0], dp[1]))
# Convert angle to N up, +/-180 (already increasing CCW)
aang = angle_norm(aang-90, 180)
# Now compare to guider calibration N up angle. This has yet
# to match precisely, probably because Bob has private
# distortions
gang = angle_norm(self.CCDCamera.GuiderAngle, 180)
log.debug("PinPoint solution angle: " + repr(aang))
log.debug("GuiderAngle: " + repr(gang))
dang = abs(angle_norm(gang - aang, 180))
if ((dang < 90
and dang > guider_cal_astrometry_max_misalignment)
or ((dang > 90)
and dang < 180 - guider_cal_astrometry_max_misalignment)):
raise EnvironmentError('Angle mismatch between Guider PinPoint solution and guider calibration is too large. Record them both at the same time to ensure match')
if dang < 90:
guider_cal_astrometry_aligned = True
else:
# In the case that guider cal and guider pinpoint solution
# are on opposite sides of the pier, we can check for some
# errors
if not self.telescope_connectable:
raise EnvironmentError('GEM pier flip between guider calibration and guider PinPoint solution detected, yet telescope is not connectable. GEM mounts really need to be connected for all of this to work. Alternately, you could calibrate on one side and just stick to that side, or I may write code to specify GEM flips on the fly like MaxIm does')
if self.alignment_mode != win32com.client.constants.algGermanPolar:
raise EnvironmentError('GEM pier flip between guider calibration and guider PinPoint solution detected yet mount is not reporting that it is a GEM')
if guider_astrometry_pierside is None:
raise EnvironmentError('GEM pier flip between guider calibration and guider PinPoint solution detected, yet no PIERSIDE was recorded in guider astrometry FITS header. Was MaxIm connected to the telescope when the astrometry was recorded?')
# If we made it here, there are no errors and we know that
# the guider cal and guider pinpoint solution are on
# opposite sides of the pier
guider_cal_astrometry_aligned = False
if self.alignment_mode != win32com.client.constants.algGermanPolar:
log.debug('non-GEM mount, guider astrometry and guider cal are lined up')
return
# If we made it here, we are a GEM. Rotate our guider
# astrometry to line up with the guider calibration direction
if guider_astrometry_pierside is None:
raise EnvironmentError('Currently connected mount reports it is a GEM, but PIERSIDE was not recorded in guider astrometry FITS header. Was MaxIm connected to the telescope when the astrometry was recorded?')
if guider_cal_astrometry_aligned:
log.debug('Guider astrometry and calibration performed on same side of pier')
self.guider_cal_pierside = guider_astrometry_pierside
else:
log.debug('Guider astrometry and calibration performed on opposite side of pier. Flipping guider astrometry to line up with guider cal')
if guider_astrometry_pierside == 'EAST':
self.guider_cal_pierside = 'WEST'
if guider_astrometry_pierside == 'WEST':
self.guider_cal_pierside = 'EAST'
# --> this tweaks our guider image header in memory
self.guider_astrometry \
= pier_flip_astrometry(self.guider_astrometry)
# Don't forget to flip aang, our astrometry N angle, since
# it is used below
aang = angle_norm(aang+180, 180)
log.debug('guider calibrated on pier ' + self.guider_cal_pierside)
# Now see what direction E turns out to be relative to N.
dp = self.scope_wcs((10., 0),
to_pix=True,
astrometry=self.guider_astrometry,
absolute=True,
delta=True)
# Just in case there is a sizable component of up/down in E,
# make sure we cast our synthetic E vector onto our MaxIm
# coordinate system, where Y is in the opposite sense from the
# WCS conventions
dp[0] *= -1
eang = np.degrees(np.arctan2(dp[0], dp[1]))
# Convert angle to N up, +/-180 (already increasing CCW)
eang = angle_norm(eang-90, 180)
# Rotate into N up coordinate
eang = angle_norm(eang - aang, 180)
log.debug('aang, eang = ' + repr((aang, eang)))
# At this point eang will be +90 if E goes left on the camera,
# -90 if it goes W. ASSUMING GUIDER +X CONNECTED TO E
if np.sign(eang*self.CCDCamera.GuiderXSpeed) == -1:
guider_cal_Xflip = False
else:
guider_cal_Xflip = True
# ASSUMING GUIDER +Y CONNECTED TO N and keeping in mind +Y is
# _down_ in MaxIm
if np.sign(self.CCDCamera.GuiderYSpeed) == -1:
guider_cal_Yflip = False
else:
guider_cal_Yflip = True
# Now do the best we can with our table of what mode we are in
if self.guider_cal_pierside == 'WEST':
if guider_cal_Xflip or guider_cal_Yflip:
log.debug('Assuming normal motor connections of E = -X, N = +Y, motor reversal(s) detected on pierWest guider cal. Setting to pier flip on pierWest. This is almost certainly MaxIm mode.')
self.pier_flip_on_side = win32com.client.constants.pierWest
else:
log.debug('Assuming normal motor connections of E = -X, N = +Y, no motor reversals were detected on pierWest guider cal. Setting to pier flip on pierEast. This is almost certainly ACP mode.')
self.pier_flip_on_side = win32com.client.constants.pierEast
if self.guider_cal_pierside == 'EAST':
if guider_cal_Xflip or guider_cal_Yflip:
log.warning('Pier flip detected for pierEast guider cal. This is not a valid MaxIm pier flip state and ACP does not allow calibration on pierEast. Do you have your motor control leads hooked up in the normal way: E = -X, N = +Y? For now I will assume the connections are normal and just set pier_flip_on_side pierEast. If the guider pushes the star out or precision guide moves the telescope in the wrong way and it is not possible for you to change the motor control leads, contact the developer and ask for an abstraction layer to be added')
self.pier_flip_on_side = win32com.client.constants.pierEast
else:
log.debug('Assuming normal motor connections of E = -X, N = +Y, no motor reversal(s) detected on pierEast guider cal. Setting to pier flip on pierWest. This is almost certainly MaxIm mode.')
self.pier_flip_on_side = win32com.client.constants.pierWest
def set_guider_motor_reverse_and_DEC(self):
# --> I am not sure the effect of this if the telescope is
# --> connected and Auto Scope Dec is selected
self.CCDCamera.GuiderDeclination = self.Telescope.Declination
if self.alignment_mode != win32com.client.constants.algGermanPolar:
log.debug('Not GEM, no motor reversal')
return
if self.pier_flip_on_side == win32com.client.constants.pierEast:
log.debug("ACP guider calibration mode detected. Don't let MaxIm manage pier flip...")
self.CCDCamera.GuiderAutoPierFlip = False
elif (self.Application.TelescopeConnected
and self.CCDCamera.GuiderAutoPierFlip):
# ACP does pier flips on east side
log.debug('Let MaxIm manage pier flip state: guider calibrated in MaxIm mode, MaxIm is connected to the telescope and Auto Pier Flip is on.')
# Set these to False, since otherwise they would confuse
# us and MaxIm
self.CCDCamera.GuiderReverseX = False
self.CCDCamera.GuiderReverseY = False
return
else:
log.debug("MaxIm is not managing pier flip...")
if self.Telescope.SideOfPier == self.pier_flip_on_side:
self.CCDCamera.GuiderReverseX = guider_motor_control_reverseX
self.CCDCamera.GuiderReverseY = guider_motor_control_reverseY
log.debug("... flip detected...")
else:
self.CCDCamera.GuiderReverseX = False
self.CCDCamera.GuiderReverseY = False
log.debug("... no flip detected...")
log.debug("CCDCamera.GuiderReverseXY = " + repr((self.CCDCamera.GuiderReverseX, self.CCDCamera.GuiderReverseY)))
def guider_motor_reverse_state(self):
"""Return np.array indicating motor reverse state"""
# See guider_motor_reverse_setup for explanation
revXY = np.asarray([1,1])
if self.alignment_mode != win32com.client.constants.algGermanPolar:
log.debug('Not GEM, no motor reversal')
return revXY
if self.Application.TelescopeConnected:
log.debug("MaxIm is connected to the telescope...")
if self.pier_flip_on_side == win32com.client.constants.pierEast:
log.debug("... but guider calibrated in ACP mode, so inspecting GuiderReverseXY...")
elif self.CCDCamera.GuiderAutoPierFlip:
log.debug("...and managing pier flip...")
if (self.Telescope.SideOfPier
== win32com.client.constants.pierEast):
log.debug("...but telescope is on pierEast, so no motor reversal")
elif (self.Telescope.SideOfPier
== win32com.client.constants.pierWest):
log.debug("... and reversing motor motion because the telescope is on pierWest")
if guider_motor_control_reverseX:
revXY *= np.asarray([-1,1])
if guider_motor_control_reverseY:
revXY *= np.asarray([1,-1])
else:
raise EnvironmentError('Inconsistent Telescope.SideofPier ' + repr(self.Telescope.SideOfPier))
else:
log.debug('... but not managing pier flip, so inspecting GuiderReverseXY...')
else:
log.debug('MaxIm is not connected to the telescope, so inspecting GuiderReverseXY...')
if self.CCDCamera.GuiderReverseX:
log.debug('... GuiderReverseX is set')
revXY *= np.asarray([-1,1])
if self.CCDCamera.GuiderReverseY:
log.debug('... GuiderReverseY is set')
revXY *= np.asarray([1,-1])
log.debug('Motor reversal state = ' + repr(revXY))
return revXY
def check_guider_speeds(self):
"""Check guider X and Y speeds against telescope guide rates.
This also sets the self.guide_rates property"""
# Do this by creating synthetic guider calibrations. CDELT*
# are the same regardless of DEC, but DEC must be considered
# when doing coordinate transformation from pixels to world,
# so just set the guider astrometry DEC to 0, saving current
# value so we can put it back later
save_crval2 = self.guider_astrometry['CRVAL2']
self.guider_astrometry['CRVAL2'] = 0
# Create a notional time to move in pixel space. It would be
# great to use the actual guider cal times, which defaults to
# 10s, but MaxIm does not make those available. So just use
# 10s
dt = 10
dx = self.CCDCamera.GuiderXSpeed * dt
dy = self.CCDCamera.GuiderYSpeed * dt
# Transpose, since we are in pix
dra_ddec = self.scope_wcs((dy, dx),
to_world=True,
astrometry=self.guider_astrometry,
absolute=True,
delta=True)
guider_cal_guide_rates = dra_ddec/dt
# Use cal speeds by default but see if we can refine them with
# scope rates
self.guide_rates = guider_cal_guide_rates
if self.telescope_connectable and self.Telescope.CanSetGuideRates:
# Always assume telescope reported guide rates are
# correct, but warn if guider rates are off by 10%,
# keeping in mind that MaxIm seems to consider a pixel
# size to be its diameter
self.guide_rates \
= np.asarray((self.Telescope.GuideRateRightAscension,
self.Telescope.GuideRateDeclination))
dgrp = np.linalg.norm(self.guide_rates)
dgcgrp = np.linalg.norm(guider_cal_guide_rates)
dr = np.abs(dgrp - dgcgrp)
if dr > 0.1 * dgrp:
log.warning('Guider calibration rate is off by more than 10% of the scope reported rate: ' + repr((self.guide_rates, np.abs(guider_cal_guide_rates))) + '. Norm of these: ' + repr((np.linalg.norm(self.guide_rates), np.linalg.norm(guider_cal_guide_rates))) + '. Have you specified the correct guider astrometry image? Have the guide rates changed since calibrating the guider? Assuming reported telescope guide rates are correct.')
self.guider_astrometry['CRVAL2'] = save_crval2
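# Editorial aside, not part of the original class: the 10% consistency check
# above can be reproduced offline with plain numpy. The rate values below are
# hypothetical placeholders, not measured calibrations.
import numpy as np
scope_rates = np.asarray([0.0042, 0.0042])   # deg/s reported by the mount (hypothetical)
cal_rates = np.asarray([0.0040, 0.0046])     # deg/s derived from guider calibration (hypothetical)
dr = np.abs(np.linalg.norm(scope_rates) - np.linalg.norm(cal_rates))
if dr > 0.1 * np.linalg.norm(scope_rates):
    print('guider calibration and scope guide rates disagree by >10%')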
def horizon_limit(self):
return (not self.Telescope.Tracking
or self.Telescope.Altitude < self.horizon_limit_value)
# For now use self.Application.ShutDownObservatory()
#def do_shutdown(self):
# self.Telescope.Park()
# return True
#
#def check_shutdown(self):
# # if weather: AstroAlert.Weather
# # self.do_shutdown()
#def set_GuiderReverse_and_DEC(self):
# # --> Eventually, I could include GuiderReverseY
# # Tell MaxIm about pier flip and scope DEC manually in case we
# # are in ACP-mode.
# if self.ACP_mode:
# self.CCDCamera.GuiderReverseX \
# = (self.Telescope.SideOfPier
# == win32com.client.constants.pierEast)
# log.debug("GuiderReverseX set to " + repr(self.CCDCamera.GuiderReverseX))
# self.CCDCamera.GuiderDeclination = self.Telescope.Declination
# log.debug("Guider DEC set to " + repr(self.CCDCamera.GuiderDeclination))
def guider_cycle(self, n=1):
"""Returns average and RMS guider error magnitude after n guider cycles
Parameters
----------
n : int like
Number of guider cycles. Default = 1
"""
if not self.CCDCamera.GuiderRunning:
log.warning('Guider not running')
return None
this_norm = 0
running_total = 0
running_sq = 0
for i in range(n):
assert self.weather_server.Safe, ('Weather is not safe!')
# --> Need a timeout
while self.CCDCamera.GuiderNewMeasurement is False:
time.sleep(self.loop_sleep_time)
# As per MaxIm documentation, reading these clears
# GuiderNewMeasurement
this_norm = np.linalg.norm(
(self.CCDCamera.GuiderYError,
self.CCDCamera.GuiderXError))
running_total += this_norm
running_sq += this_norm**2
return (running_total/n, (running_sq/n)**0.5)
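# Editorial aside, not part of the original class: guider_cycle above keeps a
# running mean and RMS of the per-cycle error norm; the same statistics can be
# checked offline from logged errors. xerr/yerr below are hypothetical samples.
import numpy as np
xerr = np.array([0.3, -0.1, 0.2])
yerr = np.array([0.0, 0.4, -0.2])
mags = np.hypot(yerr, xerr)                        # per-cycle error magnitude
avg, rms = mags.mean(), np.sqrt(np.mean(mags**2))  # average and RMS over the cycles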
def guider_settle(self):
"""Wait for guider to settle"""
if not self.CCDCamera.GuiderRunning:
log.warning('Guider not running')
return False
start = time.time()
now = start
av = self.guider_settle_tolerance + 1
rms = av
while (rms > self.guider_settle_tolerance
and av > self.guider_settle_tolerance
and time.time() <= start + self.guider_max_settle_time):
if self.horizon_limit():
log.error('Horizon limit reached')
return False
av, rms = self.guider_cycle(self.guider_settle_cycle)
log.debug('guider AV, RMS = ' + str((av, rms)))
if time.time() > start + self.guider_max_settle_time:
log.warning('Guider failed to settle after ' + str(self.guider_max_settle_time) + 's')
return False
log.debug('GUIDER SETTLED TO ' + str(self.guider_settle_tolerance) + ' GUIDER PIXELS')
return True
def move_with_guide_box(self,
dra_ddec,
dec=None,
guider_astrometry=None):
"""Moves the telescope by moving the guide box. Guide box position is moved gradually relative to instantaneous guide box position, resulting in a delta move relative to any other guide box motion. NOTE: use guider_stop to make sure guider position is properly set after guider is stopped (see that function's documentation).
Parameters
----------
dra_ddec : tuple-like array
delta move in RA and DEC in DEGREES
guider_astrometry : filename, HDUList, or FITS header
Input method for providing an HDUList with WCS
parameters appropriate for the guider (mainly
CDELT*). Defaults to guider_astrometry property
"""
# --> Don't bother checking to see if we have commanded
if not self.CCDCamera.GuiderRunning:
log.error('Guider not running, move not performed')
return False
if guider_astrometry is None:
guider_astrometry = self.guider_astrometry
# Get the rough RA and DEC of our current ("old") guide box
# position. !!! Don't forget that pixel coordinates are
# in !!! TRANSPOSE !!!
op_coords = (self.CCDCamera.GuiderYStarPosition,
self.CCDCamera.GuiderXStarPosition)
D.say('old guidebox coords: ' + repr(op_coords[::-1]))
w_coords = self.scope_wcs(op_coords,
to_world=True,
astrometry=guider_astrometry)
D.say('world coords of old guidebox: ' + repr(w_coords))
# In world coords, we know how far we want to move our guide
# box. Calculate the new guidebox position
# --> I think this is where the MaxIm people and I differ by a minus
# sign in MaxIm 6.20
p_coords = self.scope_wcs(w_coords + dra_ddec,
to_pix=True,
astrometry=guider_astrometry)
D.say('New code p_coords: ' + repr(p_coords[::-1]))
# Now we are in pixel coordinates on the guider.
# Calculate how far we need to move.
# There is some implicit type casting here since op_coords
# is a tuple, but p_coords is an np.array
dp_coords = p_coords - op_coords
# Calculate the length in pixels of our move and the unit
# vector in that direction
norm_dp = np.linalg.norm(dp_coords)
uv = dp_coords / norm_dp
# Move the guide box slowly but have a threshold
if norm_dp < self.guider_settle_tolerance:
num_steps = 1
else:
# Guard against guide_box_steps_per_pix < 1 (fast moving)
num_steps = max((1,
int(self.guide_box_steps_per_pix * norm_dp)))
step_dp = dp_coords / num_steps
log.debug('move_with_guide_box: total guider dpix (X, Y): ' + str(dp_coords[::-1]))
log.debug('norm_dp: ' + str(norm_dp))
log.debug('Number of steps: ' + str(num_steps))
if num_steps > self.max_guide_num_steps:
# We can't do this, so just bomb with a False return
# and let the caller (usually center) handle it
log.error('Maximum number of steps (' + str(self.max_guide_num_steps) + ') exceeded: ' + str(num_steps))
return False
log.debug('Delta per step (X, Y): ' + str(step_dp[::-1]))
for istep in range(num_steps):
# Just in case someone else is commanding the guide
# box to move, use its instantaneous position as the
# starting point of our move !!! TRANSPOSE !!!
cp_coords = np.asarray((self.CCDCamera.GuiderYStarPosition,
self.CCDCamera.GuiderXStarPosition))
tp_coords = cp_coords + step_dp
log.debug('Setting to: ' + str(tp_coords[::-1]))
# !!! TRANSPOSE !!!
self.CCDCamera.GuiderMoveStar(tp_coords[1], tp_coords[0])
if self.horizon_limit():
log.error('Horizon limit reached')
return False
self.guider_settle()
## Give it a few extra cycles to make sure it has stuck
## (though even this might be too short)
#for i in range(self.guide_box_steps_per_pix):
# if self.check_guiding() is False:
# return False
return True
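# Editorial aside, not part of the original class: the stepping logic above
# splits the total guide-box move into at least one step, with roughly
# guide_box_steps_per_pix steps per pixel of motion. Values are hypothetical.
import numpy as np
dp_coords = np.asarray([3.0, -4.0])          # total move in guider pixels (Y, X)
guide_box_steps_per_pix = 2
norm_dp = np.linalg.norm(dp_coords)          # 5.0 pixels
num_steps = max(1, int(guide_box_steps_per_pix * norm_dp))   # 10 steps
step_dp = dp_coords / num_steps              # per-step delta applied in the loop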
#def check_guiding(self):
# # --> the guider doesn't turn off when the star fades
# # --> This algorithm could use improvement with respect to
# # slowing itself down by looking at the guide errors, but
# # it works for now
# if self.guider_exptime is None:
# # If we didn't start the guider, take a guess at its
# # exposure time, since MaxIm doesn't give us that info
# exptime = default_guider_exptime
# else:
# exptime = self.guider_exptime
# # --> This needs to include the guide box read time or
# # else loop which uses it gets guide box position confused
# time.sleep(exptime*3)
# if self.CCDCamera.GuiderRunning:
# return True
# else:
# log.error('Guider stopped running while performing move')
# return False
def astrometry_pier_flip_state(self):
"""Return -1 if the telecsope is pier flipped relative to astrometry, 1 otherwise"""
if self.alignment_mode != win32com.client.constants.algGermanPolar:
return 1
log.debug('self.Telescope.SideOfPier: ' + repr(self.Telescope.SideOfPier))
if self.guider_cal_pierside == self.Telescope.SideOfPier:
log.debug('Telescope is on same side as astrometry')
return 1
log.debug('Telescope is on opposite side from astrometry')
return -1
def move_with_guider_slews(self,
dra_ddec,
guider_astrometry=None):
"""Moves the telescope using guider slews.
Parameters
----------
dra_ddec : tuple-like array
delta move in RA and DEC in DEGREES
guider_astrometry : FITS header
Defaults to guider astrometry in object
"""
log.debug('Preparing to move with guider slews by dRA, dDEC = ' + repr(dra_ddec))
if self.CCDCamera.GuiderRunning:
log.warning('Guider was running, turning off')
self.CCDCamera.GuiderStop
# Use our guide rates to change to time to press E/W, N/S.
# Note that we deal with sign below
dt = dra_ddec/self.guide_rates
# Do a sanity check to make sure we are not moving too much
max_t = (self.guider_max_move_multiplier *
np.asarray((self.CCDCamera.GuiderMaxMoveX,
self.CCDCamera.GuiderMaxMoveY)))
if np.any(np.abs(dt) > max_t):
log.warning('requested move of ' + str(dra_ddec) + ' arcsec translates into move times of ' + str(np.abs(dt)) + ' seconds. Limiting move in one or more axes to max t of ' + str(max_t))
dt = np.minimum(max_t, abs(dt)) * np.sign(dt)
# Or too little
bad_idx = np.where(np.abs(dt) <
np.asarray((self.CCDCamera.guiderMinMoveX,
self.CCDCamera.guiderMinMoveY)))
dt[bad_idx] = 0
log.debug('Seconds to move guider in RA and DEC: ' + str(dt))
# Now we need to translate this into how long to command MaxIm
# to operate the + or - X and Y motors. self.guide_rates gets
# us the right absolute time to push the buttons, but we have
# to work with the sign of the X and Y speed calibrations and
# any pier flips to get the directions right
XYsign = np.sign(np.asarray((self.CCDCamera.GuiderXSpeed,
self.CCDCamera.GuiderYSpeed)))
# First assume we are on the same side of the pier as the
# guider calibration. Recall from guider_motor_reverse_setup
# documentation that +RA is typically connected to the +X
# motor, but +RA is east, east is left on the sky and left is
# -X in a nominally oriented MaxIm camera. Hence the nominal
# GuiderXSpeed is negative for a properly connected +RA to +X
# motor. To have a +RA command the +X motor, we therefore
# need to negate the X part of XYsign. Similarly +DEC is
# connected to the +Y motor, but Y increases down in MaxIm, so
# GuiderYSpeed often comes out negative. Thus:
XYsign = -XYsign
# Now apply our pier flip, since after all that is said and
# done, MaxIm (or we) may be doing some motor reversals.
self.set_guider_motor_reverse_and_DEC()
dt *= XYsign * self.guider_motor_reverse_state()
log.debug('Seconds to command MaxIm to move X and Y guider motors, where +X connects to +RA (nominally -X on the CCD) and +Y connects to +DEC (nominally -Y on the CCD): ' + str(dt))
if dt[0] > 0:
RA_success = self.CCDCamera.GuiderMove(win32com.client.constants.gdPlusX, dt[0])
elif dt[0] < 0:
RA_success = self.CCDCamera.GuiderMove(win32com.client.constants.gdMinusX, -dt[0])
else:
# No need to move
RA_success = True
if not RA_success:
raise EnvironmentError('RA guide slew command failed')
# MaxIm seems to be able to press RA and DEC buttons
# simultaneously, but we can't!
while self.CCDCamera.GuiderMoving:
assert self.weather_server.Safe, ('Weather is not safe!')
time.sleep(0.1)
if dt[1] > 0:
DEC_success = self.CCDCamera.GuiderMove(win32com.client.constants.gdPlusY, dt[1])
elif dt[1] < 0:
DEC_success = self.CCDCamera.GuiderMove(win32com.client.constants.gdMinusY, -dt[1])
else:
# No need to move
DEC_success = True
if not DEC_success:
raise EnvironmentError('DEC guide slew command failed')
while self.CCDCamera.GuiderMoving:
assert self.weather_server.Safe, ('Weather is not safe!')
time.sleep(0.1)
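# Editorial aside, not part of the original class: the move-time computation in
# move_with_guider_slews is dt = delta / guide_rate, clamped to the guider's
# maximum move and zeroed below its minimum move. All values are hypothetical.
import numpy as np
dra_ddec = np.asarray([0.010, -0.002])       # degrees
guide_rates = np.asarray([0.0042, 0.0042])   # deg/s
dt = dra_ddec / guide_rates
max_t = np.asarray([2.0, 2.0])               # multiplier * GuiderMaxMoveX/Y (hypothetical)
min_t = np.asarray([0.01, 0.01])             # GuiderMinMoveX/Y (hypothetical)
dt = np.minimum(max_t, np.abs(dt)) * np.sign(dt)
dt[np.abs(dt) < min_t] = 0.0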
def scope_wcs(self,
coords_in,
to_world=False,
to_pix=False,
astrometry=None,
absolute=False,
delta=False):
"""Computes WCS coordinate transformations to/from UNBINNED PIXELS, using scope coordinates if necessary
Parameters
----------
coords_in : tuple-like array
(List of) coordinates to transform. Pixel coordinates
are in Y, X order, UNBINNED. World coordinates are in
RA, DEC order
to_world : Boolean
perform pix to world transformation
to_pix : Boolean
perform world to pix transformation
astrometry : scope name, filename, HDUList, or FITS header
Input method for providing an HDUList with WCS
parameters appropriate for the CCD being used (mainly
CDELT*). If scope name provided ("main" or "guide"),
the appropriate run level default file will be used.
Can also be a FITS filename or HDUList object.
Default: "main." If astrometry image was taken with
binned pixels, the header keys will be adjusted so the
WCS transformations will be to/from unbinned pixels
absolute : ignore scope position and use position from astrometry
delta : assume coords_in are delta from center of CCD
"""
coords_in = np.asarray(coords_in)
if coords_in.shape[-1] != 2:
raise ValueError('coordinates must be specified in pairs')
if to_world + to_pix != 1:
raise ValueError('Specify one of to_world or to_pix')
# Set up our astrometry
we_opened_file = False
if astrometry is None:
astrometry = 'main'
if isinstance(astrometry, str):
if astrometry.lower() == 'main':
astrometry = self.main_astrometry
elif astrometry.lower() == 'guide':
astrometry = self.guider_astrometry
if isinstance(astrometry, str):
if not os.path.isfile(astrometry):
raise ValueError(astrometry + ' file not found')
# If we made it here, we have a file to open to get
# our astrometry from. Opening it puts the header
# into a dictionary we can access at any time
astrometry = fits.open(astrometry)
we_opened_file = True
if isinstance(astrometry, fits.HDUList):
astrometry = astrometry[0].header
if not isinstance(astrometry, fits.Header):
raise ValueError('astrometry must be a string, FITS HDUList, or FITS header')
# Make sure we don't mess up original
header = astrometry.copy()
if we_opened_file:
astrometry.close()
if header.get('CTYPE1') is None:
raise ValueError('astrometry header does not contain valid WCS keys.')
if absolute:
# In the case of absolute astrometry, we don't have to
# mess with the astrometry pointing keyword or pier flip
# relative to our main or guider reference astrometry images
pier_flip_sign = 1
else:
# The non-absolute case means we are going to use the
# scope's rough RA and DEC to fix the center of the FOV.
# This is most useful for relative astrometry between two
# points in the image
try:
RA = self.Telescope.RightAscension
DEC = self.Telescope.Declination
except:
# If, for some reason the telescope doesn't report
# its RA and DEC, we can use the DEC reported by
# the user in the Scope Dec. box of the Guide tab,
# since DEC is really all we care about for the
# cosine effect in calculating deltas
RA = 0
DEC = self.CCDCamera.GuiderDeclination
log.warning('Telescope is not reporting RA and/or DEC. Setting RA = ' + str(RA) + ' and DEC = ' + str(DEC) + ', which was read from the Scope Dec. box of the Guide tab.')
# Check to see if we are pointed on the other side of the
# mount from our astrometry images
# --> this is going to change
if self.astrometry_pier_flip_state() == -1:
header = pier_flip_astrometry(header)
# Make sure RA is on correct axis and in the correct units
if 'RA' in header['CTYPE1']:
header['CRVAL1'] = RA / 24*360
header['CRVAL2'] = DEC
elif 'DEC' in header['CTYPE1']:
header['CRVAL2'] = RA / 24*360
header['CRVAL1'] = DEC
# Fix binning and subframing. More pixels to the center, but
# they are smaller. Also apply pier_flip_sign
header['CRPIX1'] = header['XBINNING'] * header['CRPIX1'] + header['XORGSUBF']
header['CRPIX2'] = header['YBINNING'] * header['CRPIX2'] + header['YORGSUBF']
header['CDELT1'] /= header['XBINNING']
header['CDELT2'] /= header['YBINNING']
header['CD1_1'] /= header['XBINNING']
header['CD1_2'] /= header['YBINNING']
header['CD2_1'] /= header['XBINNING']
header['CD2_2'] /= header['YBINNING']
# Put our binning and subframe to unbinned values so we
# don't tweak things again!
header['XORGSUBF'] = 0
header['YORGSUBF'] = 0
header['XBINNING'] = 1
header['YBINNING'] = 1
header['FLIPAPPL'] = (True, 'Applied pier_flip_sign')
header['HISTORY'] = 'Modified CRPIX*, CD*, XORG*, and *BINNING keywords'
# Do our desired transformations, only the WCS parts, not
# distortions, since I haven't mucked with those parameters
w = wcs.WCS(header)
if to_world:
# Our pix coords are in Y, X order. Transpose using
# negative striding. Use the Ellipsis trick to get
# to the last coordinate, which is, in a row major
# language, where the coordinate into the pairs
# resides (would be in the first coordinate in a
# column major language)
# https://stackoverflow.com/questions/12116830/numpy-slice-of-arbitrary-dimensions
coords = coords_in[..., ::-1]
if delta:
# We have left transpose space
c0 = np.asarray((header['CRPIX1'], header['CRPIX2']))
#log.debug('coords before: ' + str(coords))
coords += c0.astype(float)
#log.debug('coords after: ' + str(coords))
# Decide to leave in RA DEC, since we are no longer in
# our image when we are RA and DEC
# The 0 is because we number our pixels from 0, unlike
# FORTRAN which does so from 1
# ACK! The WCS package is not smart about checking
# for single tuple input, so I have to <sigh>
if coords.size == 2:
w_coords = w.wcs_pix2world(coords[0], coords[1], 0)
else:
w_coords = w.wcs_pix2world(coords, 0)
if delta:
w0_coords = w.wcs_pix2world(c0[0], c0[1], 0)
#log.debug('w_coords before: ' + str(w_coords))
w_coords = (np.asarray(w_coords)
- np.asarray(w0_coords))
#log.debug('w_coords after: ' + str(w_coords))
# for debugging purposes
#coords -= c0
#log.debug('coords_in[..., ::-1]: ' + str(coords))
#log.debug('plate scale: ' +
# str(3600*w_coords/coords))
return w_coords
if to_pix:
# --> This might need to be fixed
if delta:
coords_in += np.asarray((header['CRVAL1'],
header['CRVAL2']))
if coords_in.size == 2:
pix = np.asarray(
w.wcs_world2pix(coords_in[0], coords_in[1], 0))
else:
pix = w.wcs_world2pix(coords_in, 0)
if delta:
# Note we have yet to leave transpose space
pix -= np.asarray((header['CRPIX1'], header['CRPIX2']))
# Put our pix back into Y, X order, UNBINNED
return pix[..., ::-1]
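# Editorial aside, not part of the original class: a minimal, self-contained
# astropy.wcs round trip in the spirit of scope_wcs, assuming a plain TAN
# projection; every value below is a hypothetical placeholder.
import numpy as np
from astropy import wcs
w = wcs.WCS(naxis=2)
w.wcs.crpix = [512.0, 512.0]
w.wcs.cdelt = np.array([-0.0002, 0.0002])    # degrees per (unbinned) pixel
w.wcs.crval = [10.0, 20.0]                   # RA, DEC of the reference pixel (deg)
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
ra, dec = w.wcs_pix2world(600.0, 500.0, 0)   # pixel (X, Y) -> world
x, y = w.wcs_world2pix(ra, dec, 0)           # and back again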
def rot(self, vec, theta):
"""Rotates vector counterclockwise by theta degrees"""
vec = np.asarray(vec)
theta = np.radians(theta)
c, s =
|
np.cos(theta)
|
numpy.cos
|
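The prompt in the row above stops inside the rot helper just before the trigonometric terms. As an editorial illustration only (the dataset's ground truth is just the np.cos call shown in the completion), a rotation helper of this kind is typically completed along these lines:
import numpy as np
def rot(vec, theta):
    """Rotate a 2-D vector counterclockwise by theta degrees."""
    vec = np.asarray(vec)
    theta = np.radians(theta)
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]]) @ vec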
import copy
import pickle
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils
from ..dataset import DatasetTemplate
class PlusAIMultiframeDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
"""
Args:
root_path:
dataset_cfg:
class_names:
training:
logger:
"""
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
print('root_path: ', self.root_path.resolve())
self.root_split_path = self.root_path
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
# TODO: should read from dataset_cfg
self.stack_frame_size = 3
self.base_frame_idx = 1
self.plusai_infos = []
self.include_plusai_data(self.mode)
def include_plusai_data(self, mode):
if self.logger is not None:
self.logger.info('Loading PlusAI dataset ...')
plusai_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
continue
with open(info_path, 'rb') as f:
infos = pickle.load(f)
plusai_infos.extend(infos)
self.plusai_infos.extend(plusai_infos)
if self.logger is not None:
self.logger.info('Total samples for PlusAI dataset: %d' % (len(plusai_infos)))
def set_split(self, split):
super().__init__(
dataset_cfg=self.dataset_cfg,
class_names=self.class_names,
training=self.training,
root_path=self.root_path,
logger=self.logger
)
self.split = split
self.root_split_path = self.root_path
split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None
def get_lidar(self, idx):
lidar_file = self.root_split_path / idx
assert lidar_file.exists()
lidar_data = np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 5)
return lidar_data
def get_label(self, idx):
[scene_name, _, frame] = idx.split('/')
label_file = self.root_split_path / scene_name / 'label' / (frame[:-4] + '.pkl')
try:
assert label_file.exists()
except AssertionError:
print('[ERROR] get label failed:', label_file)
with open(label_file, 'rb') as f:
labels = pickle.load(f)
return labels
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
import concurrent.futures as futures
def process_single_scene(sample_idx):
# print('%s sample_idx: %s' % (self.split, sample_idx))
info = {}
pc_info = {'num_features': 5, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
image_info = {'image_idx': sample_idx, 'image_shape': np.array([1920, 1080])}
info['image'] = image_info
calib_info = {'P2': np.eye(4), 'R0_rect': np.eye(4), 'Tr_velo_to_cam': np.eye(4)}
info['calib'] = calib_info
if has_label:
obj_labels = self.get_label(sample_idx)
obj_labels = obj_labels['obstacles']
annotations = {}
if len(obj_labels) > 0:
annotations['name'] = np.array([label[self.base_frame_idx]['class'] for label in obj_labels])
annotations['truncated'] = np.array([0 for label in obj_labels])
annotations['occluded'] = np.array([0 for label in obj_labels])
annotations['alpha'] = np.array([0 for label in obj_labels])
annotations['bbox'] = np.array([[1, 1, 1, 1] for label in obj_labels])
annotations['dimensions'] = np.array([label[self.base_frame_idx]['size'] for label in obj_labels]) # lwh(lidar) format
annotations['location'] = np.array([label[self.base_frame_idx]['location'] for label in obj_labels])
annotations['rotation_y'] = np.array([label[self.base_frame_idx]['heading'] for label in obj_labels])
annotations['score'] = np.array([1 for label in obj_labels])
annotations['difficulty'] = np.array([0 for label in obj_labels], np.int32)
# multi-frame data
annotations['locations'] = np.array([[label['location'] for label in obj] for obj in obj_labels])
annotations['rotations_y'] = np.array([[label['heading'] for label in obj] for obj in obj_labels])
annotations['velocities'] = np.array([[label['velocity'] for label in obj] for obj in obj_labels])
# num_objects = len([label['name'] for label in obj_labels if label['name'] != 'DontCare'])
num_objects = len([name for name in annotations['name'] if name != 'DontCare'])
num_gt = len(annotations['name'])
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] =
|
np.array(index, dtype=np.int32)
|
numpy.array
|
#!/usr/bin/env python
"""
Python implementation of common model fitting operations to
analyse protein folding data. Simply automates some fitting
and value calculation. Will be extended to include phi-value
analysis and other common calculations.
Allows for quick model evaluation and plotting.
Also tried to make this somewhat abstract and modular to
enable more interesting calculations, such as Ising models
and such.
Requirements (recommended python 2.7+):
- numpy
- scipy
- matplotlib
Lowe, A.R. 2015
"""
import sys
import inspect
import numpy as np
import scipy as sp
from . import core
from . import constants
__author__ = "<NAME>"
__email__ = "<EMAIL>"
def list_models():
""" List the kinetic of equilibrium models defined in this module.
Returns a list of the names of the models, whose parent class is
FitModel.
"""
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
verif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)
fit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]
return fit_models
class TemplateModel(core.FitModel):
""" A template model for expansion
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([])
def fit_func(self, x):
raise NotImplementedError
@property
def equation(self):
return r'F=f(x)'
# F = \frac{\exp( m(x-d_{50})) / RT} { 1+\exp(m(x-d_{50}))/RT}
"""
==========================================================
EQUILIBRIUM FOLDING models
==========================================================
"""
class TwoStateEquilibrium(core.FitModel):
""" Two state equilibrium denaturation curve - No sloping baseline.
Folding Scheme:
N <-> D
Params:
F = Fraction unfolded
m = m-value
x = denaturant concentration (M)
d50 = denaturant midpoint (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Clarke and Fersht. Engineered disulfide bonds as probes of
the folding pathway of barnase: Increasing the stability
of proteins against the rate of denaturation.
Biochemistry (1993) vol. 32 (16) pp. 4322-4329
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1.5, 5.])
self.verified = True
def fit_func(self, x, m, d50):
F = ( np.exp((m*(x-d50))/core.temperature.RT)) / (1.+np.exp((m*(x-d50))/core.temperature.RT))
return F
@property
def equation(self):
return r'F = \frac{\exp( m(x-d_{50})) / RT} { 1+\exp(m(x-d_{50}))/RT}'
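# Editorial aside, not part of the original model: a quick numerical check of the
# two-state expression. At the midpoint x = d50 the exponent vanishes, so
# F = exp(0)/(1+exp(0)) = 0.5 for any m; the RT value below is a placeholder.
import numpy as np
m, d50, RT = 1.5, 5.0, 0.593
x = d50
F = np.exp(m*(x - d50)/RT) / (1. + np.exp(m*(x - d50)/RT))
assert abs(F - 0.5) < 1e-12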
class TwoStateEquilibriumSloping(core.FitModel):
""" Two state equilibrium denaturation curve - Sloping baseline.
Folding Scheme:
N <-> D
Params:
F = Fraction unfolded
alpha f = intercept of the native baseline at low denaturant concentrations
beta f = slope/gradient of the native baseline at low denaturant concentrations
alpha u = intercept of the denatured baseline at high denaturant concentrations
beta u = slope/gradient of the denatured baseline at high denaturant concentrations
m = m-value
x = denaturant concentration (M)
d50 = denaturant midpoint (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Clarke and Fersht. Engineered disulfide bonds as probes of
the folding pathway of barnase: Increasing the stability
of proteins against the rate of denaturation.
Biochemistry (1993) vol. 32 (16) pp. 4322-4329
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5.])
self.verified = True
def fit_func(self, x, alpha_f, beta_f, alpha_u, beta_u, m, d50):
F = (alpha_f+beta_f*x) + (alpha_u+beta_u*x) * (\
( np.exp((m*(x-d50))/core.temperature.RT)) / (1.+np.exp((m*(x-d50))/core.temperature.RT)))
return F
@property
def equation(self):
return r'F = (\alpha_f+\beta_f x) + (\alpha_u+\beta_u x) \cdot \frac{\exp( m(x-d_{50})) / RT} { 1+\exp(m(x-d_{50}))/RT}'
# NOTE (ergm) added on 30/8/2017 and corrected incorrect ascii for running on PC 8/9/2017
class ThreeStateEquilibrium (core.FitModel):
""" Three state equilbrium denaturation curve.
Folding Scheme:
N <-> I <-> D
Params:
Y_obs = The spectroscopic signal maximum as a function of denaturant concentration
Y_N = spectroscopic signals of the native state
Y_D = spectroscopic signals of the denatured state
F_D = fraction denatured
F_N = fraction native
F_I = fraction intermediate
Kni = equilibrium constant of unfolding native to intermediate state
Kid = equilibrium constant of unfolding intermediate to denatured state
DGni = stability of native state relative to intermediate state
m_ni = m-value of native to intermediate transition
DGid = stability of intermediate state relative to denatured state
m_id = m-value of intermediate to denatured transition
x = denaturant concentration (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
<NAME>, <NAME>. Structural Perturbation and Compensation by Directed
Evolution at Physiological Temperature Leads to Thermostabilization of
beta-Lactamase. (2005) Biochemistry 44. pp. 12640-12654
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.5, 0.0, 5., 1.5, 5., 1])
# NOTE (ergm) added on 3/11/2017
self.verified = True
def fit_func(self, x, Y_N, Y_I, Y_D, DGni, m_ni, DGid, m_id):
F = (Y_N + Y_I*np.exp((-DGni + m_ni*x)/core.temperature.RT) + Y_D*np.exp((-DGni + m_ni*x)/core.temperature.RT) * np.exp((-DGid + m_id*x)/core.temperature.RT)) \
/ (1 + np.exp((-DGni + m_ni*x)/core.temperature.RT) + np.exp((-DGni + m_ni*x)/core.temperature.RT) * np.exp((-DGid + m_id*x)/core.temperature.RT))
return F
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{obs} = \Upsilon_N F_N + \Upsilon_I F_I + \Upsilon_D F_D \ \\ \
\text{where:} \\ \
& F_N = \frac{1} {1 + K_{NI} + K_{NI} K_{ID}}\\ \
& F_I = \frac{K_{NI}} {1 + K_{NI} + K_{NI} K_{ID}}\\ \
& F_D = \frac{K_{NI} K_{ID}} {1 + K_{NI} + K_{NI} K_{ID}}\\ \
\text{and:} \\ \
& K_{NI} = \exp \frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT}\\ \
& K_{ID} = \exp \frac{\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}\\ \
\\ \
\text{thus:} \\ \
& \Upsilon_{obs} = \frac{ \Upsilon_N + \Upsilon_I \exp \frac {\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} + \
\Upsilon_D \exp \frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} \cdot \exp \frac{\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}} {1 + \exp \
\frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} + \exp \frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} \cdot \
\exp \frac{\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}}\
\end{aligned}\
\end{equation}'
# NOTE (ergm) added on 1/8/2017
class TwoStateDimerEquilibrium(core.FitModel):
""" Two State model for a dimer denaturation Equilibrium - No Intermediate.
Folding Scheme:
N2 <-> 2D
Params:
Y_obs = spectroscopic signal at a given concentration of urea
Y_N = spectroscopic signal for native monomeric subunits at a concentration of Pt
Y_D = spectroscopic signal for denatured monomeric subunits at a concentration of Pt
alpha_N = intercept of the native baseline at low denaturant concentrations
beta_N = slope/gradient of the native baseline at low denaturant concentrations
alpha_D = intercept of the denatured baseline at high denaturant concentrations
beta_D = slope/gradient of the denatured baseline at high denaturant concentrations
F_D = fraction of unfolded monomers
K_U = Equilibrium Constant for Unfolding of dimer.
Pt = total protein concentration. This variable needs to be set per denaturation curve.
m = m-value
x = denaturant concentration (M)
d50 = denaturant midpoint (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Mallam and Jackson. Folding studies on a knotted protein.
Journal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5., 1e-6])
self.constants = (('Pt',1e-6),)
# NOTE (ergm) added on 3/11/2017
self.verified = True
# NOTE (ergm) added on 25/8/2017
def fit_func(self, x, alpha_N, beta_N, alpha_D, beta_D, m, d50, Pt):
K_U = np.exp(((core.temperature.RT * np.log(Pt))-m*(d50-x)) / core.temperature.RT)
F_D = (np.sqrt((np.square(K_U) + (8 * K_U * Pt))) - K_U) / (4*Pt)
Y_0 = ((alpha_N + beta_N*x)*(1-F_D)) + ((alpha_D + beta_D*x)*(F_D))
return Y_0
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{obs} = \Upsilon_N \cdot (1-F_D) + \Upsilon_D \cdot F_D \\ \
\text{where} \\ \
& \Upsilon_N = \alpha_N+\beta_N x \\ \
& \Upsilon_D = \alpha_D+\beta_D x \\ \
& F_D = \frac{\sqrt{K_U^2 + 8 K_U Pt} - K_U} {4 Pt} \\ \
& K_U = \exp \frac{RT \ln(Pt) - m(d_{50} - x)} {RT}\
\end{aligned}\
\end{equation}'
# NOTE (ergm) added on 1/8/2017
# NOTE (ergm) updated Folding Scheme - was wrong 7/9/2017
class ThreeStateMonoIEquilibrium(core.FitModel):
""" Three State model for a dimer denaturation Equilibrium - Monomeric intermediate.
Folding Scheme:
N2 <-> 2I <-> 2D
Params:
Y_rel = spectroscopic signal at a given concentration of urea
Y_N = spectroscopic signal for native state
Y_D = spectroscopic signal for denatured state
Y_I = spectroscopic signal for intermediate state
F_D = fraction denatured monomers
F_N = fraction native dimers
F_I = fraction intermediate dimers
Pt = total protein concentration. This variable needs to be set per denaturation curve.
K1 = equilibrium constant of unfolding for native to intermediate state
K2 = equilibrium constant of unfolding for intermediate to denatured state
DG1 = stability of native state relative to intermediate state
m1 = m-value of native to intermediate transition
DG2 = stability of intermediate state relative to denatured state
m2 = m-value of intermediate to denatured transition
x = denaturant concentration (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
<NAME> Jackson. Folding studies on a knotted protein.
Journal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 1.0, 0.1, 1.5, 5., 3., 1e-6])
self.constants = (('Pt',1e-6),)
# NOTE (ergm) added on 3/11/2017
self.verified = True
def fit_func(self, x, DG1, m1, DG2, m2, Y_N, Y_I, Y_D, Pt):
K1 = np.exp((-DG1 + (m1*x)) / core.temperature.RT)
K2 = np.exp((-DG2 + (m2*x)) / core.temperature.RT)
F_I = (-(K1*(1+K2)) + np.sqrt(np.square(K1) * np.square(1+K2) + (8*Pt*K1))) / (4*Pt)
Y_rel = (Y_N * ((2 * Pt * np.square(F_I))/K1)) + (Y_I * F_I) + (Y_D * (K2*F_I))
return Y_rel
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{rel} = \Upsilon_N F_N + \Upsilon_I F_I + \Upsilon_D F_D \\ \
\text{expanded:} \\ \
& \Upsilon_{rel} = \Upsilon_N \cdot \frac{2PtF_I^2} {K_1} + \Upsilon_I F_I + \Upsilon_D * K_2F_I \\ \
\\ \
\text{where:} \\ \
& F_I = \frac {- K_1 (1+K_2) + \sqrt{(K_1^2 (1+K_2)^2 + (8 Pt K_1))}} {4Pt} \\ \
& K_1 = \exp \frac{-\Delta G_{H_20}^1 + m_1 x} {RT} \\ \
& K_2 = \exp \frac{-\Delta G_{H_20}^2 + m_2 x} {RT}\
\end{aligned}\
\end{equation}'
# NOTE (ergm) added on 1/8/2017
# NOTE (ergm) updated Folding Scheme - was wrong 7/9/2017
class ThreeStateDimericIEquilibrium(core.FitModel):
""" Three State model for a dimer denaturation Equilibrium - Dimeric Intermediate.
Folding Scheme:
N2 <-> I2 <-> 2D
Params:
Y_rel = spectroscopic signal at a given concentration of urea
Y_N = spectroscopic signal for native state
Y_D = spectroscopic signal for denatured state
Y_I = spectroscopic signal for intermediate state
F_D = fraction denatured monomers
F_N = fraction native dimers
F_I = fraction intermediate dimers
Pt = total protein concentration. This variable needs to be set per denaturation curve.
K1 = equilibrium constant of unfolding native to intermediate state
K2 = equilibrium constant of unfolding intermediate to denatured state
DG1 = stability of native state relative to intermediate state
m1 = m-value of native to intermediate transition
DG2 = stability of intermediate state relative to denatured state
m2 = m-value of intermediate to denatured transition
x = denaturant concentration (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Mallam and Jackson. Folding studies on a knotted protein.
Journal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5., 2., 1e-6])
self.constants = (('Pt',1e-6),)
# NOTE (ergm) added on 3/11/2017
self.verified = True
def fit_func(self, x, DG1, m1, DG2, m2, Y_N, Y_I, Y_D, Pt):
K1 = np.exp((-DG1 + (m1*x)) / core.temperature.RT)
K2 = np.exp((-DG2 + (m2*x)) / core.temperature.RT)
F_D = (-(K1*K2) + np.sqrt(np.square(K1*K2) + 8*(1+K1)*(K1*K2)*Pt)) / (4*Pt*(1+K1))
Y_rel = (Y_N * ((2 * Pt * np.square(F_D))/(K1*K2))) + (Y_I * ((2 * Pt * np.square(F_D))/K2)) + (Y_D * F_D)
return Y_rel
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{rel} = \Upsilon_N F_N + \Upsilon_I F_I + \Upsilon_D F_D \\ \
\text{expanded:} \\ \
& \Upsilon_{rel} = \Upsilon_N \cdot \frac{2PtF_D^2} {K_1 K_2} + \Upsilon_I \frac{2PtF_D^2} {K_2} + \Upsilon_D * (F_D) \\ \
\\ \
\text{where:} \\ \
& F_D = \frac {- K_1 K_2 + \sqrt{((K_1 K_2)^2 + 8(1+K_1)(K_1 K_2)Pt)}} {4Pt (1 + K_1)} \\ \
& K_1 = \exp \frac{-\Delta G_{H_20}^1 + m_1 x} {RT} \\ \
& K_2 = \exp \frac{-\Delta G_{H_20}^2 + m_2 x} {RT}\
\end{aligned}\
\end{equation}'
class HomozipperIsingEquilibrium(core.FitModel):
""" Homopolymer Zipper Ising model
Params:
q = partition function
f = fraction of folded protein
Kappa = equilibrium constant of folding for a given repeating unit
Tau = equilibrium constant of association between 2 repeating units
n = number of repeating units
x = denaturant concentration (M)
Gi = intrinsic stability (folding energy) of a repeating unit i
mi = denaturant sensitivity of the intrinsic stability of a repeating unit i
Gi,i+1 = interface interaction energy between 2 repeating units
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Aksel and Barrick. Analysis of repeat-protein folding using
nearest-neighbor statistical mechanical models.
Methods in enzymology (2009) vol. 455 pp. 95-125
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([7, 0.1, -.53, -4.6])
self.constants = (('n',7),)
self.verified = True
def fit_func(self, x, n, DG_intrinsic, m_intrinsic, DG_interface):
# # clamp to prevent instability
# if DG_intrinsic<0. or DG_interface>0.:
# return core.FIT_ERROR(x)
k = np.exp(-(DG_intrinsic - m_intrinsic*x) / core.temperature.RT )
#t = np.exp(-(DG_interface - m_interface*x) / core.temperature.RT )
t = np.exp(-(DG_interface) / core.temperature.RT )
pre_factor = (k/(n*(k*t-1)))
numerator = n*(k*t)**(n+2) - (n+2)*(k*t)**(n+1) + (n+2)*k*t-n
denominator = (k*t-1)**2 + k*((k*t)**(n+1) - (n+1)*k*t+n )
theta = pre_factor * (numerator / denominator)
return 1.-theta
# NOTE (ergm) changed on 4/9/2017
@property
def equation(self):
return r'\text{the partition function } (q) \text{ and thus fraction of folded protein } (f) \text{ of n arrayed repeats are given by:}\\ \
\begin{equation} \\ \
\begin{aligned} \
& q = 1 + \frac{\kappa([\kappa \tau]^{n+1} - [n+1]\kappa \tau - n)} {(\kappa \tau + 1)^2} \\ \
\\ \
& f = \frac{1} {n} \sum^{n}_{i=0}i\frac{(n-i+1)\kappa^i\tau^{i-1}} {q} \\ \
\\ \
\text{where:} \\ \
& \kappa (x) = \exp\frac{-G_i} {RT} = \exp\frac{-G_{i,H_20} + m_i x} {RT} \\ \
\\ \
& \tau (x) = \exp\frac{-G_{i,i+1}} {RT} \
\end{aligned}\
\end{equation}'
class HeteropolymerIsingEquilibrium(core.FitModel):
""" Heteropolymer Ising model
Params:
q = partition function
f = fraction of folded protein
Kappa = equilibrium constant of folding for a given repeating unit
Tau = equilibrium constant of association between 2 repeating units
n = number of repeating units
x = denaturant concentration (M)
DG_intrinsic = intrinsic stability (folding energy) of a repeating unit i
m_intrinsic = denaturant sensitivity of the intrinsic stability of a repeating unit i
DG_interface = interface interaction energy between 2 repeating units
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Aksel and Barrick. Analysis of repeat-protein folding using
nearest-neighbor statistical mechanical models.
Methods in enzymology (2009) vol. 455 pp. 95-125
"""
def __init__(self):
core.FitModel.__init__(self)
def fit_func(self, x):
raise NotImplementedError('This is a dummy model.')
# NOTE (ergm) changed on 4/9/2017
@property
def equation(self):
return r'\text{the partition function } (q) \text{ and thus fraction of folded protein } (f) \text{ of n arrayed repeats are given by:} \\ \
\begin{equation} \\ \
\begin{aligned} \\ \
\kappa(x) &= \exp(-(\Delta G_{intrinsic} - m_{intrinsic}x) / RT) \\ \
\tau(x) &= \exp(-\Delta G_{interface} / RT) \\ \
q(i) &= \
\begin{bmatrix} 0 & 1\end{bmatrix} \
\begin{bmatrix} \kappa_1\tau_{-1} & 1\\ \kappa & 1 \end{bmatrix} \
\ldots \
\begin{bmatrix} \kappa_n\tau_{n-1} & 1\\ \kappa & 1 \end{bmatrix} \
\begin{bmatrix} 1 \\ 1 \end{bmatrix} \\ \
\theta &= \frac{1}{nq(n)} \sum_{i=0}^{n}{q(i)} \
\end{aligned} \
\end{equation}'
"""
==========================================================
KINETIC FOLDING models
==========================================================
"""
class TwoStateChevron(core.FitModel):
""" Two state chevron plot.
Folding Scheme:
N <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kf = rate constant of refolding at a particular denaturant concentration
mf = the gradient of refolding arm of the chevron
ku = rate constant of unfolding at a particular denaturant concentration
mu = the gradient of unfolding arm of the chevron
x = denaturant concentration (M)
Reference:
Jackson SE and Fersht AR. Folding of chymotrypsin inhibitor 2.
1. Evidence for a two-state transition.
Biochemistry (1991) 30(43):10428-10435.
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([100., 1.3480, 5e-4, 1.])
#self.constants = (('mf',1.76408),('mu',1.13725))
self.verified = True
def fit_func(self, x, kf, mf, ku, mu):
k_obs = kf*np.exp(-mf*x) + ku*np.exp(mu*x)
return k_obs
def error_func(self, y):
return np.log(y)
# NOTE (ergm) added on 24/8/2017
# def components(self, x, kf, mf, ku, mu):
# k_f = kf*np.exp(-mf*x)
# k_u = ku*np.exp(mu*x)
# k_obs = k_f + k_u
# return {'k_f':k_f, 'k_u':k_u}
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_f + k_u \\ \
\\ \
\text{where:} \\ \
& k_f = k_f^{H_2O}\exp(-m_{kf}x)\\ \
& k_u = k_u^{H_2O}\exp(m_{ku}x) \\ \
\text{thus:} \\ \
& k_{obs} = k_f^{H_2O}\exp(-m_{kf}x) + k_u^{H_2O}\exp(m_{ku}x)\\ \
\end{aligned} \
\end{equation}'
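# Editorial aside, not part of the original model: a quick sanity check of the
# chevron expression. At zero denaturant both exponentials equal 1, so
# k_obs(0) = kf + ku; the parameter values below are hypothetical.
import numpy as np
kf, mf, ku, mu = 100., 1.35, 5e-4, 1.
x = 0.
k_obs = kf*np.exp(-mf*x) + ku*np.exp(mu*x)
assert abs(k_obs - (kf + ku)) < 1e-12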
class ThreeStateChevron(core.FitModel):
""" Three state chevron with single intermediate.
Folding Scheme:
N <-> I <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kfi = microscopic rate constant for the conversion of folded to intermediate
kif = microscopic rate constant for the conversion of intermediate to folded
i.e. k_if = kif(H20) * exp((mi - mif)*x)
Kiu = equilibrium constant for the rapid equilibration between intermediate & unfolded
i.e. Kiu = Kiu(H2O) * exp((mu-mi)*x)
mif = m-value associated with the kinetic transition between intermediate & folded
mi = m-value associated with the equilibrium transition between intermediate & folded
mu = m-value associated with the equilibrium transition between unfolded & folded
x = denaturant concentration (M)
Reference:
Parker et al. An integrated kinetic analysis of
intermediates and transition states in protein folding reactions.
Journal of molecular biology (1995) vol. 253 (5) pp. 771-86
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([4.5e-4, -9.5e-1, 1.3e9, -6.9, 1.4e-8, -1.6])
#self.constants = (('mif',-0.97996),('mi',-6.00355),('mu',-1.66154))
self.verified = True
def fit_func(self, x, kfi, mif, kif, mi, Kiu, mu):
k_fi = kfi*np.exp(-mif*x)
k_if = kif*np.exp((mi - mif)*x)
K_iu = Kiu*np.exp((mu - mi)*x)
k_obs = k_fi + k_if / (1.+1./K_iu)
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kfi, mif, kif, mi, Kiu, mu):
k_fi = kfi*np.exp(-mif*x)
k_if = kif*np.exp((mi - mif)*x)
k_obs_I = k_fi + k_if
return {'kobs_I':k_obs_I}
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_{fi} + \frac{k_{if}} {1+1/K_{iu}} \\ \
\\ \
\text{where:} \\ \
& k_{fi} = k_{fi}^{H_2O}\exp(-m_{fi}x)\\ \
& k_{if} = k_{if}^{H_2O}\exp((m_i - m_{if})x)\\ \
& K_{iu} = K_{iu}^{H_2O}\exp((m_u - m_i)x)\\ \
\text{thus:} \\ \
& k_{obs} = k_{fi}^{H_2O}\exp(-m_{if}x) + k_{if}^{H_2O}\exp((m_i - m_{if})x) /(1 + 1 / (K_{iu}^{H_2O}\exp((m_u-m_i)x)))\\ \
\end{aligned} \
\end{equation}'
class ThreeStateFastPhaseChevron(core.FitModel):
""" Three state chevron with single intermediate.
Folding Scheme: N <-> I <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kfi = microscopic rate constant for the conversion of folded to intermediate
kif = microscopic rate constant for the conversion of intermediate to folded
kiu = microscopic rate constant for the conversion of intermediate to unfolded
kui = microscopic rate constant for the conversion of unfolded to intermediate
Kiu = equilibrium constant for the rapid equilibration between intermediate & unfolded
mfi = m-value associated with the kinetic transition between folded & intermediate
mif = m-value associated with the kinetic transition between intermediate & folded
miu = m-value associated with the kinetic transition between intermediate & unfolded
mui = m-value associated with the kinetic transition between unfolded & intermediate
x = denaturant concentration (M)
Reference:
Parker et al. An integrated kinetic analysis of
intermediates and transition states in protein folding reactions.
Journal of molecular biology (1995) vol. 253 (5) pp. 771-86
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([172., 1.42, .445, .641, 1e4, 2.71313, 1.83e-3, 1.06])
#self.constants = (('kui',172.), ('mui',1.42), ('kiu',.445), ('miu',.641), ('mif',-2.71313),('mfi',1.06534))
self.verified = True
def fit_func(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_iu = kiu*np.exp(miu*x)
k_ui = kui*np.exp(-mui*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
K_iu = k_iu / (k_iu+k_ui)
k_obs = k_fi + k_if / (1.+1./K_iu)
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_iu = kiu*np.exp(miu*x)
k_ui = kui*np.exp(-mui*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
k_obs_I = k_iu + k_ui
k_obs_N = k_fi + k_if
return {'kobs_I':k_obs_I} #, 'kobs_N':k_obs_N}
# NOTE (ergm) added on 23/8/2017
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_{fi} + \frac{k_{if}} {1+1/K_{iu}} \\ \
\\ \
\text{where:} \\ \
& k_{fi} = k_{fi}^{H_2O}\exp(m_{fi}x)\\ \
& k_{if} = k_{if}^{H_2O}\exp(-m_{if}x)\\ \
& k_{iu} = k_{iu}^{H_2O}\exp(m_{iu}x)\\ \
& k_{ui} = k_{ui}^{H_2O}\exp(-m_{ui}x)\\ \
& K_{iu} = \frac{k_{iu}} {k_{iu} + k_{ui}}\\ \
\end{aligned} \
\end{equation}'
class ThreeStateSequentialChevron(core.FitModel):
""" Three state metastable intermediate chevron plot.
Folding Scheme: N <-> I <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kfi = microscopic rate constant for the conversion of folded to intermediate
kif = microscopic rate constant for the conversion of intermediate to folded
kiu = microscopic rate constant for the conversion of intermediate to unfolded
kui = microscopic rate constant for the conversion of unfolded to intermediate
mfi = m-value associated with the kinetic transition between folded & intermediate
mif = m-value associated with the kinetic transition between intermediate & folded
miu = m-value associated with the kinetic transition between intermediate & unfolded
mui = m-value associated with the kinetic transition between unfolded & intermediate
x = denaturant concentration (M)
Reference:
Bachmann and Kiefhaber. Apparent two-state tendamistat
folding is a sequential process along a defined route.
J Mol Biol (2001) vol. 306 (2) pp. 375-386
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([2e4, 0.3480, 1e4, 0, 20.163, 1.327, 0.3033, 0.2431])
# NOTE (ergm) changed constants on 3/10/2017
self.constants = (('kiu', 1.e4),('miu',0.))
self.verified = True
def fit_func(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_ui = kui*np.exp(-mui*x)
k_iu = kiu*np.exp(miu*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
lam_1 = -(k_ui + k_iu + k_if + k_fi)
lam_2 = k_ui * (k_if+k_fi) + k_iu*k_fi
k_obs = 0.5 * (-lam_1 - np.sqrt(lam_1**2 - 4*lam_2))
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_ui = kui*np.exp(-mui*x)
k_iu = kiu*np.exp(miu*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
k_TS1 = k_ui + (k_fi/kif)*k_iu
k_TS2 = (k_ui/k_iu)*k_if + k_fi
return {'kTS1':k_TS1, 'kTS2':k_TS2}
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = 0.5(-A_1 \pm \sqrt{A_1^2 - 4A_2}) \\ \
\\ \
\text{where:}\\ \
& A_1 = -(k_{ui} + k_{iu} + k_{if} + k_{fi}) \\ \
& A_2 = k_{ui}(k_{if} + k_{fi}) + k_{iu}k_{fi} \\ \
\text{and:} \\ \
& k_{fi} = k_{fi}^{H_2O}\exp(m_{fi}x)\\ \
& k_{if} = k_{if}^{H_2O}\exp(-m_{if}x)\\ \
& k_{iu} = k_{iu}^{H_2O}\exp(m_{iu}x)\\ \
& k_{ui} = k_{ui}^{H_2O}\exp(-m_{ui}x)\\ \
\end{aligned} \
\end{equation}'
class ParallelTwoStateChevron(core.FitModel):
""" Parallel Two state chevron plot.
Folding Scheme:
N <-> D
^ ^
|_____|
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
k_obs_A = rate constant of unfolding or refolding of pathway A at a particular denaturant concentration
k_obs_B = rate constant of unfolding or refolding of pathway B at a particular denaturant concentration
mf_A = the gradient of refolding arm of pathway A
mf_B = the gradient of refolding arm of pathway B
mu_A = the gradient of unfolding arm of pathway A
mu_B = the gradient of unfolding arm of pathway B
x = denaturant concentration (M)
Reference:
Lowe & Itzhaki. Rational redesign of the folding pathway of a modular protein.
PNAS (2007) vol. 104 (8) pp. 2679-2684
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([50., 1.3480, 5e-4, 1., 150., 3.5])
def fit_func(self, x, kf_A, mf_A, ku_A, mu_A, kf_B, mf_B):
if mf_A < 0. or mf_B < 0. or mu_A < 0.:
return core.FIT_ERROR(x)
if kf_A <0. or ku_A <0. or kf_B < 0.:
return core.FIT_ERROR(x)
deltaG_A = kf_A / ku_A
ku_B = kf_B / deltaG_A
mu_B = np.abs(mf_A + mu_A) - np.abs(mf_B)
k_obs_A = kf_A*np.exp(-mf_A*x) + ku_A*np.exp(mu_A*x)
k_obs_B = kf_B*
|
np.exp(-mf_B*x)
|
numpy.exp
|
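The prompt in the row above truncates the pathway-B observed rate inside ParallelTwoStateChevron. As an editorial sketch only (the dataset's ground truth is just the np.exp call shown in the completion), a parallel two-state scheme usually sums the folding and unfolding terms of both pathways, with ku_B and mu_B taken from the constraints computed just above in the prompt:
k_obs_B = kf_B*np.exp(-mf_B*x) + ku_B*np.exp(mu_B*x)
return k_obs_A + k_obs_B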
# encoding: utf-8
"""
A script for running the following zero-shot domain transfer experiments:
* dataset: Overnight
* model: BART encoder + vanilla Transformer decoder for LF
* lexical token representations are computed based on lexicon
* training: normal (CE on teacher forced target)
"""
import faulthandler
import itertools
import json
import math
import random
import string
from copy import deepcopy
from functools import partial
from typing import Callable, Set
import fire
# import wandb
import qelos as q # branch v3
import numpy as np
import torch
from nltk import Tree
from torch.utils.data import DataLoader
from parseq.datasets import OvernightDatasetLoader, pad_and_default_collate, autocollate, Dataset
from parseq.decoding import merge_metric_dicts
from parseq.eval import SeqAccuracies, TreeAccuracy, make_array_of_metrics, CELoss
from parseq.grammar import tree_to_lisp_tokens, lisp_to_tree
from parseq.vocab import SequenceEncoder, Vocab
from transformers import AutoTokenizer, AutoModel, BartConfig, BartModel, BartForConditionalGeneration, BertLayer, \
BertModel
from transformers.activations import ACT2FN
from transformers.modeling_bart import SinusoidalPositionalEmbedding, DecoderLayer, SelfAttention, LayerNorm
UNKID = 3
DATA_RESTORE_REVERSE = False
def get_labels_from_tree(x:Tree):
ret = {x.label()}
for child in x:
ret |= get_labels_from_tree(child)
return ret
def get_maximum_spanning_examples(examples, mincoverage=1, loadedex=None):
"""
Sort the given examples by how well they span their vocabulary.
Earlier examples are chosen to maximally increase coverage of the least-seen tokens.
:param examples:
:param mincoverage: the minimum number of times every token must be covered.
If the token occurs less than 'mincoverage' number of times in given 'examples',
all examples with that token are included but the 'mincoverage' criterion is not satisfied!
:return:
"""
tokencounts = {}
uniquetokensperexample = []
examplespertoken = {} # reverse index from token to example number
for i, example in enumerate(examples):
exampletokens = set(get_labels_from_tree(example[1]))
uniquetokensperexample.append(exampletokens)
for token in exampletokens:
if token not in tokencounts:
tokencounts[token] = 0
tokencounts[token] += 1
if token not in examplespertoken:
examplespertoken[token] = set()
examplespertoken[token].add(i)
scorespertoken = {k: len(examples) / len(examplespertoken[k]) for k in examplespertoken.keys()}
selectiontokencounts = {k: 0 for k, v in tokencounts.items()}
if loadedex is not None:
for i, example in enumerate(loadedex):
exampletokens = set(get_labels_from_tree(example[1]))
for token in exampletokens:
if token in selectiontokencounts:
selectiontokencounts[token] += 1
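# Editorial note on the scoring below: each token contributes (1/8)**(times it
# already appears in the selection minus the current minimum count), so examples
# containing the rarest tokens in the running selection dominate the greedy order.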
def get_example_score(i):
minfreq = min(selectiontokencounts.values())
ret = 0
for token in uniquetokensperexample[i]:
ret += 1/8 ** (selectiontokencounts[token] - minfreq)
return ret
exampleids = set(range(len(examples)))
outorder = []
i = 0
while len(exampleids) > 0:
sortedexampleids = sorted(exampleids, key=get_example_score, reverse=True)
outorder.append(sortedexampleids[0])
exampleids -= {sortedexampleids[0]}
# update selection token counts
for token in uniquetokensperexample[sortedexampleids[0]]:
selectiontokencounts[token] += 1
minfreq = np.infty
for k, v in selectiontokencounts.items():
if tokencounts[k] < mincoverage and selectiontokencounts[k] >= tokencounts[k]:
pass
else:
minfreq = min(minfreq, selectiontokencounts[k])
i += 1
if minfreq >= mincoverage:
break
out = [examples[i] for i in outorder]
print(f"{len(out)}/{len(examples)} examples loaded from domain")
return out
def get_lf_abstract_transform(examples):
"""
Receives examples from different domains in the format (_, out_tokens, split, domain).
Returns a function that transforms a sequence of domain-specific output tokens
into a sequence of domain-independent tokens, abstracting domain-specific tokens/subtrees.
:param examples:
:return:
"""
# get shared vocabulary
domainspertoken = {}
domains = set()
for i, example in enumerate(examples):
if "train" in example[2]:
exampletokens = set(example[1])
for token in exampletokens:
if token not in domainspertoken:
domainspertoken[token] = set()
domainspertoken[token].add(example[3])
domains.add(example[3])
sharedtokens = set([k for k, v in domainspertoken.items() if len(v) == len(domains)])
sharedtokens.add("@ABS@")
sharedtokens.add("@END@")
sharedtokens.add("@START@")
sharedtokens.add("@META@")
sharedtokens.add("@UNK@")
sharedtokens.add("@PAD@")
sharedtokens.add("@METARARE@")
replacement = "@ABS@"
def example_transform(x):
abslf = [xe if xe in sharedtokens else replacement for xe in x]
abslf = ["@ABSSTART@"] + abslf[1:]
return abslf
return example_transform
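# Editorial aside, not part of the original function: an illustration with
# hypothetical tokens, assuming only "@START@" and "(" are shared across domains:
# example_transform(["@START@", "(", "call", "SW.getProperty", ")"])
# -> ["@ABSSTART@", "(", "@ABS@", "@ABS@", "@ABS@"]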
def load_ds(traindomains=("restaurants",),
testdomain="housing",
min_freq=1,
mincoverage=1,
top_k=np.infty,
batsize=10,
nl_mode="bert-base-uncased",
fullsimplify=False,
add_domain_start=True,
supportsetting="lex", # "lex" or "min"
):
"""
:param traindomains:
:param testdomain:
:param min_freq:
:param mincoverage:
:param top_k:
:param nl_mode:
:param fullsimplify:
:param add_domain_start:
:param onlyabstract:
:param pretrainsetting: "all": use all examples from every domain
"lex": use only lexical examples
"all+lex": use both
:param finetunesetting: "lex": use lexical examples
"all": use all training examples
"min": use minimal lexicon-covering set of examples
! Test is always over the same original test set.
! Validation is over a fraction of training data
:return:
"""
def tokenize_and_add_start(t, _domain, meta=False):
tokens = tree_to_lisp_tokens(t)
if not meta:
starttok = f"@START/{_domain}@" if add_domain_start else "@START@"
tokens = [starttok] + tokens
else:
starttok = f"@META/{_domain}@" if add_domain_start else "@META@"
tokens = [starttok] + tokens
return tokens
domains = {}
alltrainex = []
for domain in list(traindomains) + [testdomain]:
ds = OvernightDatasetLoader(simplify_mode="light" if not fullsimplify else "full", simplify_blocks=True,
restore_reverse=DATA_RESTORE_REVERSE, validfrac=.10)\
.load(domain=domain)
domainexamples = [(a, b, c) for a, b, c in ds.examples]
if supportsetting == "lex":
domainexamples = [(a, b, "finetune" if c == "lexicon" else c)
for a, b, c in domainexamples]
else:
domainexamples = [(a, b, c) for a, b, c in domainexamples if c != "lexicon"]
if domain != testdomain:
alltrainex += [(a, b, c, domain) for a, b, c in domainexamples if c == "train"]
domains[domain] = domainexamples
if supportsetting == "min":
for domain, domainexamples in domains.items():
mindomainexamples = get_maximum_spanning_examples([(a, b, c) for a, b, c in domainexamples if c == "train"],
mincoverage=mincoverage,
loadedex=[a for a in alltrainex if a[3] != domain])
domains[domain] = domains[domain] + [(a, b, "finetune") for a, b, c in mindomainexamples]
for domain in domains:
domains[domain] = [(a, tokenize_and_add_start(b, domain, meta=c=="finetune"), c)
for a, b, c in domains[domain]]
# sourceex += ds[(None, None, lambda x: x in ("train", "valid", "lexicon"))].examples # don't use test examples
allex = []
for domain in domains:
allex += [(a, b, c, domain) for a, b, c in domains[domain]]
ds = Dataset(allex)
et = get_lf_abstract_transform(ds[lambda x: x[3] != testdomain].examples)
ds = ds.map(lambda x: (x[0], x[1], et(x[1]), x[2], x[3]))
abstracttokens = set()
# abstracttokens.add("@META@")
abstracttokens.add("@START@")
abstracttokens.add("@END@")
abstracttokens.add("@UNK@")
abstracttokens.add("@PAD@")
abstracttokens.add("@ABS@")
abstracttokens.add("@ABSSTART@")
abstracttokens.add("@METARARE@")
seqenc_vocab = Vocab(padid=0, startid=1, endid=2, unkid=UNKID)
seqenc_vocab.add_token("@ABS@", seen=np.infty)
seqenc_vocab.add_token("@ABSSTART@", seen=np.infty)
seqenc_vocab.add_token("@METARARE@", seen=np.infty)
seqenc = SequenceEncoder(vocab=seqenc_vocab, tokenizer=lambda x: x,
add_start_token=False, add_end_token=True)
for example in ds.examples:
abstracttokens |= set(example[2])
seqenc.inc_build_vocab(example[1], seen=example[3] in ("train", "finetune") if example[4] != testdomain else example[3] == "finetune")
seqenc.inc_build_vocab(example[2], seen=example[3] in ("train", "finetune") if example[4] != testdomain else example[3] == "finetune")
seqenc.finalize_vocab(min_freq=min_freq, top_k=top_k)
abstracttokenids = {seqenc.vocab[at] for at in abstracttokens}
nl_tokenizer = AutoTokenizer.from_pretrained(nl_mode)
def tokenize(x):
ret = (nl_tokenizer.encode(x[0], return_tensors="pt")[0],
seqenc.convert(x[1], return_what="tensor"),
seqenc.convert(x[2], return_what="tensor"),
x[3],
x[0], x[1], x[2], x[3])
return ret
sourceret = {}
targetret = {}
for domain in domains:
finetuneds = ds[lambda x: x[3] == "finetune" and x[4] == domain].map(tokenize)
trainds = ds[lambda x: x[3] == "train" and x[4] == domain].map(tokenize)
validds = ds[lambda x: x[3] == "valid" and x[4] == domain].map(tokenize)
testds = ds[lambda x: x[3] == "test" and x[4] == domain].map(tokenize)
if domain == testdomain:
ret = targetret
else:
ret = sourceret
ret[domain] = {
"finetune":DataLoader(finetuneds, batch_size=batsize, shuffle=True, collate_fn=partial(autocollate, pad_value=0)),
"train": DataLoader(trainds, batch_size=batsize, shuffle=True, collate_fn=partial(autocollate, pad_value=0)),
"valid": DataLoader(validds, batch_size=batsize, shuffle=False, collate_fn=partial(autocollate, pad_value=0)),
"test": DataLoader(testds, batch_size=batsize, shuffle=False, collate_fn=partial(autocollate, pad_value=0))
}
# populate the "all" domain
allsourceret = {
"finetune": DataLoader(ds[lambda x: x[3] == "finetune" and x[4] in traindomains].map(tokenize),
batch_size=batsize, shuffle=True, collate_fn=partial(autocollate, pad_value=0)),
"train": DataLoader(ds[lambda x: x[3] == "train" and x[4] in traindomains].map(tokenize),
batch_size=batsize, shuffle=True, collate_fn=partial(autocollate, pad_value=0)),
"valid": DataLoader(ds[lambda x: x[3] == "valid" and x[4] in traindomains].map(tokenize),
batch_size=batsize, shuffle=False, collate_fn=partial(autocollate, pad_value=0)),
"test": DataLoader(ds[lambda x: x[3] == "test" and x[4] in traindomains].map(tokenize),
batch_size=batsize, shuffle=False, collate_fn=partial(autocollate, pad_value=0)),
}
return sourceret, targetret, allsourceret, nl_tokenizer, seqenc, abstracttokenids
def apply_withpath(m:torch.nn.Module, fn:Callable, mpath=None):
""" Apply function 'fn' recursively on 'm' and its submodules, where 'fn' gets 'm' and 'mpath' as argument """
fn(m, mpath)
for name, child in m.named_children():
apply_withpath(child, fn, f"{mpath}.{name}" if mpath is not None else f"{name}")
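# Small usage sketch (assumption: works for any torch.nn.Module): print the dotted path
# of every Linear submodule in a model.
def _demo_apply_withpath(model: torch.nn.Module):
    apply_withpath(model, lambda m, path: print(path) if isinstance(m, torch.nn.Linear) else None)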
class TransformerLayerAdapter(torch.nn.Module):
def __init__(self, dim, hdim, **kw):
super(TransformerLayerAdapter, self).__init__(**kw)
self.fc1 = torch.nn.Linear(dim, hdim)
self.fc2 = torch.nn.Linear(hdim, dim)
self.layernorm = torch.nn.LayerNorm(dim)
def forward(self, x):
innerresidual = x
x = self.fc1(x)
x = torch.nn.functional.relu(x)
x = self.fc2(x)
x = x + innerresidual
x = self.layernorm(x)
return x
class GatedTransformerLayerAdapter(TransformerLayerAdapter):
def __init__(self, dim, hdim, biasoffset=-3, **kw):
super(GatedTransformerLayerAdapter, self).__init__(dim, hdim, **kw)
self.fc3 = torch.nn.Linear(hdim, dim)
self.biasoffset = biasoffset
def forward(self, x):
innerresidual = x
h = self.fc1(x)
h = torch.relu(h)
x = self.fc2(h)
z = self.fc3(h)
z = torch.sigmoid(z + self.biasoffset)
x = x * z + innerresidual * (1 - z)
x = self.layernorm(x)
return x
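# Quick shape check (illustrative, standalone): the gated adapter maps (batch, seq, dim)
# to the same shape, mixing the adapter output with the residual through a learned gate.
def _demo_gated_adapter():
    adapter = GatedTransformerLayerAdapter(dim=16, hdim=8)
    x = torch.randn(2, 5, 16)
    print(adapter(x).shape)  # expected: torch.Size([2, 5, 16])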
class AdaptedBartDecoderLayer(torch.nn.Module):
def __init__(self, decoderlayer:DecoderLayer=None, compression=2, ):
super().__init__()
self.core = decoderlayer
self.adapter = GatedTransformerLayerAdapter(self.core.embed_dim, self.core.embed_dim//compression)
def forward(
self,
x,
encoder_hidden_states,
encoder_attn_mask=None,
layer_state=None,
causal_mask=None,
decoder_padding_mask=None,
):
x, self_attn_weights, layer_state = self.core(x, encoder_hidden_states,
encoder_attn_mask=encoder_attn_mask, layer_state=layer_state, causal_mask=causal_mask,
decoder_padding_mask=decoder_padding_mask)
x = self.adapter(x)
return (
x,
self_attn_weights,
layer_state,
) # just self_attn weights for now, following t5, layer_state = cache for decoding
class AdaptedBertEncoderLayer(torch.nn.Module):
def __init__(self, core:BertLayer, compression=2):
super(AdaptedBertEncoderLayer, self).__init__()
self.core = core
self.dim = core.output.dense.out_features
self.hdim = self.dim // compression
self.adapter = GatedTransformerLayerAdapter(self.dim, self.hdim)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
outputs = self.core(hidden_states, attention_mask=attention_mask, head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask)
adapted = self.adapter(outputs[0])
outputs = (adapted,) + outputs[1:]
return outputs
class BartGenerator(BartForConditionalGeneration):
def __init__(self, config:BartConfig, emb=None, outlin=None):
super(BartGenerator, self).__init__(config)
if emb is not None:
self.model.shared = emb
self.model.decoder.embed_tokens = emb
if outlin is not None:
self.outlin = outlin
else:
self.outlin = torch.nn.Linear(config.d_model, config.vocab_size)
self.outlin.apply(self._init_weights)
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, torch.nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, SinusoidalPositionalEmbedding):
pass
elif isinstance(module, torch.nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def forward(
self,
input_ids,
attention_mask=None,
encoder_outputs=None,
decoder_input_ids=None,
decoder_attention_mask=None,
decoder_cached_states=None,
use_cache=False,
**unused
):
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
decoder_cached_states=decoder_cached_states,
use_cache=use_cache,
)
lm_logits = self.outlin(outputs[0])
outputs = (lm_logits,) + outputs[1:] # Add hidden states and attention if they are here
return outputs
class BartGeneratorTrain(torch.nn.Module):
def __init__(self, model:BartGenerator, smoothing=0., tensor2tree:Callable=None, orderless:Set[str]=set(),
maxlen:int=100, numbeam:int=1, **kw):
super(BartGeneratorTrain, self).__init__(**kw)
self.model = model
# CE loss
self.ce = CELoss(ignore_index=model.config.pad_token_id, smoothing=smoothing)
# accuracies
self.accs = SeqAccuracies()
self.accs.padid = model.config.pad_token_id
self.accs.unkid = UNKID
self.tensor2tree = tensor2tree
self.orderless = orderless
self.maxlen, self.numbeam = maxlen, numbeam
self.treeacc = TreeAccuracy(tensor2tree=tensor2tree,
orderless=orderless)
self.metrics = [self.ce, self.accs, self.treeacc]
def forward(self, input_ids, output_ids, *args, **kwargs):
ret = self.model(input_ids, attention_mask=input_ids!=self.model.config.pad_token_id, decoder_input_ids=output_ids)
probs = ret[0]
_, predactions = probs.max(-1)
outputs = [metric(probs, predactions, output_ids[:, 1:]) for metric in self.metrics]
outputs = merge_metric_dicts(*outputs)
return outputs, ret
def get_test_model(self, maxlen:int=None, numbeam:int=None):
maxlen = self.maxlen if maxlen is None else maxlen
numbeam = self.numbeam if numbeam is None else numbeam
ret = BartGeneratorTest(self.model, maxlen=maxlen, numbeam=numbeam,
tensor2tree=self.tensor2tree, orderless=self.orderless)
return ret
class AbstractBartGeneratorTrain(torch.nn.Module):
def __init__(self, model:BartGenerator, smoothing=0., tensor2tree:Callable=None, orderless:Set[str]=set(), tokenmask=None, **kw):
super(AbstractBartGeneratorTrain, self).__init__(**kw)
self.model = model
# CE loss
self.ce = CELoss(ignore_index=model.config.pad_token_id, smoothing=smoothing)
# accuracies
self.accs = SeqAccuracies()
self.accs.padid = model.config.pad_token_id
self.accs.unkid = UNKID
self.treeacc = TreeAccuracy(tensor2tree=tensor2tree,
orderless=orderless)
self.register_buffer("tokenmask", tokenmask)
self.metrics = [self.ce, self.accs, self.treeacc]
def forward(self, input_ids, _, output_ids, *args, **kwargs):
ret = self.model(input_ids, attention_mask=input_ids!=self.model.config.pad_token_id, decoder_input_ids=output_ids)
probs = ret[0] # (batsize, seqlen, vocsize)
if self.tokenmask is not None: # (vocsize,)
probs += torch.log(self.tokenmask[None, None, :])
_, predactions = probs.max(-1)
outputs = [metric(probs, predactions, output_ids[:, 1:]) for metric in self.metrics]
outputs = merge_metric_dicts(*outputs)
return outputs, ret
class BartGeneratorTest(BartGeneratorTrain):
def __init__(self, model:BartGenerator, maxlen:int=5, numbeam:int=None,
tensor2tree:Callable=None, orderless:Set[str]=set(), **kw):
super(BartGeneratorTest, self).__init__(model, **kw)
self.maxlen, self.numbeam = maxlen, numbeam
# accuracies
self.accs = SeqAccuracies()
self.accs.padid = model.config.pad_token_id
self.accs.unkid = UNKID
self.treeacc = TreeAccuracy(tensor2tree=tensor2tree,
orderless=orderless)
self.metrics = [self.accs, self.treeacc]
def forward(self, input_ids, output_ids, *args, **kwargs):
ret = self.model.generate(input_ids,
decoder_input_ids=output_ids[:, 0:1],
attention_mask=input_ids!=self.model.config.pad_token_id,
max_length=self.maxlen,
num_beams=self.numbeam)
outputs = [metric(None, ret[:, 1:], output_ids[:, 1:]) for metric in self.metrics]
outputs = merge_metric_dicts(*outputs)
return outputs, ret
class SpecialEmbedding(torch.nn.Embedding):
def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
metarare_targets=None, init_std=0.02):
super(SpecialEmbedding, self).__init__(num_embeddings, embedding_dim, padding_idx=padding_idx)
self.register_buffer("metarare_targets", metarare_targets)
# self.metarare = self.weight[self.metarare_source, :]
# self.base_emb = torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx)
self.extra_emb = torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx)
self.metarare_emb = torch.nn.Embedding(1, embedding_dim)
self.init_std = init_std
self.apply(self._init_weights)
self.extra_emb.weight.data.fill_(0)
def _init_weights(self, module):
std = self.init_std
if isinstance(module, torch.nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, SinusoidalPositionalEmbedding):
pass
elif isinstance(module, torch.nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def forward(self, input: torch.Tensor) -> torch.Tensor:
# metarare_targets are 1 for domain-specific tokens
base_emb = super(SpecialEmbedding, self).forward(input)
metarare_emb = self.metarare_emb(torch.zeros_like(input))
extra_emb = self.extra_emb(input)
switch = self.metarare_targets[input]
emb = switch[:, :, None] * (extra_emb + metarare_emb) \
+ (1 - switch[:, :, None]) * base_emb
return emb
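# Illustrative sketch (hypothetical standalone use): with metarare_targets marking ids >= 5
# as domain-specific, those ids are embedded as extra_emb plus the shared metarare embedding,
# while the remaining ids use the base embedding table.
def _demo_special_embedding():
    targets = (torch.arange(10) >= 5).float()
    emb = SpecialEmbedding(10, 8, padding_idx=0, metarare_targets=targets)
    ids = torch.tensor([[1, 6, 3]])
    print(emb(ids).shape)  # expected: torch.Size([1, 3, 8])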
class SpecialOutlin(torch.nn.Linear):
def __init__(self, dim, vocsize, metarare_targets=None, bias=True, init_std=0.02):
super(SpecialOutlin, self).__init__(dim, vocsize, bias=bias)
self.register_buffer("metarare_targets", metarare_targets)
# self.metarare = self.weight[self.metarare_source, :]
# self.base_emb = torch.nn.Embedding(num_embeddings, embedding_dim, padding_idx)
self.extra_lin = torch.nn.Linear(dim, vocsize, bias=bias)
self.metarare_lin = torch.nn.Linear(dim, 1, bias=bias)
self.init_std = init_std
self.apply(self._init_weights)
self.extra_lin.weight.data.fill_(0)
self.extra_lin.bias.data.fill_(0)
def _init_weights(self, module):
std = self.init_std
if isinstance(module, torch.nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, SinusoidalPositionalEmbedding):
pass
elif isinstance(module, torch.nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
def forward(self, input: torch.Tensor) -> torch.Tensor:
base_logits = super(SpecialOutlin, self).forward(input)
extra_logits = self.extra_lin(input)
metarare_logits = self.metarare_lin(input)
switch = self.metarare_targets.expand_as(base_logits)
logits = switch * (extra_logits + metarare_logits) + (1 - switch) * base_logits
return logits
def create_model(encoder_name="bert-base-uncased",
dec_vocabsize=None, dec_layers=6, dec_dim=640, dec_heads=8, dropout=0.,
maxlen=20, smoothing=0., numbeam=1, tensor2tree=None,
abstract_token_ids=set(),
metarare="no", useadapters=False):
if encoder_name != "bert-base-uncased":
raise NotImplementedError(f"encoder '{encoder_name}' not supported yet.")
pretrained = BertModel.from_pretrained(encoder_name)
# replace layers with adapted layers
if useadapters:
for i, layer in enumerate(pretrained.encoder.layer):
pretrained.encoder.layer[i] = AdaptedBertEncoderLayer(layer, compression=4)
encoder = pretrained
class BertEncoderWrapper(torch.nn.Module):
def __init__(self, model, dropout=0., **kw):
super(BertEncoderWrapper, self).__init__(**kw)
self.model = model
self.proj = torch.nn.Linear(pretrained.config.hidden_size, dec_dim, bias=False)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, input_ids, attention_mask=None):
ret, _ = self.model(input_ids, attention_mask=attention_mask)
if pretrained.config.hidden_size != dec_dim:
ret = self.proj(ret)
ret = self.dropout(ret)
ret = (ret, None, None)
return ret
encoder = BertEncoderWrapper(encoder, dropout=dropout)
decoder_config = BartConfig(d_model=dec_dim,
pad_token_id=0,
bos_token_id=1,
vocab_size=dec_vocabsize,
decoder_attention_heads=dec_heads//2,
decoder_layers=dec_layers,
dropout=dropout,
attention_dropout=min(0.1, dropout/2),
decoder_ffn_dim=dec_dim*4,
encoder_attention_heads=dec_heads,
encoder_layers=dec_layers,
encoder_ffn_dim=dec_dim*4,
)
isabstracttokenmask = torch.zeros(dec_vocabsize)
for abstract_token_id in abstract_token_ids:
isabstracttokenmask[abstract_token_id] = 1
# create special embeddings and output layer
if metarare == "no":
emb, outlin = None, None
else:
if "emb" in metarare.split("+"):
# emb = torch.nn.Embedding(decoder_config.vocab_size, decoder_config.d_model, decoder_config.pad_token_id)
emb = SpecialEmbedding(decoder_config.vocab_size,
decoder_config.d_model,
decoder_config.pad_token_id,
metarare_targets=1-isabstracttokenmask)
else:
emb = None
if "outlin" in metarare.split("+"):
# outlin = torch.nn.Linear(decoder_config.d_model, decoder_config.vocab_size)
outlin = SpecialOutlin(decoder_config.d_model,
decoder_config.vocab_size,
metarare_targets=1-isabstracttokenmask)
else:
outlin = None
#
# def _init_weights(module):
# std = 0.02
# if isinstance(module, torch.nn.Linear):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.bias is not None:
# module.bias.data.zero_()
# elif isinstance(module, SinusoidalPositionalEmbedding):
# pass
# elif isinstance(module, torch.nn.Embedding):
# module.weight.data.normal_(mean=0.0, std=std)
# if module.padding_idx is not None:
# module.weight.data[module.padding_idx].zero_()
# emb.apply(_init_weights)
# outlin.apply(_init_weights)
# print("using special embs and linouts")
# else:
# emb = torch.nn.Embedding(decoder_config.vocab_size, decoder_config.d_model, decoder_config.pad_token_id)
# outlin = torch.nn.Linear(decoder_config.d_model, decoder_config.vocab_size)
# emb = None
# outlin = None
model = BartGenerator(decoder_config, emb, outlin)
model.model.encoder = encoder
if useadapters:
for i, layer in enumerate(model.model.decoder.layers):
model.model.decoder.layers[i] = AdaptedBartDecoderLayer(layer)
orderless = {"op:and", "SW:concat"}
trainmodel = BartGeneratorTrain(model, smoothing=smoothing, tensor2tree=tensor2tree, orderless=orderless,
maxlen=maxlen, numbeam=numbeam)
abstracttrainmodel = AbstractBartGeneratorTrain(model, smoothing=smoothing, tensor2tree=tensor2tree, orderless=orderless,
tokenmask=isabstracttokenmask)
# testmodel = BartGeneratorTest(model, maxlen=maxlen, numbeam=numbeam, tensor2tree=tensor2tree, orderless=orderless)
return trainmodel, abstracttrainmodel
def _tensor2tree(x, D:Vocab=None):
# x: 1D int tensor
x = list(x.detach().cpu().numpy())
x = [D(xe) for xe in x]
x = [xe for xe in x if xe != D.padtoken]
# find first @END@ and cut off
parentheses_balance = 0
for i in range(len(x)):
if x[i] == D.endtoken:
x = x[:i]
break
elif x[i] == "(" or x[i][-1] == "(":
parentheses_balance += 1
elif x[i] == ")":
parentheses_balance -= 1
else:
pass
# balance parentheses
while parentheses_balance > 0:
x.append(")")
parentheses_balance -= 1
i = len(x) - 1
while parentheses_balance < 0 and i > 0:
if x[i] == ")":
x.pop(i)
parentheses_balance += 1
i -= 1
# convert to nltk.Tree
try:
tree, parsestate = lisp_to_tree(" ".join(x), None)
except Exception as e:
tree = None
return tree
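# Usage sketch (assumes the Vocab D maps token -> id via D[token] and id -> token via D(id),
# and that the listed tokens exist in D): an unbalanced prediction such as "( foo ( bar @END@"
# gets its parentheses repaired before being parsed into an nltk Tree (or None on failure).
def _demo_tensor2tree(D: Vocab):
    ids = torch.tensor([D["("], D["foo"], D["("], D["bar"], D[D.endtoken]])
    return _tensor2tree(ids, D=D)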
def move_grad(source=None, target=None):
source_params = {k: v for k, v in source.named_parameters()}
for k, v in target.named_parameters():
assert(v.size() == source_params[k].size())
if source_params[k].grad is not None:
if v.grad is None:
v.grad = source_params[k].grad
else:
v.grad += source_params[k].grad
source.zero_grad()
def reset_special_grads_inner(_m:torch.nn.Module, mode="none"):
# for paramname, param in _m.named_parameters():
# if paramname not in ["model.model.decoder.embed_tokens.extra_emb.weight",
# "model.outlin.extra_lin.weight", "model.outlin.extra_lin.weight"]:
# param.grad = None
if mode == "metarare": # train everything
pass
elif mode == "split" or mode == "metararetokensonly" \
or "inner:onlyemb" in mode.split("+"): # train only embeddings and output layer
for paramname, param in _m.named_parameters():
dotrain = False
for e in ["model.model.decoder.embed_tokens", "model.outlin"]:
if paramname.startswith(e):
dotrain = dotrain or True
if not dotrain:
param.grad = None
elif mode == "adapter" or mode == "adaptersplit" or mode == "adapterinner": # finetune only adapters and embeddings and output layers
for paramname, param in _m.named_parameters():
isadapterparam = False
m = _m
namesplits = paramname.split(".")
for namepiece in namesplits:
m = getattr(m, namepiece)
if isinstance(m, TransformerLayerAdapter):
isadapterparam = True
break
dotrain = False
if isadapterparam:
dotrain = dotrain or True
else:
for e in ["model.model.decoder.embed_tokens", "model.outlin"]:
if paramname.startswith(e):
dotrain = dotrain or True
if not dotrain:
param.grad = None
elif "inner:all" in mode.split("+"):
pass
# else:
# if isinstance(_m.model.model.decoder.embed_tokens, SpecialEmbedding):
# _m.model.model.decoder.embed_tokens.weight.grad = None
# _m.model.model.decoder.metarare_emb.weight.grad = None
# if isinstance(_m.model.outlin, SpecialOutlin):
# _m.model.outlin.weight.grad = None
# _m.model.outlin.metarare_lin.weight.grad = None
# if _m.model.outlin.bias is not None:
# _m.model.outlin.bias.grad = None
# _m.model.outlin.metarare_lin.bias.grad = None
def reset_special_grads_outer(_m, mode="none"):
# if mode == "metararetokensonly":
# if isinstance(_m.model.model.decoder.embed_tokens, SpecialEmbedding):
# _m.model.model.decoder.embed_tokens.extra_emb.weight.grad = None
# if isinstance(_m.model.outlin, SpecialOutlin):
# _m.model.outlin.extra_lin.weight.grad = None
# _m.model.outlin.extra_lin.bias.grad = None
if mode == "metarare" or mode == "metararetokensonly": # train everything except Special layers's, extra vectors
if isinstance(_m.model.model.decoder.embed_tokens, SpecialEmbedding):
_m.model.model.decoder.embed_tokens.extra_emb.weight.grad = None
if isinstance(_m.model.outlin, SpecialOutlin):
_m.model.outlin.extra_lin.weight.grad = None
_m.model.outlin.extra_lin.bias.grad = None
elif mode == "split" or "outer:noemb" in mode.split("+"): # don't train any embeddings/output layer
for paramname, param in _m.named_parameters():
for e in ["model.model.decoder.embed_tokens", "model.outlin"]:
if paramname.startswith(e):
param.grad = None
elif mode == "adapter": # don't train original bert weights
for paramname, param in _m.named_parameters():
isadapterparam = False
m = _m
namesplits = paramname.split(".")
for namepiece in namesplits:
m = getattr(m, namepiece)
if isinstance(m, TransformerLayerAdapter):
isadapterparam = True
break
dotrain = False
if paramname.startswith("model.model.encoder"):
if isadapterparam:
dotrain = dotrain or True
else:
dotrain = dotrain or True
if not dotrain:
param.grad = None
elif mode == "adaptersplit": # finetune only adapters and embeddings and output layers
for paramname, param in _m.named_parameters():
isadapterparam = False
m = _m
namesplits = paramname.split(".")
for namepiece in namesplits:
m = getattr(m, namepiece)
if isinstance(m, TransformerLayerAdapter):
isadapterparam = True
break
donttrain = False
if paramname.startswith("model.model.encoder"):
donttrain = donttrain or True # don't train anything in encoder
else:
if isadapterparam:
donttrain = donttrain or True
else:
for e in ["model.model.decoder.embed_tokens", "model.outlin"]:
if paramname.startswith(e):
donttrain = donttrain or True
if donttrain:
param.grad = None
elif "outer:all" in mode.split("+") or mode == "adapter" or mode == "adapterinner":
pass
def infiter(a):
while True:
for ae in a:
yield ae
def cat_batches(*x, pad_value=0):
y = list(zip(*x))
for i, yi in enumerate(y):
if isinstance(yi[0], torch.Tensor):
y[i] = q.pad_tensors(yi, 1, pad_value)
for i, yi in enumerate(y):
if isinstance(yi[0], torch.Tensor):
y[i] = torch.cat(yi, 0)
elif isinstance(yi, tuple):
_yi = yi[0]
for yij in yi[1:]:
_yi = _yi + yij
y[i] = _yi
return y
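# Illustrative sketch: concatenate two (input, output) tensor batches along the batch
# dimension; sequences are first padded (with pad_value) along dim 1 to a common length.
def _demo_cat_batches():
    batch_a = (torch.ones(2, 3, dtype=torch.long), torch.ones(2, 5, dtype=torch.long))
    batch_b = (torch.ones(4, 6, dtype=torch.long), torch.ones(4, 2, dtype=torch.long))
    merged = cat_batches(batch_a, batch_b)
    print([m.shape for m in merged])  # expected: [torch.Size([6, 6]), torch.Size([6, 5])]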
def meta_train_epoch(model=None,
absmodel=None,
data=None,
allsourcedata=None,
injecttraindata=False,
optim=None,
get_ft_model=None,
get_ft_optim=None,
losses=None,
abslosses=None,
ftlosses=None,
device=torch.device("cpu"),
tt=q.ticktock(" -"),
current_epoch=0,
max_epochs=0,
finetunesteps=1,
gradmode="none", # "none", "metarare", ...
on_start=tuple(),
on_end=tuple(),
print_every_batch=False,
clipgradnorm=None,
gradacc=1,
abstract_contrib=0.):
"""
Performs one meta-training epoch: samples outer batches across the source domains, fine-tunes
a copy of the model on each domain's support set, and applies the resulting gradients to the
main model using the given optimizer, with loss computed based on given losses.
:param model:
:param data: dictionary from domains to dicts of dataloaders
:param optim:
:param losses: list of loss wrappers
:param device: device to put batches on
:param tt:
:param current_epoch:
:param max_epochs:
:param on_start:
:param on_end:
:return:
"""
for loss in losses:
loss.push_epoch_to_history(epoch=current_epoch-1)
loss.reset_agg()
loss.loss.to(device)
model.to(device)
absmodel.to(device)
[e() for e in on_start]
q.epoch_reset(model)
optim.zero_grad()
numbatsperdomain = {k: len(data[k]["train"]) for k in data}
totalnumtrainbats = sum(numbatsperdomain.values())
probbatsperdomain = {k: numbatsperdomain[k] / totalnumtrainbats for k in numbatsperdomain}
# iter-ize training dataloaders in data
for k, v in data.items():
v["_train"] = iter(v["train"])
outerstep_i = 0
while True:
outerbatch = None
exhausted_domains = set()
while outerbatch is None and len(exhausted_domains) < len(data):
ks, vs = zip(*probbatsperdomain.items())
chosendomain = np.random.choice(ks, p=vs)
try:
outerbatch = next(data[chosendomain]["_train"])
except StopIteration as e:
# print(f"stopping iteration - outerstep_i: {outerstep_i}")
exhausted_domains.add(chosendomain)
outerbatch = None
if outerbatch is None:
break
# perform K number of inner steps
ftmodel = get_ft_model(model)
ftoptim = get_ft_optim(ftmodel)
inneriter = infiter(data[chosendomain]["finetune"])
extra_inneriter = infiter(allsourcedata["train"])
oldemb = ftmodel.model.model.decoder.embed_tokens.weight + 0
oldlin = ftmodel.model.outlin.weight + 0
for loss in ftlosses:
loss.push_epoch_to_history(epoch=str(current_epoch - 1)+"."+chosendomain)
loss.reset_agg()
loss.loss.to(device)
for innerstep_i in range(finetunesteps):
innerbatch = next(inneriter)
if injecttraindata:
extra_innerbatch = next(extra_inneriter)
innerbatch = cat_batches(innerbatch, extra_innerbatch)
ttmsg = q.train_batch(batch=innerbatch, model=ftmodel, optim=ftoptim, losses=ftlosses, device=device,
batch_number=innerstep_i, max_batches=finetunesteps, current_epoch=current_epoch,
max_epochs=max_epochs,
on_before_optim_step=[
partial(clipgradnorm, _m=ftmodel),
partial(reset_special_grads_inner, _m=ftmodel, mode=gradmode)])
if print_every_batch:
tt.msg(ttmsg)
else:
tt.live(ttmsg)
# after K inner updates
# perform outer update on main model weights
# do outer update:
# 1. obtain gradient on inner-updated model using outerbatch,
# 2. apply gradient on main model weights
ttmsg = q.train_batch(batch=outerbatch, model=ftmodel, optim=None, losses=losses, device=device,
batch_number=outerstep_i, max_batches=totalnumtrainbats, current_epoch=current_epoch,
max_epochs=max_epochs, gradient_accumulation_steps=gradacc)
# , on_before_optim_step=[
# partial(clipgradnorm, _m=model),
# partial(copy_grad, source=ftmodel, target=model)])
move_grad(ftmodel, model)
reset_special_grads_outer(model, mode=gradmode)
# do abstract prediction
if abstract_contrib > 0.:
abs_ttmsg = q.train_batch(batch=outerbatch, model=absmodel, optim=None, losses=abslosses, device=device,
batch_number=outerstep_i, max_batches=totalnumtrainbats, current_epoch=current_epoch,
max_epochs=max_epochs, gradient_accumulation_steps=gradacc,
loss_scale=abstract_contrib)
else:
abs_ttmsg = "N/A"
clipgradnorm(_m=model)
# do optim step
_do_optim_step = ((outerstep_i+1) % gradacc) == 0
_do_optim_step = _do_optim_step or (outerstep_i+1) == totalnumtrainbats # force optim step at the end of epoch
if _do_optim_step:
optim.step()
optim.zero_grad()
if print_every_batch:
tt.msg(ttmsg + " -- " + abs_ttmsg)
else:
tt.live(ttmsg + " -- " + abs_ttmsg)
outerstep_i += 1
tt.stoplive()
[e() for e in on_end]
ttmsg = q.pp_epoch_losses(*losses) + " -- " + q.pp_epoch_losses(*abslosses)
return ttmsg
def meta_test_epoch(model=None,
data=None,
allsourcedata=None,
injecttraindata=False,
get_ft_model=None,
get_ft_optim=None,
gradmode="none",
losses=None,
ftlosses=None,
finetunesteps=1,
bestfinetunestepsvar=None,
bestfinetunestepswhichmetric=None,
bestfinetunelowerisbetter=False,
evalinterval=-1,
mode="valid", # "valid" or "test"
device=torch.device("cpu"),
clipgradnorm=None,
current_epoch=0, max_epochs=0, print_every_batch=False,
on_start=tuple(), on_start_batch=tuple(), on_end_batch=tuple(), on_end=tuple(),
on_outer_start=tuple(), on_outer_end=tuple()):
"""
Performs a test epoch: for each target domain, fine-tunes a copy of the model on its
support ("finetune") set and evaluates it on the validation or test set.
:param model:
:param data: dictionary from domains to dicts of dataloaders
:param losses:
:param device:
:param current_epoch:
:param max_epochs:
:param on_start:
:param on_start_batch:
:param on_end_batch:
:param on_end:
:return:
"""
if evalinterval < 0:
evalinterval = 1
tt = q.ticktock(" -")
model.to(device)
q.epoch_reset(model)
[e() for e in on_outer_start]
lossesperdomain = {}
stepperevals = []
for domain in data:
stepperevals.append([])
lossesperdomain[domain] = []
# doing one domain
domaindata = data[domain]
# perform fine-tuning (with early stopping if valid is given
ftmodel = get_ft_model(model)
ftoptim = get_ft_optim(ftmodel)
ftmodel.train()
inneriter = infiter(domaindata["finetune"])
extra_inneriter = infiter(allsourcedata["train"])
for loss in ftlosses:
loss.push_epoch_to_history(epoch=str(current_epoch - 1)+"."+domain)
loss.reset_agg()
loss.loss.to(device)
for innerstep_i in range(finetunesteps):
innerbatch = next(inneriter)
if injecttraindata:
extra_innerbatch = next(extra_inneriter)
innerbatch = cat_batches(innerbatch, extra_innerbatch)
ttmsg = q.train_batch(batch=innerbatch, model=ftmodel, optim=ftoptim, losses=ftlosses, device=device,
batch_number=innerstep_i, max_batches=finetunesteps, current_epoch=current_epoch, max_epochs=max_epochs,
on_before_optim_step=[partial(clipgradnorm, _m=ftmodel),
partial(reset_special_grads_inner, _m=ftmodel, mode=gradmode)])
if print_every_batch:
tt.msg(ttmsg)
else:
tt.live(ttmsg)
test_ftmodel = ftmodel.get_test_model()
if (mode == "valid" and (innerstep_i+1) % evalinterval == 0) \
or (mode == "test"): #"(innerstep_i+1 == finetunesteps):
_losses = deepcopy(losses)
dataname = "valid" if mode == "valid" else "test"
q.test_epoch(test_ftmodel, dataloader=domaindata[dataname], losses=_losses, device=device,
current_epoch=current_epoch, max_epochs=max_epochs, print_every_batch=print_every_batch,
on_start=on_start, on_end=on_end, on_start_batch=on_start_batch, on_end_batch=on_end_batch)
lossesperdomain[domain].append(_losses)
stepperevals[-1].append(innerstep_i)
# find best number of steps
metricsmatrix = np.zeros((len(lossesperdomain), math.ceil(finetunesteps / evalinterval), len(losses)))
for i, domain in enumerate(sorted(lossesperdomain.keys())):
for j, steplosses in enumerate(lossesperdomain[domain]):
for k, lossval in enumerate(steplosses):
metricsmatrix[i, j, k] = lossval.get_epoch_error()
metricsmatrix = metricsmatrix.mean(0) # (numevals, numlosses)
if mode == "valid":
critvals = metricsmatrix[:, bestfinetunestepswhichmetric] # (numevals)
critvals = critvals * (1 if bestfinetunelowerisbetter is False else -1)
k = np.argmax(critvals)
evalstep = stepperevals[0][k]
bestfinetunestepsvar.v = evalstep
else:
print("metricsmatrix:")
print(metricsmatrix)
evalstep = q.v(bestfinetunestepsvar)
k = q.v(bestfinetunestepsvar)
for loss, _loss in zip(losses, metricsmatrix[k, :]):
loss.epoch_agg_values.append(_loss)
loss.epoch_agg_sizes.append(1)
tt.stoplive()
[e() for e in on_outer_end]
ttmsg = q.pp_epoch_losses(*losses) + f" [@{evalstep+1}]"
return ttmsg
def run(traindomains="ALL",
domain="restaurants",
mincoverage=2,
lr=0.0001,
enclrmul=0.1,
numbeam=1,
ftlr=0.0001,
cosinelr=False,
warmup=0.,
batsize=30,
epochs=100,
finetunesteps=5,
maxfinetunesteps=4,
evalinterval=2,
dropout=0.1,
wreg=1e-9,
gradnorm=3,
gradacc=1,
smoothing=0.,
patience=20,
gpu=-1,
seed=123456789,
encoder="bert-base-uncased",
numlayers=6,
hdim=600,
numheads=8,
maxlen=30,
fullsimplify=True,
domainstart=False,
supportsetting="lex", # "lex" or "min"
metarare="no",
abscontrib=1.,
gradmode="none", # "none", "metarare", "metarareonly", "split"
injecttraindata=False,
useadapters=False,
):
settings = locals().copy()
print(json.dumps(settings, indent=4))
# wandb.init(project=f"overnight_joint_pretrain_fewshot_{pretrainsetting}-{finetunesetting}-{domain}",
# reinit=True, config=settings)
if traindomains == "ALL":
alldomains = {"recipes", "restaurants", "blocks", "calendar", "housing", "publications"}
traindomains = alldomains - {domain, }
else:
traindomains = set(traindomains.split("+"))
random.seed(seed)
torch.manual_seed(seed)
|
np.random.seed(seed)
|
numpy.random.seed
|
import pickle
import os
import numpy as np
import time
import matplotlib.pyplot as plt
from math import sqrt, ceil
from random import randrange
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def load_one_cifar_batch(file):
"""
Load a single CIFAR-10 batch file and return its images and labels.
"""
with open(file, 'rb') as file:
dict = pickle.load(file, encoding='bytes')
X = dict[b'data']
Y = dict[b'labels']
X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("float")
Y = np.array(Y)
return X, Y
def load_all_cifar(root):
"""
Load the five CIFAR-10 training batches and the single test batch.
"""
x_all = []
y_all = []
for i in range(1, 6):
filepath = os.path.join(root, "data_batch_%d" % (i,))
X, Y = load_one_cifar_batch(filepath)
x_all.append(X)
y_all.append(Y)
Xtrain = np.concatenate(x_all)
Ytrain = np.concatenate(y_all)
del X, Y
Xtest, Ytest = load_one_cifar_batch(os.path.join(root, "test_batch"))
return Xtrain, Ytrain, Xtest, Ytest
def showimages(X_train, y_train, sample_per_class=8):
"""
Plot a few sample images from each class.
"""
# define class name list
class_list = ['plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(class_list)
# print some pictures from training set
for class_index, class_name in enumerate(class_list):
# get indexes of training labels that match the current class index
y_train_indexes = np.flatnonzero(y_train == class_index)
# randomly pick sample indexes from the class
y_train_indexes = np.random.choice(
y_train_indexes, sample_per_class, replace=False)
# show images
for i, y_index in enumerate(y_train_indexes):
plt_idx = i * num_classes + class_index + 1
plt.subplot(sample_per_class, num_classes, plt_idx)
plt.imshow(X_train[y_index].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(class_name)
plt.show()
def time_elapse(function, *args):
"""
Call a function with args and return the time (in seconds) that it took to execute.
"""
tic = time.time()
function(*args)
toc = time.time()
return toc - tic
def load_data(dir, num_training=49000, num_validation=1000, num_test=1000, num_dev=500, bias=False, row=False):
"""
Load cifar data and preprocess data
"""
# load data
X_train, y_train, X_test, y_test = load_all_cifar(dir)
# get the validation set
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# get the training set
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# get a development set as the subset of training set
mask = range(num_dev)
X_dev = X_train[mask]
y_dev = y_train[mask]
# get a sub test set
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
"""
preprocess data
"""
# Mean subtraction
mean_image = np.mean(X_train, axis=0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
"""
# visualize the mean image
"""
# plt.figure(figsize=(4, 4))
# plt.imshow(mean_image.reshape((32, 32, 3)).astype('uint8'))
# plt.show()
# reshape the image data into rows
if row:
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# if need to add a bias 1 to the last column
if bias:
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
else:
# Retain the input shape and change the channel to the first
X_train = X_train.transpose(0, 3, 1, 2).copy()
X_val = X_val.transpose(0, 3, 1, 2).copy()
X_test = X_test.transpose(0, 3, 1, 2).copy()
X_dev = X_dev.transpose(0, 3, 1, 2).copy()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev
def eval_numerical_gradient(f, x, verbose=True, h=0.00001):
"""
a naive implementation of numerical gradient of f at x
- f should be a function that takes a single argument
- x is the point (numpy array) to evaluate the gradient at
"""
fx = f(x) # evaluate function value at original point
grad = np.zeros_like(x)
# iterate over all indexes in x
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
# evaluate function at x+h
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # restore
# compute the partial derivative with centered formula
grad[ix] = (fxph - fxmh) / (2 * h) # the slope
if verbose:
print(ix, grad[ix])
it.iternext() # step to next dimension
return grad
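# Illustrative self-check (added example, not part of the original utilities): the
# centered-difference gradient of f(x) = sum(x ** 2) should match the analytic gradient 2 * x.
def _demo_eval_numerical_gradient():
    x = np.random.randn(3, 4)
    num_grad = eval_numerical_gradient(lambda z: np.sum(z ** 2), x, verbose=False)
    print(rel_error(num_grad, 2 * x))  # should be very small (~1e-7 or less)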
def eval_numerical_gradient_array(f, x, df, h=1e-5):
"""
Evaluate a numeric gradient for a function that accepts a numpy
array and returns a numpy array.
"""
grad = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
oldval = x[ix]
x[ix] = oldval + h
pos = f(x).copy()
x[ix] = oldval - h
neg = f(x).copy()
x[ix] = oldval
grad[ix] = np.sum((pos - neg) * df) / (2 * h)
it.iternext()
return grad
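# Companion sketch for the array version: with upstream gradient df, the numerical input
# gradient of the elementwise square y = x ** 2 should match 2 * x * df.
def _demo_eval_numerical_gradient_array():
    x = np.random.randn(4, 5)
    df = np.random.randn(4, 5)
    num_grad = eval_numerical_gradient_array(lambda z: z ** 2, x, df)
    print(rel_error(num_grad, 2 * x * df))  # should be very small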
def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
"""
Compute numeric gradients for a function that operates on input
and output blobs.
We assume that f accepts several input blobs as arguments, followed by a blob
into which outputs will be written. For example, f might be called like this:
f(x, w, out)
where x and w are input Blobs, and the result of f will be written to out.
Inputs:
- f: function
- inputs: tuple of input blobs
- output: output blob
- h: step size
"""
numeric_diffs = []
for input_blob in inputs:
diff = np.zeros_like(input_blob.diffs)
it = np.nditer(input_blob.vals, flags=['multi_index'],
op_flags=['readwrite'])
while not it.finished:
idx = it.multi_index
orig = input_blob.vals[idx]
input_blob.vals[idx] = orig + h
f(*(inputs + (output,)))
pos = np.copy(output.vals)
input_blob.vals[idx] = orig - h
f(*(inputs + (output,)))
neg = np.copy(output.vals)
input_blob.vals[idx] = orig
diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)
it.iternext()
numeric_diffs.append(diff)
return numeric_diffs
def eval_numerical_gradient_net(net, inputs, output, h=1e-5):
return eval_numerical_gradient_blobs(lambda *args: net.forward(),
inputs, output, h=h)
def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5):
"""
sample a few random elements and compare the numerical and analytic gradients
only in those dimensions.
"""
for i in range(num_checks):
ix = tuple([randrange(m) for m in x.shape])
oldval = x[ix]
x[ix] = oldval + h # increment by h
fxph = f(x) # evaluate f(x + h)
x[ix] = oldval - h # decrement by h
fxmh = f(x) # evaluate f(x - h)
x[ix] = oldval # reset
grad_numerical = (fxph - fxmh) / (2 * h)
grad_analytic = analytic_grad[ix]
rel_error = abs(grad_numerical - grad_analytic) / \
(abs(grad_numerical) + abs(grad_analytic))
print('numerical: %f analytic: %f, relative error: %e' %
(grad_numerical, grad_analytic, rel_error))
def visualize_grid(Xs, ubound=255.0, padding=1):
"""
Reshape a 4D tensor of image data to a grid for easy visualization.
Inputs:
- Xs: Data of shape (N, H, W, C)
- ubound: Output grid will have values scaled to the range [0, ubound]
- padding: The number of blank pixels between elements of the grid
"""
(N, H, W, C) = Xs.shape
grid_size = int(ceil(sqrt(N)))
grid_height = H * grid_size + padding * (grid_size - 1)
grid_width = W * grid_size + padding * (grid_size - 1)
grid = np.zeros((grid_height, grid_width, C))
next_idx = 0
y0, y1 = 0, H
for y in range(grid_size):
x0, x1 = 0, W
for x in range(grid_size):
if next_idx < N:
img = Xs[next_idx]
low, high = np.min(img), np.max(img)
grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
# grid[y0:y1, x0:x1] = Xs[next_idx]
next_idx += 1
x0 += W + padding
x1 += W + padding
y0 += H + padding
y1 += H + padding
# grid_max = np.max(grid)
# grid_min = np.min(grid)
# grid = ubound * (grid - grid_min) / (grid_max - grid_min)
return grid
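# Usage sketch (illustrative only): tile a batch of small random RGB "images" into a single
# grid and display it with matplotlib.
def _demo_visualize_grid():
    Xs = np.random.rand(16, 8, 8, 3) * 255.0
    grid = visualize_grid(Xs, ubound=255.0, padding=1)
    plt.imshow(grid.astype('uint8'))
    plt.axis('off')
    plt.show()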
def vis_grid(Xs):
""" visualize a grid of images """
(N, H, W, C) = Xs.shape
A = int(ceil(sqrt(N)))
G = np.ones((A*H+A, A*W+A, C), Xs.dtype)
G *=
|
np.min(Xs)
|
numpy.min
|
import os
import logging
import numpy as np
from numpy.testing import assert_allclose
import trackpy as tp
from trackpy.artificial import (draw_features_brightfield,
gen_nonoverlapping_locations,
gen_connected_locations)
from trackpy.tests.common import sort_positions, StrictTestCase
from trackpy.feature import locate
from trackpy.locate_functions.brightfield_ring import locate_brightfield_ring
from trackpy.refine.brightfield_ring import (_min_edge, _fit_circle)
path, _ = os.path.split(os.path.abspath(__file__))
# we need to use a low value for min_percentile because the artificial
# edge is very sharp
MIN_PERC = 0.5
def draw_artificial_image(shape, pos, radius, noise_level, dip=False,
traditional=False, **kwargs):
radius = tp.utils.validate_tuple(radius, len(shape))
# tp.locate ignores a margin of size radius, take 1 px more to be safe
diameter = tuple([(r * 2) + 1 for r in radius])
size = [d / 2 for d in diameter]
cols = ['x', 'y', 'z'][:len(shape)][::-1]
image = draw_features_brightfield(shape, pos, size, noise_level, dip=dip)
if not traditional:
kwargs.update({'min_percentile': MIN_PERC})
result = locate_brightfield_ring(image, diameter, **kwargs)
else:
result = locate(image, diameter, **kwargs)
# For some reason, sorting the DataFrame gives wrong orders in some cases
result = np.sort(result[cols].astype(float).values, axis=0)
expected = np.sort(pos, axis=0)
return result, expected
def artificial_image(shape, count, radius, noise_level, dip=False,
traditional=False, **kwargs):
radius = tp.utils.validate_tuple(radius, len(shape))
margin = tuple([r + 1 for r in radius])
separation = tuple([2.5*r for r in radius])
pos = gen_nonoverlapping_locations(shape, count, separation, margin)
return draw_artificial_image(shape, pos, radius, noise_level, dip,
traditional, **kwargs)
def artificial_cluster(shape, count, radius, noise_level, dip=False,
traditional=False, **kwargs):
radius = tp.utils.validate_tuple(radius, len(shape))
margin = tuple([r + 1 for r in radius])
separation = tuple([1.4*r for r in radius])
pos = gen_connected_locations(shape, count, separation, margin)
return draw_artificial_image(shape, pos, radius, noise_level, dip,
traditional, **kwargs)
def generate_random_circle(r, x, y, num_samples=500, noise=0):
np.random.seed(1)
theta = np.random.rand(num_samples) * (2 * np.pi)
if noise > 0:
mini = r-noise
maxi = r+noise
r_rand = np.random.rand(num_samples) * (maxi-mini) + mini
else:
r_rand = r
xc = r_rand * np.cos(theta) + x
yc = r_rand * np.sin(theta) + y
return np.squeeze(np.dstack((xc, yc))).T
class TestLocateBrightfieldRing(StrictTestCase):
def setUp(self):
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
self.pixel_tolerance = 0.9
self.n_feat_sparse = 5
self.n_feat_cluster = 3
self.n_feat_dense = 30
self.image_size = (250, 350)
self.radius = 13
self.cluster_sep = 1.0*self.radius
def test_multiple_simple_sparse(self):
actual, expected = artificial_image(self.image_size,
self.n_feat_sparse,
self.radius, noise_level=0)
assert_allclose(actual, expected, atol=self.pixel_tolerance)
def test_multiple_simple_sparse_no_multiprocessing(self):
actual, expected = artificial_image(self.image_size,
self.n_feat_sparse,
self.radius, noise_level=0,
processes=0)
|
assert_allclose(actual, expected, atol=self.pixel_tolerance)
|
numpy.testing.assert_allclose
|
# ============================================================================ #
# LINEAR REGRESSION #
# ============================================================================ #
# Renders stacked line charts showing convex and non-convex obj function
#%%
import pandas as pd
import plotly.offline as po
import plotly.graph_objs as go
import numpy as np
from sklearn import datasets
from sklearn.linear_model import SGDRegressor
from ml_studio.supervised_learning.regression import LinearRegression
from ml_studio.supervised_learning.training.metrics import R2
from ml_studio.utils.data_manager import StandardScaler, data_split
from ml_studio.visual.animations import SingleModelFit2D, SingleModelSearch3D
directory = "./content/figures/"
# ---------------------------------------------------------------------------- #
# DATA #
# ---------------------------------------------------------------------------- #
#%%
# Data
X, y, coef = datasets.make_regression(n_samples=1000, n_features=1, bias=10,
noise=40, coef=True, random_state=50)
X_train, X_test, y_train, y_test = data_split(X,y, test_size=0.3, seed=50)
# ---------------------------------------------------------------------------- #
# SCATTER PLOT #
# ---------------------------------------------------------------------------- #
#%%
# Linear Regression Scatterplot
data = go.Scatter(
x=X_train.flatten(),
y=y_train,
mode='markers',
marker=dict(color='steelblue')
)
layout = go.Layout(title='Simulated Data',
height=400,
width=800,
showlegend=False,
xaxis_title="X",
yaxis_title="Y",
margin=dict(l=10,r=10,t=40,b=10),
template='plotly_white'
)
fig = go.Figure(data=data, layout=layout)
fig.show()
po.plot(fig, filename = "./content/figures/simulated_training_data.html", auto_open=False)
# ---------------------------------------------------------------------------- #
# LINEAR REGRESSION #
# ---------------------------------------------------------------------------- #
#%%
# Linear Regression
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
lr = LinearRegression(epochs=1000, learning_rate=0.01, val_size=0.2, patience=40,
early_stop=True, metric='r2', verbose=True, checkpoint=100)
lr.fit(X_train,y_train)
print(lr.intercept_)
print(lr.coef_.shape)
# ---------------------------------------------------------------------------- #
# ANIMATIONS #
# ---------------------------------------------------------------------------- #
#%%
# Animations
plot = SingleModelSearch3D()
plot.search(lr, directory=directory, filename="linear_regression_search_test.gif")
plot = SingleModelFit2D()
plot.fit(lr, directory=directory, filename="linear_regression_fit_test.gif")
#%%
# ---------------------------------------------------------------------------- #
# TEST #
# ---------------------------------------------------------------------------- #
scaler.fit(X_test)
X_test = scaler.transform(X_test)
# %%
# ---------------------------------------------------------------------------- #
# PREDICTED VS ACTUAL ON TEST DATA #
# ---------------------------------------------------------------------------- #
def f_actual(x):
y = (10 +
|
np.dot(coef, x)
|
numpy.dot
|
import os
import unittest
import warnings
from io import BytesIO
from pathlib import Path
import h5py
import numpy as np
from h5py import SoftLink, HardLink, ExternalLink, File
from h5py import filters as h5py_filters
from hdmf.backends.hdf5 import H5DataIO
from hdmf.backends.hdf5.h5tools import HDF5IO, ROOT_NAME, SPEC_LOC_ATTR
from hdmf.backends.io import HDMFIO, UnsupportedOperation
from hdmf.backends.warnings import BrokenLinkWarning
from hdmf.build import (GroupBuilder, DatasetBuilder, BuildManager, TypeMap, ObjectMapper, OrphanContainerBuildError,
LinkBuilder)
from hdmf.container import Container, Data
from hdmf.data_utils import DataChunkIterator, InvalidDataIOError
from hdmf.spec.catalog import SpecCatalog
from hdmf.spec.namespace import NamespaceCatalog
from hdmf.spec.namespace import SpecNamespace
from hdmf.spec.spec import (AttributeSpec, DatasetSpec, GroupSpec, LinkSpec, ZERO_OR_MANY, ONE_OR_MANY, ZERO_OR_ONE,
RefSpec, DtypeSpec)
from hdmf.testing import TestCase
from hdmf.utils import docval, getargs
from tests.unit.utils import (Foo, FooBucket, CORE_NAMESPACE, get_temp_filepath, CustomGroupSpec, CustomDatasetSpec,
CustomSpecNamespace)
class FooFile(Container):
@docval({'name': 'buckets', 'type': list, 'doc': 'the FooBuckets in this file', 'default': list()},
{'name': 'foo_link', 'type': Foo, 'doc': 'an optional linked Foo', 'default': None},
{'name': 'foofile_data', 'type': 'array_data', 'doc': 'an optional dataset', 'default': None},
{'name': 'foo_ref_attr', 'type': Foo, 'doc': 'a reference Foo', 'default': None},
)
def __init__(self, **kwargs):
buckets, foo_link, foofile_data, foo_ref_attr = getargs('buckets', 'foo_link', 'foofile_data',
'foo_ref_attr', kwargs)
super().__init__(name=ROOT_NAME) # name is not used - FooFile should be the root container
self.__buckets = {b.name: b for b in buckets} # note: collections of groups are unordered in HDF5
for f in buckets:
f.parent = self
self.__foo_link = foo_link
self.__foofile_data = foofile_data
self.__foo_ref_attr = foo_ref_attr
def __eq__(self, other):
return (self.buckets == other.buckets
and self.foo_link == other.foo_link
and self.foofile_data == other.foofile_data)
def __str__(self):
return ('buckets=%s, foo_link=%s, foofile_data=%s' % (self.buckets, self.foo_link, self.foofile_data))
@property
def buckets(self):
return self.__buckets
def add_bucket(self, bucket):
self.__buckets[bucket.name] = bucket
bucket.parent = self
def remove_bucket(self, bucket_name):
bucket = self.__buckets.pop(bucket_name)
if bucket.parent is self:
self._remove_child(bucket)
return bucket
@property
def foo_link(self):
return self.__foo_link
@foo_link.setter
def foo_link(self, value):
if self.__foo_link is None:
self.__foo_link = value
else:
raise ValueError("can't reset foo_link attribute")
@property
def foofile_data(self):
return self.__foofile_data
@foofile_data.setter
def foofile_data(self, value):
if self.__foofile_data is None:
self.__foofile_data = value
else:
raise ValueError("can't reset foofile_data attribute")
@property
def foo_ref_attr(self):
return self.__foo_ref_attr
@foo_ref_attr.setter
def foo_ref_attr(self, value):
if self.__foo_ref_attr is None:
self.__foo_ref_attr = value
else:
raise ValueError("can't reset foo_ref_attr attribute")
class H5IOTest(TestCase):
"""Tests for h5tools IO tools"""
def setUp(self):
self.path = get_temp_filepath()
self.io = HDF5IO(self.path, mode='a')
self.f = self.io._file
def tearDown(self):
self.io.close()
os.remove(self.path)
##########################################
# __chunked_iter_fill__(...) tests
##########################################
def test__chunked_iter_fill(self):
"""Matrix test of HDF5IO.__chunked_iter_fill__ using a DataChunkIterator with different parameters"""
data_opts = {'iterator': range(10),
'numpy': np.arange(30).reshape(5, 2, 3),
'list': np.arange(30).reshape(5, 2, 3).tolist(),
'sparselist1': [1, 2, 3, None, None, None, None, 8, 9, 10],
'sparselist2': [None, None, 3],
'sparselist3': [1, 2, 3, None, None], # note: cannot process None in ndarray
'nanlist': [[[1, 2, 3, np.nan, np.nan, 6], [np.nan, np.nan, 3, 4, np.nan, np.nan]],
[[10, 20, 30, 40, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]]]}
buffer_size_opts = [1, 2, 3, 4] # data is divisible by some of these, some not
for data_type, data in data_opts.items():
iter_axis_opts = [0, 1, 2]
if data_type == 'iterator' or data_type.startswith('sparselist'):
iter_axis_opts = [0] # only one dimension
for iter_axis in iter_axis_opts:
for buffer_size in buffer_size_opts:
with self.subTest(data_type=data_type, iter_axis=iter_axis, buffer_size=buffer_size):
with warnings.catch_warnings(record=True) as w:
dci = DataChunkIterator(data=data, buffer_size=buffer_size, iter_axis=iter_axis)
if len(w) <= 1:
# init may throw UserWarning for iterating over not-first dim of a list. ignore here
pass
dset_name = '%s, %d, %d' % (data_type, iter_axis, buffer_size)
my_dset = HDF5IO.__chunked_iter_fill__(self.f, dset_name, dci)
if data_type == 'iterator':
self.assertListEqual(my_dset[:].tolist(), list(data))
elif data_type == 'numpy':
self.assertTrue(np.all(my_dset[:] == data))
self.assertTupleEqual(my_dset.shape, data.shape)
elif data_type == 'list' or data_type == 'nanlist':
data_np = np.array(data)
np.testing.assert_array_equal(my_dset[:], data_np)
self.assertTupleEqual(my_dset.shape, data_np.shape)
elif data_type.startswith('sparselist'):
# replace None in original data with default hdf5 fillvalue 0
data_zeros = np.where(np.equal(np.array(data), None), 0, data)
np.testing.assert_array_equal(my_dset[:], data_zeros)
self.assertTupleEqual(my_dset.shape, data_zeros.shape)
##########################################
# write_dataset tests: scalars
##########################################
def test_write_dataset_scalar(self):
a = 10
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTupleEqual(dset.shape, ())
self.assertEqual(dset[()], a)
def test_write_dataset_string(self):
a = 'test string'
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTupleEqual(dset.shape, ())
# self.assertEqual(dset[()].decode('utf-8'), a)
read_a = dset[()]
if isinstance(read_a, bytes):
read_a = read_a.decode('utf-8')
self.assertEqual(read_a, a)
##########################################
# write_dataset tests: lists
##########################################
def test_write_dataset_list(self):
a = np.arange(30).reshape(5, 2, 3)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a.tolist(), attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a))
def test_write_dataset_list_compress_gzip(self):
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
compression='gzip',
compression_opts=5,
shuffle=True,
fletcher32=True)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 5)
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
@unittest.skipIf("lzf" not in h5py_filters.encode,
"LZF compression not supported in this h5py library install")
def test_write_dataset_list_compress_lzf(self):
warn_msg = ("lzf compression may not be available on all installations of HDF5. Use of gzip is "
"recommended to ensure portability of the generated HDF5 files.")
with self.assertWarnsWith(UserWarning, warn_msg):
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
compression='lzf',
shuffle=True,
fletcher32=True)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.compression, 'lzf')
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
@unittest.skipIf("szip" not in h5py_filters.encode,
"SZIP compression not supported in this h5py library install")
def test_write_dataset_list_compress_szip(self):
warn_msg = ("szip compression may not be available on all installations of HDF5. Use of gzip is "
"recommended to ensure portability of the generated HDF5 files.")
with self.assertWarnsWith(UserWarning, warn_msg):
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
compression='szip',
compression_opts=('ec', 16),
shuffle=True,
fletcher32=True)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.compression, 'szip')
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
def test_write_dataset_list_compress_available_int_filters(self):
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
compression=1,
shuffle=True,
fletcher32=True,
allow_plugin_filters=True)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
def test_write_dataset_list_enable_default_compress(self):
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
compression=True)
self.assertEqual(a.io_settings['compression'], 'gzip')
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.compression, 'gzip')
def test_write_dataset_list_disable_default_compress(self):
with warnings.catch_warnings(record=True) as w:
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
compression=False,
compression_opts=5)
self.assertEqual(len(w), 1) # We expect a warning that compression options are being ignored
self.assertFalse('compression_ops' in a.io_settings)
self.assertFalse('compression' in a.io_settings)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.compression, None)
def test_write_dataset_list_chunked(self):
a = H5DataIO(np.arange(30).reshape(5, 2, 3),
chunks=(1, 1, 3))
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.chunks, (1, 1, 3))
def test_write_dataset_list_fillvalue(self):
a = H5DataIO(np.arange(20).reshape(5, 4), fillvalue=-1)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', a, attributes={}))
dset = self.f['test_dataset']
self.assertTrue(np.all(dset[:] == a.data))
self.assertEqual(dset.fillvalue, -1)
##########################################
# write_dataset tests: tables
##########################################
def test_write_table(self):
cmpd_dt = np.dtype([('a', np.int32), ('b', np.float64)])
data = np.zeros(10, dtype=cmpd_dt)
data['a'][1] = 101
data['b'][1] = 0.1
dt = [{'name': 'a', 'dtype': 'int32', 'doc': 'a column'},
{'name': 'b', 'dtype': 'float64', 'doc': 'b column'}]
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', data, attributes={}, dtype=dt))
dset = self.f['test_dataset']
self.assertEqual(dset['a'].tolist(), data['a'].tolist())
self.assertEqual(dset['b'].tolist(), data['b'].tolist())
def test_write_table_nested(self):
b_cmpd_dt = np.dtype([('c', np.int32), ('d', np.float64)])
cmpd_dt = np.dtype([('a', np.int32), ('b', b_cmpd_dt)])
data = np.zeros(10, dtype=cmpd_dt)
data['a'][1] = 101
data['b']['c'] = 202
data['b']['d'] = 10.1
b_dt = [{'name': 'c', 'dtype': 'int32', 'doc': 'c column'},
{'name': 'd', 'dtype': 'float64', 'doc': 'd column'}]
dt = [{'name': 'a', 'dtype': 'int32', 'doc': 'a column'},
{'name': 'b', 'dtype': b_dt, 'doc': 'b column'}]
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', data, attributes={}, dtype=dt))
dset = self.f['test_dataset']
self.assertEqual(dset['a'].tolist(), data['a'].tolist())
self.assertEqual(dset['b'].tolist(), data['b'].tolist())
##########################################
# write_dataset tests: Iterable
##########################################
def test_write_dataset_iterable(self):
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', range(10), attributes={}))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), list(range(10)))
def test_write_dataset_iterable_multidimensional_array(self):
a = np.arange(30).reshape(5, 2, 3)
aiter = iter(a)
daiter = DataChunkIterator.from_iterable(aiter, buffer_size=2)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', daiter, attributes={}))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), a.tolist())
def test_write_multi_dci_oaat(self):
"""
Test writing multiple DataChunkIterators, one at a time
"""
a = np.arange(30).reshape(5, 2, 3)
b = np.arange(30, 60).reshape(5, 2, 3)
aiter = iter(a)
biter = iter(b)
daiter1 = DataChunkIterator.from_iterable(aiter, buffer_size=2)
daiter2 = DataChunkIterator.from_iterable(biter, buffer_size=2)
builder = GroupBuilder("root")
dataset1 = DatasetBuilder('test_dataset1', daiter1)
dataset2 = DatasetBuilder('test_dataset2', daiter2)
builder.set_dataset(dataset1)
builder.set_dataset(dataset2)
self.io.write_builder(builder)
dset1 = self.f['test_dataset1']
self.assertListEqual(dset1[:].tolist(), a.tolist())
dset2 = self.f['test_dataset2']
self.assertListEqual(dset2[:].tolist(), b.tolist())
def test_write_multi_dci_conc(self):
"""
Test writing multiple DataChunkIterators, concurrently
"""
a = np.arange(30).reshape(5, 2, 3)
b = np.arange(30, 60).reshape(5, 2, 3)
aiter = iter(a)
biter = iter(b)
daiter1 = DataChunkIterator.from_iterable(aiter, buffer_size=2)
daiter2 = DataChunkIterator.from_iterable(biter, buffer_size=2)
builder = GroupBuilder("root")
dataset1 = DatasetBuilder('test_dataset1', daiter1)
dataset2 = DatasetBuilder('test_dataset2', daiter2)
builder.set_dataset(dataset1)
builder.set_dataset(dataset2)
self.io.write_builder(builder)
dset1 = self.f['test_dataset1']
self.assertListEqual(dset1[:].tolist(), a.tolist())
dset2 = self.f['test_dataset2']
self.assertListEqual(dset2[:].tolist(), b.tolist())
def test_write_dataset_iterable_multidimensional_array_compression(self):
a = np.arange(30).reshape(5, 2, 3)
aiter = iter(a)
daiter = DataChunkIterator.from_iterable(aiter, buffer_size=2)
wrapped_daiter = H5DataIO(data=daiter,
compression='gzip',
compression_opts=5,
shuffle=True,
fletcher32=True)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', wrapped_daiter, attributes={}))
dset = self.f['test_dataset']
self.assertEqual(dset.shape, a.shape)
self.assertListEqual(dset[:].tolist(), a.tolist())
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 5)
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
#############################################
# write_dataset tests: data chunk iterator
#############################################
def test_write_dataset_data_chunk_iterator(self):
dci = DataChunkIterator(data=np.arange(10), buffer_size=2)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', dci, attributes={}, dtype=dci.dtype))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), list(range(10)))
self.assertEqual(dset[:].dtype, dci.dtype)
def test_write_dataset_data_chunk_iterator_with_compression(self):
dci = DataChunkIterator(data=np.arange(10), buffer_size=2)
wrapped_dci = H5DataIO(data=dci,
compression='gzip',
compression_opts=5,
shuffle=True,
fletcher32=True,
chunks=(2,))
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', wrapped_dci, attributes={}))
dset = self.f['test_dataset']
self.assertListEqual(dset[:].tolist(), list(range(10)))
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 5)
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
self.assertEqual(dset.chunks, (2,))
def test_pass_through_of_recommended_chunks(self):
class DC(DataChunkIterator):
def recommended_chunk_shape(self):
return (5, 1, 1)
dci = DC(data=np.arange(30).reshape(5, 2, 3))
wrapped_dci = H5DataIO(data=dci,
compression='gzip',
compression_opts=5,
shuffle=True,
fletcher32=True)
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', wrapped_dci, attributes={}))
dset = self.f['test_dataset']
self.assertEqual(dset.chunks, (5, 1, 1))
self.assertEqual(dset.compression, 'gzip')
self.assertEqual(dset.compression_opts, 5)
self.assertEqual(dset.shuffle, True)
self.assertEqual(dset.fletcher32, True)
def test_dci_h5dataset(self):
data = np.arange(30).reshape(5, 2, 3)
dci1 = DataChunkIterator(data=data, buffer_size=1, iter_axis=0)
HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)
dset = self.f['test_dataset']
dci2 = DataChunkIterator(data=dset, buffer_size=2, iter_axis=2)
chunk = dci2.next()
self.assertTupleEqual(chunk.shape, (5, 2, 2))
chunk = dci2.next()
self.assertTupleEqual(chunk.shape, (5, 2, 1))
# TODO test chunk data, shape, selection
self.assertTupleEqual(dci2.recommended_data_shape(), data.shape)
self.assertIsNone(dci2.recommended_chunk_shape())
def test_dci_h5dataset_sparse_matched(self):
data = [1, 2, 3, None, None, None, None, 8, 9, 10]
dci1 = DataChunkIterator(data=data, buffer_size=3)
HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)
dset = self.f['test_dataset']
dci2 = DataChunkIterator(data=dset, buffer_size=2)
# dataset is read such that Nones in original data were not written, but are read as 0s
self.assertTupleEqual(dci2.maxshape, (10,))
self.assertEqual(dci2.dtype, np.dtype(int))
count = 0
for chunk in dci2:
self.assertEqual(len(chunk.selection), 1)
if count == 0:
self.assertListEqual(chunk.data.tolist(), [1, 2])
self.assertEqual(chunk.selection[0], slice(0, 2))
elif count == 1:
self.assertListEqual(chunk.data.tolist(), [3, 0])
self.assertEqual(chunk.selection[0], slice(2, 4))
elif count == 2:
self.assertListEqual(chunk.data.tolist(), [0, 0])
self.assertEqual(chunk.selection[0], slice(4, 6))
elif count == 3:
self.assertListEqual(chunk.data.tolist(), [0, 8])
self.assertEqual(chunk.selection[0], slice(6, 8))
elif count == 4:
self.assertListEqual(chunk.data.tolist(), [9, 10])
self.assertEqual(chunk.selection[0], slice(8, 10))
count += 1
self.assertEqual(count, 5)
self.assertTupleEqual(dci2.recommended_data_shape(), (10,))
self.assertIsNone(dci2.recommended_chunk_shape())
def test_dci_h5dataset_sparse_unmatched(self):
data = [1, 2, 3, None, None, None, None, 8, 9, 10]
dci1 = DataChunkIterator(data=data, buffer_size=3)
HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)
dset = self.f['test_dataset']
dci2 = DataChunkIterator(data=dset, buffer_size=4)
# dataset is read such that Nones in original data were not written, but are read as 0s
self.assertTupleEqual(dci2.maxshape, (10,))
self.assertEqual(dci2.dtype, np.dtype(int))
count = 0
for chunk in dci2:
self.assertEqual(len(chunk.selection), 1)
if count == 0:
self.assertListEqual(chunk.data.tolist(), [1, 2, 3, 0])
self.assertEqual(chunk.selection[0], slice(0, 4))
elif count == 1:
self.assertListEqual(chunk.data.tolist(), [0, 0, 0, 8])
self.assertEqual(chunk.selection[0], slice(4, 8))
elif count == 2:
self.assertListEqual(chunk.data.tolist(), [9, 10])
self.assertEqual(chunk.selection[0], slice(8, 10))
count += 1
self.assertEqual(count, 3)
self.assertTupleEqual(dci2.recommended_data_shape(), (10,))
self.assertIsNone(dci2.recommended_chunk_shape())
def test_dci_h5dataset_scalar(self):
data = [1]
dci1 = DataChunkIterator(data=data, buffer_size=3)
HDF5IO.__chunked_iter_fill__(self.f, 'test_dataset', dci1)
dset = self.f['test_dataset']
dci2 = DataChunkIterator(data=dset, buffer_size=4)
# dataset is read such that Nones in original data were not written, but are read as 0s
self.assertTupleEqual(dci2.maxshape, (1,))
self.assertEqual(dci2.dtype, np.dtype(int))
count = 0
for chunk in dci2:
self.assertEqual(len(chunk.selection), 1)
if count == 0:
self.assertListEqual(chunk.data.tolist(), [1])
self.assertEqual(chunk.selection[0], slice(0, 1))
count += 1
self.assertEqual(count, 1)
self.assertTupleEqual(dci2.recommended_data_shape(), (1,))
self.assertIsNone(dci2.recommended_chunk_shape())
#############################################
# H5DataIO general
#############################################
def test_warning_on_non_gzip_compression(self):
# Make sure no warning is issued when using gzip
with warnings.catch_warnings(record=True) as w:
dset = H5DataIO(np.arange(30),
compression='gzip')
self.assertEqual(len(w), 0)
self.assertEqual(dset.io_settings['compression'], 'gzip')
# Make sure a warning is issued when using szip (even if installed)
if "szip" in h5py_filters.encode:
with warnings.catch_warnings(record=True) as w:
dset = H5DataIO(np.arange(30),
compression='szip',
compression_opts=('ec', 16))
self.assertEqual(len(w), 1)
self.assertEqual(dset.io_settings['compression'], 'szip')
else:
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 16))
# Make sure a warning is issued when using lzf compression
with warnings.catch_warnings(record=True) as w:
dset = H5DataIO(np.arange(30),
compression='lzf')
self.assertEqual(len(w), 1)
self.assertEqual(dset.io_settings['compression'], 'lzf')
def test_error_on_unsupported_compression_filter(self):
# Make sure gzip does not raise an error
try:
H5DataIO(np.arange(30), compression='gzip', compression_opts=5)
except ValueError:
self.fail("Using gzip compression raised a ValueError when it should not")
# Make sure szip raises an error if not installed (or does not raise an error if installed)
warn_msg = ("szip compression may not be available on all installations of HDF5. Use of gzip is "
"recommended to ensure portability of the generated HDF5 files.")
if "szip" not in h5py_filters.encode:
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 16))
else:
try:
with self.assertWarnsWith(UserWarning, warn_msg):
H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 16))
except ValueError:
self.fail("SZIP is installed but H5DataIO still raises an error")
# Test error on illegal (i.e., a made-up compressor)
with self.assertRaises(ValueError):
warn_msg = ("unknown compression may not be available on all installations of HDF5. Use of gzip is "
"recommended to ensure portability of the generated HDF5 files.")
with self.assertWarnsWith(UserWarning, warn_msg):
H5DataIO(np.arange(30), compression="unknown")
# Make sure passing int compression filter raise an error if not installed
if not h5py_filters.h5z.filter_avail(h5py_filters.h5z.FILTER_MAX):
with self.assertRaises(ValueError):
warn_msg = ("%i compression may not be available on all installations of HDF5. Use of gzip is "
"recommended to ensure portability of the generated HDF5 files."
% h5py_filters.h5z.FILTER_MAX)
with self.assertWarnsWith(UserWarning, warn_msg):
H5DataIO(np.arange(30), compression=h5py_filters.h5z.FILTER_MAX, allow_plugin_filters=True)
# Make sure available int compression filters raise an error without passing allow_plugin_filters=True
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression=h5py_filters.h5z.FILTER_DEFLATE)
def test_value_error_on_incompatible_compression_opts(self):
# Make sure we raise an error when gzip with szip compression options is used
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='gzip', compression_opts=('ec', 16))
# Make sure we raise an error if gzip with too high a compression level is used
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='gzip', compression_opts=100)
# Make sure we raise an error if lzf with a gzip compression option is used
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='lzf', compression_opts=5)
# Make sure we raise an error if lzf with a szip compression option is used
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='lzf', compression_opts=('ec', 16))
# Make sure we raise an error if szip with a gzip compression option is used
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='szip', compression_opts=4)
# Make sure szip raises a ValueError if bad options are used (odd compression option)
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='szip', compression_opts=('ec', 3))
# Make sure szip raises a ValueError if bad options are used (bad method)
with self.assertRaises(ValueError):
H5DataIO(np.arange(30), compression='szip', compression_opts=('bad_method', 16))
def test_warning_on_linking_of_regular_array(self):
with warnings.catch_warnings(record=True) as w:
dset = H5DataIO(np.arange(30),
link_data=True)
self.assertEqual(len(w), 1)
self.assertEqual(dset.link_data, False)
def test_warning_on_setting_io_options_on_h5dataset_input(self):
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))
with warnings.catch_warnings(record=True) as w:
H5DataIO(self.f['test_dataset'],
compression='gzip',
compression_opts=4,
fletcher32=True,
shuffle=True,
maxshape=(10, 20),
chunks=(10,),
fillvalue=100)
self.assertEqual(len(w), 7)
def test_h5dataio_array_conversion_numpy(self):
# Test that H5DataIO.__array__ is working when wrapping an ndarray
test_speed = np.array([10., 20.])
data = H5DataIO((test_speed))
self.assertTrue(np.all(np.isfinite(data))) # Force call of H5DataIO.__array__
def test_h5dataio_array_conversion_list(self):
# Test that H5DataIO.__array__ is working when wrapping a python list
test_speed = [10., 20.]
data = H5DataIO(test_speed)
self.assertTrue(np.all(np.isfinite(data))) # Force call of H5DataIO.__array__
def test_h5dataio_array_conversion_datachunkiterator(self):
# Test that H5DataIO.__array__ is working when wrapping a python list
test_speed = DataChunkIterator(data=[10., 20.])
data = H5DataIO(test_speed)
with self.assertRaises(NotImplementedError):
np.isfinite(data) # Force call of H5DataIO.__array__
#############################################
# Copy/Link h5py.Dataset object
#############################################
def test_link_h5py_dataset_input(self):
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))
self.io.write_dataset(self.f, DatasetBuilder('test_softlink', self.f['test_dataset'], attributes={}))
self.assertTrue(isinstance(self.f.get('test_softlink', getlink=True), SoftLink))
def test_copy_h5py_dataset_input(self):
self.io.write_dataset(self.f, DatasetBuilder('test_dataset', np.arange(10), attributes={}))
self.io.write_dataset(self.f,
DatasetBuilder('test_copy', self.f['test_dataset'], attributes={}),
link_data=False)
self.assertTrue(isinstance(self.f.get('test_copy', getlink=True), HardLink))
self.assertListEqual(self.f['test_dataset'][:].tolist(),
self.f['test_copy'][:].tolist())
def test_link_h5py_dataset_h5dataio_input(self):
self.io.write_dataset(self.f, DatasetBuilder('test_dataset',
|
np.arange(10)
|
numpy.arange
|
# <NAME> (<EMAIL>)
from __future__ import division, print_function
from builtins import range
import numpy as np
from sklearn.metrics import auc
from sklearn.metrics.ranking import _binary_clf_curve, precision_recall_curve, roc_curve
import mlpaper.perf_curves as pc
import mlpaper.util as util
from mlpaper.test_constants import MC_REPEATS_LARGE
# ============================================================================
# Non-vectorized versions of routines in perf_curves for testing.
# ============================================================================
def _nv_add_pseudo_points(fps, tps):
if fps[-1] == 0:
fps = pc.EPSILON * tps
tps = tps.astype(fps.dtype)
if tps[-1] == 0:
tps = pc.EPSILON * fps
fps = fps.astype(tps.dtype)
return fps, tps
def _nv_binary_clf_curve(y_true, y_score, sample_weight=None):
assert y_true.ndim == 1 and y_true.dtype.kind == "b"
assert y_score.shape == y_true.shape and np.all(np.isfinite(y_score))
assert y_true.size >= 1
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score, y_true = y_score[desc_score_indices], y_true[desc_score_indices]
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve (a small worked example follows this function).
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
if sample_weight is None:
tps = np.cumsum(y_true)[threshold_idxs]
fps = 1 + threshold_idxs - tps
assert fps[-1] == np.sum(~y_true) and tps[-1] == np.sum(y_true)
else:
assert sample_weight.shape == y_true.shape
assert np.all(np.isfinite(sample_weight))
# Negative weight makes no sense, 0 can violate assumps. of other funcs
assert np.all(sample_weight > 0)
weight = sample_weight[desc_score_indices]
tps = np.cumsum(y_true * weight)[threshold_idxs]
fps = np.cumsum(weight)[threshold_idxs] - tps
assert np.allclose((fps[-1], tps[-1]), (np.sum(weight[~y_true]), np.sum(weight[y_true])))
# Now put in the (0, 0) coord (y_score >= np.inf)
assert not (tps[0] == 0 and fps[0] == 0)
fps, tps = np.r_[0, fps], np.r_[0, tps]
thresholds = np.r_[np.inf, y_score[threshold_idxs]]
# Clean up corner case
fps, tps = _nv_add_pseudo_points(fps, tps)
assert fps[-1] > 0 and tps[-1] > 0
assert fps.dtype == tps.dtype
# Remove any decreases due to numerics
fps = np.maximum.accumulate(fps)
assert np.all((np.diff(fps) >= 0.0) & (np.diff(tps) >= 0.0))
return fps, tps, thresholds
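# Editorial worked example for the distinct-value extraction above (not part of
# the original test module): for y_score already sorted descending as
# [0.9, 0.9, 0.5, 0.3],
#     np.diff(y_score)              -> [ 0. , -0.4, -0.2]
#     np.where(np.diff(y_score))[0] -> [1, 2]
#     threshold_idxs                -> [1, 2, 3]
# i.e. the last index of each run of tied scores, plus the final index.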
def _nv_roc_curve(y_true, y_score, sample_weight=None):
fps, tps, thresholds = _nv_binary_clf_curve(y_true, y_score, sample_weight=sample_weight)
fpr = np.true_divide(fps, fps[-1])
tpr = np.true_divide(tps, tps[-1])
return fpr, tpr, thresholds
def _nv_recall_precision_curve(y_true, y_score, sample_weight=None):
fps, tps, thresholds = _nv_binary_clf_curve(y_true, y_score, sample_weight=sample_weight)
recall = np.true_divide(tps, tps[-1])
with np.errstate(divide="ignore", invalid="ignore"):
precision = np.true_divide(tps, tps + fps)
precision[0] = precision[1]
assert np.all(0.0 <= precision) and np.all(precision <= 1.0)
return recall, precision, thresholds
def _nv_prg_curve(y_true, y_score, sample_weight=None):
fps, tps, thresholds = _nv_binary_clf_curve(y_true, y_score, sample_weight=sample_weight)
n_neg, n_pos = fps[-1], tps[-1]
fns = n_pos - tps
den = n_neg * tps
with np.errstate(divide="ignore", invalid="ignore"):
rec_gain = 1.0 - np.true_divide(n_pos * fns, den)
prec_gain = 1.0 - np.true_divide(n_pos * fps, den)
# interpolate backward just like in PR curve
prec_gain[0] = prec_gain[1]
assert np.all(rec_gain <= 1.0) and np.all(prec_gain <= 1.0)
# Find index to put everything in the box
with np.errstate(invalid="ignore"):
assert not np.any(np.diff(rec_gain) < 0.0)
idx = np.searchsorted(rec_gain, 0.0, side="right")
assert idx == np.where(rec_gain > 0.0)[0][0]
assert idx > 0 # Not selecting first point
# Bring forward most recent negative point as point at 0
rec_gain = np.concatenate(([0.0], rec_gain[idx:]))
prec_gain = prec_gain[idx - 1 :]
thresholds = thresholds[idx - 1 :]
return rec_gain, prec_gain, thresholds
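# Editorial note (not part of the original module): writing pi for the positive
# prevalence n_pos / (n_pos + n_neg), the expressions above are algebraically
# the usual precision-recall-gain quantities
#     rec_gain  = (rec  - pi) / ((1 - pi) * rec)  = 1 - n_pos * fns / (n_neg * tps)
#     prec_gain = (prec - pi) / ((1 - pi) * prec) = 1 - n_pos * fps / (n_neg * tps)
# evaluated at the same thresholds as the PR curve, then restricted to
# rec_gain >= 0 with the last negative point carried forward to rec_gain = 0.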
# ============================================================================
# Now the actual tests
# ============================================================================
def test_nv_binary_clf_curve():
N = np.random.randint(low=1, high=10)
y_bool = np.random.rand(N) <= 0.5
y_pred = np.random.rand(N)
if np.random.rand() <= 0.5: # make non-unique
y_pred = np.random.choice(y_pred, size=N, replace=True)
sample_weight = None
if np.random.rand() <= 0.2:
sample_weight = np.abs(np.random.randn(N))
if np.random.rand() <= 0.2:
sample_weight = 1 + np.random.multinomial(N, np.ones(N) / N)
if np.random.rand() <= 0.2:
sample_weight = np.maximum(np.random.multinomial(N, np.ones(N) / N), 1e-6)
fps, tps, thresholds = _nv_binary_clf_curve(y_bool, y_pred, sample_weight)
assert fps.shape == tps.shape and fps.shape == thresholds.shape
assert np.all(np.isfinite(fps))
assert np.all(np.isfinite(tps))
assert np.all(np.isfinite(thresholds[1:]))
assert fps[0] == 0 and tps[0] == 0 and thresholds[0] == np.inf
if sample_weight is None:
assert np.abs(fps[-1] - np.sum(~y_bool)) <= 1e-8
assert np.abs(tps[-1] - np.sum(y_bool)) <= 1e-8
else:
assert np.abs(fps[-1] - np.sum(sample_weight * ~y_bool)) <= 1e-8
assert np.abs(tps[-1] - np.sum(sample_weight * y_bool)) <= 1e-8
assert np.all((np.diff(fps) >= 0.0) & (np.diff(tps) >= 0.0))
assert np.all((np.diff(fps) > 0) | (np.diff(tps) > 0))
assert np.all(np.diff(thresholds) < 0.0)
fpr, tpr, thresholds_roc = _nv_roc_curve(y_bool, y_pred, sample_weight)
assert fpr.shape == tpr.shape and fpr.shape == thresholds_roc.shape
assert np.all(np.isfinite(fpr))
assert np.all(np.isfinite(tpr))
assert np.all(np.isfinite(thresholds_roc[1:]))
assert fpr[0] == 0.0 and tpr[0] == 0.0
assert fpr[-1] == 1.0 and tpr[-1] == 1.0
assert np.all((np.diff(fpr) >= 0.0) & (np.diff(tpr) >= 0.0))
assert np.all((np.diff(fpr) > 0.0) | (np.diff(tpr) > 0.0))
assert np.all(np.diff(thresholds_roc) < 0.0)
rec, prec, thresholds_pr = _nv_recall_precision_curve(y_bool, y_pred, sample_weight)
assert rec.shape == prec.shape and rec.shape == thresholds_pr.shape
assert np.all(np.isfinite(rec))
assert np.all(np.isfinite(prec))
assert np.all(np.isfinite(thresholds_pr[1:]))
assert rec[0] == 0.0 and rec[-1] == 1.0
assert len(prec) >= 2 and prec[0] == prec[1]
b_rate = (
np.mean(y_bool)
if sample_weight is None
else np.true_divide(np.sum(sample_weight * y_bool), np.sum(sample_weight))
)
assert np.max(np.abs(prec[-1] - b_rate)) <= 1e-8
# Note: may have repeats in PR curve
assert np.all(np.diff(rec) >= 0.0)
assert np.all(np.diff(thresholds_pr) < 0.0)
rec_gain, prec_gain, thresholds_prg = _nv_prg_curve(y_bool, y_pred, sample_weight)
assert rec_gain.shape == prec_gain.shape
assert rec_gain.shape == thresholds_prg.shape
assert np.all(np.isfinite(thresholds_prg[1:]))
assert rec_gain[0] == 0.0 and rec_gain[-1] == 1.0
assert np.all(rec_gain <= 1.0) and np.all(prec_gain <= 1.0)
assert np.all(np.diff(rec_gain) >= 0.0)
assert np.allclose(prec_gain[-1], 0.0)
if np.all(y_bool) or (not np.any(y_bool)):
assert np.allclose(0.5, np.trapz(fpr, tpr))
assert np.allclose(np.mean(y_bool), np.sum(prec[:-1] * np.diff(rec)))
assert np.allclose(0.0, np.sum(prec_gain[:-1] * np.diff(rec_gain)))
return
fps2, tps2, thresholds2 = _binary_clf_curve(y_bool, y_pred, pos_label=True, sample_weight=sample_weight)
assert np.allclose(fps[1:], fps2)
assert np.allclose(tps[1:], tps2)
assert np.allclose(thresholds[1:], thresholds2)
fpr2, tpr2, thresholds2 = roc_curve(
y_bool, y_pred, pos_label=True, sample_weight=sample_weight, drop_intermediate=False
)
# sklearn inconsistent on including origin ==> need if statement
if len(fpr) == len(fpr2):
assert np.allclose(fpr, fpr2)
assert np.allclose(tpr, tpr2)
assert np.allclose(thresholds_roc[1:], thresholds2[1:])
else:
assert np.allclose(fpr[1:], fpr2)
assert np.allclose(tpr[1:], tpr2)
assert
|
np.allclose(thresholds_roc[1:], thresholds2)
|
numpy.allclose
|
from functools import reduce
import cv2 as cv
import numpy as np
from PIL import Image
def enhance_contrast(img, lower=90, upper=None):
img = np.asarray(img, dtype=np.uint8)
if upper is None:
upper = np.max(img)
lut = np.zeros(256, dtype=np.uint8)
lut[lower:upper + 1] = np.linspace(0, 255, upper - lower + 1, endpoint=True, dtype=np.uint8)
lut[upper + 1:] = 255
return Image.fromarray(lut[np.asarray(img, np.uint8)])
def image_threshold_mat2img(mat, threshold=127):
"""
threshold filter on L channel
:param threshold: negative value means inverted output
"""
if threshold < 0:
resultmat = mat <= -threshold
else:
resultmat = mat >= threshold
lut = np.zeros(256, dtype=np.uint8)
lut[1:] = 255
return Image.fromarray(lut[resultmat.astype(np.uint8)], 'L').convert('1')
def image_threshold(image, threshold=127):
"""
threshold filter on L channel
:param threshold: negative value means inverted output
"""
grayimg = image.convert('L')
mat = np.asarray(grayimg)
return image_threshold_mat2img(mat, threshold)
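# Editorial usage sketch (the variable `img` is hypothetical, not defined in
# this module): a positive threshold keeps bright pixels of the L channel,
# a negative one selects the dark pixels instead.
#
#     mask_bright = image_threshold(img, 127)   # white where L >= 127
#     mask_dark   = image_threshold(img, -127)  # white where L <= 127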
def crop_blackedge(numimg, value_threshold=127):
if numimg.width == 0 or numimg.height == 0:
return None
thimg = image_threshold(numimg, value_threshold)
return numimg.crop(thimg.getbbox())
def crop_blackedge2(numimg, value_threshold=127):
thimg = image_threshold(numimg, value_threshold)
x_threshold = int(numimg.height * 0.4)
y_threshold = 16
mat = np.asarray(thimg)
right = -1
for x in range(thimg.width - 1, -1, -1):
col = mat[:, x]
if
|
np.any(col)
|
numpy.any
|
import numpy as np
from .dedpulUtils import maximize_log_likelihood, rolling_apply, MonotonizingTrends
from scipy.stats import gaussian_kde
def estimate_diff(preds, target, bw_mix=0.05, bw_pos=0.1, kde_mode='logit', threshold='mid', k_neighbours=None,
tune=False, MT=False, MT_coef=0.25, decay_MT_coef=False, kde_type='kde', bins_mix=20, bins_pos=20):
"""
Estimates densities of predictions y(x) for P and U and ratio between them f_p / f_u for U sample;
uses kernel density estimation (kde);
post-processes difference of estimated densities - imposes monotonicity on lower preds
(so that diff is partly non-decreasing) and applies rolling median to further reduce variance
:param preds: predictions of NTC y(x), probability of belonging to U rather than P, np.array with shape (n,)
:param target: binary vector, 0 if positive, 1 if unlabeled, np.array with shape (n,)
:param bw_mix: bandwidth for kde of U
:param bw_pos: bandwidth for kde of P
:param kde_mode: 'prob', 'log_prob' or 'logit'; default is 'logit'
:param threshold: monotonicity is imposed on the density difference for predictions below this value; float in [0, 1] or one of 'low', 'mid', 'high'
:param k_neighbours: difference is relaxed with median rolling window with size k_neighbours * 2 + 1,
default = int(preds[target == 1].shape[0] // 10)
:return: difference of densities f_p / f_u for U sample
"""
if kde_mode is None:
kde_mode = 'logit'
if (threshold is None) or (threshold == 'mid'):
threshold = preds[target == 1].mean() / 2 + preds[target == 0].mean() / 2
elif threshold == 'low':
threshold = preds[target == 0].mean()
elif threshold == 'high':
threshold = preds[target == 1].mean()
if k_neighbours is None:
k_neighbours = int(preds[target == 1].shape[0] // 20)
if kde_mode == 'prob':
kde_inner_fun = lambda x: x
kde_outer_fun = lambda dens, x: dens(x)
elif kde_mode == 'log_prob':
kde_inner_fun = lambda x: np.log(x)
kde_outer_fun = lambda dens, x: dens(np.log(x)) / (x + 10 ** -5)
elif kde_mode == 'logit':
kde_inner_fun = lambda x: np.log((x + 1e-7) / (1 - x + 10 ** -5))
kde_outer_fun = lambda dens, x: dens(np.log((x + 1e-7) / (1 - x + 10 ** -5))) / (x * (1 - x) + 10 ** -5)
if kde_type == 'kde':
if tune:
bw_mix = maximize_log_likelihood(preds[target == 1], kde_inner_fun, kde_outer_fun, kde_type=kde_type)
bw_pos = maximize_log_likelihood(preds[target == 0], kde_inner_fun, kde_outer_fun, kde_type=kde_type)
kde_mix = gaussian_kde(np.apply_along_axis(kde_inner_fun, 0, preds[target == 1]), bw_mix)
kde_pos = gaussian_kde(np.apply_along_axis(kde_inner_fun, 0, preds[target == 0]), bw_pos)
elif kde_type == 'hist':
if tune:
bins_mix = maximize_log_likelihood(preds[target == 1], kde_inner_fun, lambda kde, x: kde(x),
kde_type=kde_type)
bins_pos = maximize_log_likelihood(preds[target == 0], kde_inner_fun, lambda kde, x: kde(x),
kde_type=kde_type)
bars_mix = np.histogram(preds[target == 1], bins=bins_mix, range=(0, 1), density=True)[0]
bars_pos = np.histogram(preds[target == 0], bins=bins_pos, range=(0, 1), density=True)[0]
kde_mix = lambda x: bars_mix[np.clip((x // (1 / bins_mix)).astype(int), 0, bins_mix-1)]
kde_pos = lambda x: bars_pos[np.clip((x // (1 / bins_pos)).astype(int), 0, bins_pos-1)]
kde_outer_fun = lambda kde, x: kde(x)
# sorting to relax and impose monotonicity
sorted_mixed = np.sort(preds[target == 1])
diff = np.apply_along_axis(lambda x: kde_outer_fun(kde_pos, x) / (kde_outer_fun(kde_mix, x) + 10 ** -5), axis=0,
arr=sorted_mixed)
diff[diff > 50] = 50
diff = rolling_apply(diff, 5)
diff = np.append(
np.flip(np.maximum.accumulate(np.flip(diff[sorted_mixed <= threshold], axis=0)), axis=0),
diff[sorted_mixed > threshold])
diff = rolling_apply(diff, k_neighbours)
if MT:
MTrends = MonotonizingTrends(MT_coef=MT_coef)
diff = np.flip(np.array(MTrends.monotonize_array(np.flip(diff, axis=0), reset=True, decay_MT_coef=decay_MT_coef)), axis=0)
diff.sort()
diff = np.flip(diff, axis=0)
# desorting
diff = diff[np.argsort(np.argsort(preds[target == 1]))]
return diff
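# Editorial note (not part of the original module): with the default
# kde_mode='logit', predictions are mapped through z = log(y / (1 - y)) before
# the Gaussian KDE, and densities are mapped back with the change-of-variables
# factor 1 / (y * (1 - y)), which is what kde_inner_fun / kde_outer_fun
# implement above; e.g. for y = 0.8, z = log(0.8 / 0.2) ~= 1.386 and the factor
# is 1 / (0.8 * 0.2) = 6.25. The returned `diff` is therefore the smoothed,
# monotonized ratio f_p(y) / f_u(y) evaluated at the sorted unlabeled
# predictions.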
def estimate_poster_em(diff=None, mode='dedpul', converge=True, tol=10**-5, max_iterations=1000, \
nonconverge=True, step=0.001, max_diff=0.05, disp=False,
alpha=None, alpha_as_mean_poster=True, **kwargs):
"""
Performs Expectation-Maximization to estimate posteriors and priors alpha (if not provided) of N in U
with either of 'en' or 'dedpul' methods; both 'converge' and 'nonconverge' are recommended to be set True for
better estimate
:param diff: difference of densities f_p/f_u for the sample U, np.array (n,), output of estimate_diff()
:param preds: predictions of classifier, np.array with shape (n,)
:param target: binary vector, 0 if positive, 1 if unlabeled, np.array with shape (n,)
:param mode: 'dedpul' or 'en'; if 'dedpul', diff needs to be provided; if 'en', preds and target need to be provided
:param converge: True or False; True if convergence estimate should be computed
:param tol: tolerance of error between priors and mean posteriors, indicator of convergence
:param max_iterations: if exceeded, search of converged alpha stops even if tol is not reached
:param nonconverge: True or False; True if non-convergence estimate should be computed
:param step: gap between points of the [0, 1, step] grid to choose the best alpha from
:param max_diff: alpha with difference of mean posteriors and priors bigger than max_diff cannot be chosen;
a heuristic to choose a bigger alpha
:param plot: True or False, if True - plots ([0, 1, grid], mean posteriors - alpha) and
([0, 1, grid], second lag of (mean posteriors - alpha))
:param disp: True or False, if True - displays if the algorithm didn't converge
:param alpha: proportions of N in U; is estimated if None
:return: tuple (alpha, poster), e.g. (priors, posteriors) of N in U for the U sample
"""
assert converge + nonconverge, "At least one of 'converge' and 'nonconverge' has to be set to 'True'"
if alpha is not None:
if mode == 'dedpul':
alpha, poster = estimate_poster_dedpul(diff, alpha=alpha, alpha_as_mean_poster=alpha_as_mean_poster, tol=tol, **kwargs)
return alpha, poster
# if converge:
alpha_converge = 0
for i in range(max_iterations):
if mode.endswith('dedpul'):
_, poster_converge = estimate_poster_dedpul(diff, alpha=alpha_converge, **kwargs)
mean_poster = np.mean(poster_converge)
error = mean_poster - alpha_converge
if np.abs(error) < tol:
break
if np.min(poster_converge) > 0:
break
alpha_converge = mean_poster
if disp:
if i >= max_iterations - 1:
print('max iterations exceeded')
# if nonconverge:
errors = np.array([])
for alpha_nonconverge in np.arange(0, 1, step):
if mode.endswith('dedpul'):
_, poster_nonconverge = estimate_poster_dedpul(diff, alpha=alpha_nonconverge, **kwargs)
errors = np.append(errors, np.mean(poster_nonconverge) - alpha_nonconverge)
idx = np.argmax(np.diff(np.diff(errors))[errors[1: -1] < max_diff])
alpha_nonconverge = np.arange(0, 1, step)[1: -1][errors[1: -1] < max_diff][idx]
# if converge and not nonconverge:
# return alpha_converge, poster_converge
if ((alpha_nonconverge >= alpha_converge) or  # converge and nonconverge and
(((errors < 0).sum() > 1) and (alpha_converge < 1 - step))):
return alpha_converge, poster_converge
elif nonconverge:
if mode == 'dedpul':
_, poster_nonconverge = estimate_poster_dedpul(diff, alpha=alpha_nonconverge, **kwargs)
if disp:
print('didn\'t converge')
return alpha_nonconverge, poster_nonconverge
# return np.mean(poster_nonconverge), poster_nonconverge
else:
if disp:
print('didn\'t converge')
return None, None
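# Editorial sketch of the 'converge' branch above (illustrative only, using the
# posterior formula from estimate_poster_dedpul below): posteriors are
# poster_i = max(0, 1 - (1 - alpha) * diff_i) and alpha is driven to the fixed
# point of alpha <- mean(poster), e.g.
#
#     alpha = 0.0
#     for _ in range(max_iterations):
#         poster = np.clip(1.0 - (1.0 - alpha) * diff, 0.0, None)
#         if abs(poster.mean() - alpha) < tol:
#             break
#         alpha = poster.mean()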
def estimate_poster_dedpul(diff, alpha=None, quantile=0.05, alpha_as_mean_poster=False, max_it=100, **kwargs):
"""
Estimates posteriors and priors alpha (if not provided) of N in U with dedpul method
:param diff: difference of densities f_p / f_u for the sample U, np.array (n,), output of estimate_diff()
:param alpha: priors, share of N in U (estimated if None)
:param quantile: if alpha is None, relaxation of the estimate of alpha;
here alpha is estimaeted as infinum, and low quantile is its relaxed version;
share of posteriors probabilities that we allow to be negative (with the following zeroing-out)
:param kwargs: dummy
:return: tuple (alpha, poster), e.g. (priors, posteriors) of N in U for the U sample, represented by diff
"""
if alpha_as_mean_poster and (alpha is not None):
poster = 1 - diff * (1 - alpha)
poster[poster < 0] = 0
cur_alpha = np.mean(poster)
if cur_alpha < alpha:
left_border = alpha
right_border = 1
else:
left_border = 0
right_border = alpha
poster_zero = 1 - diff
poster_zero[poster_zero < 0] = 0
if
|
np.mean(poster_zero)
|
numpy.mean
|
# The aim of this approach is to find for a given value of p1, p2 and moi, a value of P(lyso) that brings the MoI as close to 1 as possible. The logic is this: One way to achieve coexistence is to ensure that the MoI remains equal to 1 so that neither of the two species dominates the other.
# For each value of MoI at an interval of 0.01, calculate the range of average MoI (for 100 runs) of 1 period each for each of P(lyso) at intervals of 0.05; the optimal curve would pass through points that equal MoI = 1 or closest to it.
#####
# Warning: Ensure correct directory name below before run
#
# Usage: python alt_script.py <arg1> <arg2>
# where <arg1> and <arg2> represent the environmental parameters p1 and p2
# for example, python alt_script.py 0.1 1.0 will run the code for the worst phage environment (p1=0.1) and the best bacterial environment (p2=1.0)
#####
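# Editorial worked example of one update step (illustrative only; MoI is read
# here as free phage per healthy bacterium, consistent with N_pfree = N_bh*moi
# below). With N_bh = 10000, moi = 0.5 (so N_pfree = 5000), plyso = 0.2, r = 1,
# a = 10 and both environments favourable (envp = envb = 1):
#     N_bh    -> max(10000 + 1*10000 - 5000, 0)         = 15000
#     N_bi    -> 0 + 1*0 + 0.2*5000                      = 1000
#     N_pfree -> max(5000 + 10*0.8*5000 - 0.2*5000, 0)   = 44000
# so the MoI after one period is roughly 44000/15000 ~ 2.9, and a larger
# P(lyso) would be needed to pull it back toward 1.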
import numpy as np
import os
import time
import sys
start_time = time.time()
iterations = 1000 # number of times the code runs for each value of MoI - use 1000 generally, 2000 if needed
moi_range = [x*0.01 for x in xrange(1,201)] # MoI between 0.01 and 2
plyso_range = [x*0.01 for x in xrange(1,101)] # plyso between 0.01 and 1
p1 = float(sys.argv[1]) # phage environment
p2 = float(sys.argv[2]) # bacterial environment
r = 1 # bacterial steady growth
a = 10 # amplification factor(burst size)
lambda_p = 1 # degradation factor for phage - phage die
lambda_b = 0.1 # degradation factor for bacteria - bacteria die and lysed bacteria are released as free phages
moi_arr = [0.0 for x in xrange(iterations)]
mean_moi = 0.0
# Ensure correct directory name before run
base_folder = 'alt_run'
for moi in moi_range:
for plyso in plyso_range:
for x in xrange(iterations):
# create the environment
r1 = np.random.rand()
if r1 < p1:
envp = 1
else:
envp = 0
r2 = np.random.rand()
if r2 < p2:
envb = 1
else:
envb = 0
# initialise populations
N_bh = 10000
N_pfree = N_bh*moi
N_bi = 0
if envp == 1 and envb == 1:
N_bh = long(max(N_bh + r*N_bh - N_pfree, 0)) #assume all phages affect one bacterium each (for now)
N_bi = long(N_bi + r*N_bi + plyso*N_pfree)
N_pfree = long(max(N_pfree + a*(1-plyso)*N_pfree - (plyso)*N_pfree, 0))
if envp == 1 and envb == 0:
N_bh = long(max(N_bh + r*N_bh - N_pfree, 0))
N_bi_temp = long((N_bi + plyso*N_pfree)*np.exp(-lambda_b))
N_pfree = long(max(N_pfree + a*(1-plyso)*N_pfree - (plyso)*N_pfree, 0))
N_bi = N_bi_temp
if envp == 0 and envb == 1:
N_bh = long(max(N_bh + r*N_bh - N_pfree, 0))
N_bi = long(N_bi + r*N_bi + plyso*N_pfree)
N_pfree = long(max(N_pfree + a*(1-plyso)*N_pfree - (plyso)*N_pfree,0)*
|
np.exp(-lambda_p)
|
numpy.exp
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#####################################
import sys
sys.path.append("..\\src")
import numpy
import matplotlib.pyplot as plt
import pandas as pd
from SIR_model import SIR
from SEIR_model import SEIR
from SEIR_plusCAQ_model import SEIR_plusCAQ
from SEIR_plusCQD_model import SEIR_plusCQD
from SEIR_plusBG_model import SEIR_plusBG
from SEIR_plus_model import SEIR_plus
# In[2]:
TS_data_confirm = pd.read_csv("..\\data\\jh_data\\csse_covid_19_data\\csse_covid_19_time_series\\time_series_covid19_confirmed_US.csv")
TS_data_death = pd.read_csv("..\\data\\jh_data\\csse_covid_19_data\\csse_covid_19_time_series\\time_series_covid19_deaths_US.csv")
TS_data_confirm.set_index('UID',inplace = True)
TS_data_death.set_index('UID',inplace = True)
Miami_dade_comfirm = TS_data_confirm.loc[84012086]
Miami_dade_death = TS_data_death.loc[84012086]
Miami_dade_comfirm = Miami_dade_comfirm.drop(labels=['iso2', 'iso3','code3','FIPS','Province_State','Country_Region','Lat','Long_','Combined_Key','Admin2'])
Miami_dade_comfirm = Miami_dade_comfirm[Miami_dade_comfirm!=0]
Miami_dade_death = Miami_dade_death.drop(labels=['iso2', 'iso3','code3','FIPS','Province_State','Country_Region','Lat','Long_','Combined_Key','Admin2'])
for i in range(len(Miami_dade_death)):
if Miami_dade_death.index[0] != Miami_dade_comfirm.index[0]:
Miami_dade_death = Miami_dade_death.drop([Miami_dade_death.index[0]])
BHSF_data = pd.read_excel("..\\data\\Ih_D.xlsx")
BHSF_data = BHSF_data.fillna(0)
BHSF_data.head(50)
#print(Miami_dade_comfirm)
#print(Miami_dade_death)
# real_data = BHSF_data['cumulative_pos_IP'].to_numpy()
# real_data = numpy.delete(real_data, [0,1,2,3,4])
# print(real_data)
# In[3]:
# init parameters
mean_latent_period = 5
mean_recovery_time = 14
double_time = 5.5
rate_of_growth = 2**(1.0/double_time) - 1.0
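# (editorial note) with double_time = 5.5 this gives 2**(1/5.5) - 1 ~= 0.134,
# i.e. about 13.4% growth per day; beta below is then chosen so that
# beta*S - 1/mean_recovery_time equals this growth rate at the initial S
# (before the social-distancing factor).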
population = 2716940 # init N
number_of_current_hospitalized_patients = 50
infected = 278
hospital_market_share = 0.22 # % of people will come to your hospital
hospitalization_percent= (135/hospital_market_share)/7712
#number_of_all_infected =
social_distancing = 0 # %
ICU_rate = (135/532)*hospital_market_share
ventilated_percent = (85/532)*hospital_market_share
hospital_stay = 7
ICU_stay = 9.1
Ventilator_days = 11.6
duration_of_immunization = 60
death_rate = 0.022
t=numpy.linspace(0,80,80)
# N, beta1, beta2, sigma, alpha, rho, rho_icu, rho_v,
# lamda1, lamda2, lamda_icu, lamda_v, kappa
N = population #population
I = infected #(number_of_current_hospitalized_patients / hospital_market_share)/hospitalization_percent
E = I * mean_latent_period
R = 0
D = 7
S = N - E - I - R - D
Ih = number_of_current_hospitalized_patients/hospital_market_share
Im = I - Ih
Iicu = 15/hospital_market_share
Iv = 6/hospital_market_share
beta = (rate_of_growth + 1/mean_recovery_time)/S * (1- social_distancing)
date = pd.date_range(start="2020-03-23",end="2020-06-11",periods=80)
date = numpy.array(date)
date_real = pd.date_range(start="2020-03-23",end="2020-04-19",periods=28)
date_real = numpy.array(date_real)
# In[4]:
#SEIR+MHD
# param
beta1 = beta
beta2 = beta1 * 0.1
sigma = 1/duration_of_immunization
alpha = 1/mean_latent_period
rho = hospitalization_percent
rho_icu = ICU_rate
rho_v = ventilated_percent
lamda1 = 1/ mean_recovery_time
lamda2 = 1/ hospital_stay
lamda_icu = 1/ ICU_stay
lamda_v = 1/Ventilator_days
kappa = death_rate
data = [S, E, Im, Ih, Iicu, Iv, D, R]
SEIR_MHD = SEIR_plus(data, t, N, beta1, beta2, sigma, alpha, rho, rho_icu, rho_v,
lamda1, lamda2, lamda_icu, lamda_v, kappa)
solution = SEIR_MHD.solve()
# In[5]:
#realdata = numpy.array([155,168,166,167,166,170,155,160,156,151,147,145,135,135])
realdata = BHSF_data['cumulative_pos_IP'].to_numpy()
realdata =
|
numpy.delete(realdata, [0,1,2,3,4,5,6,7,8,9,10,11])
|
numpy.delete
|
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
from model.main import *
import json
import pandas as pd
import numpy as np
class DataStore():
model=None
model_month=None
sale_model=None
data = DataStore()
@app.route('/', methods=["GET"])
def home():
percent=percentageMethod()
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
with open('percent.json') as f:
file2 = json.load(f)
labels=file2['index']
data=file2['data']
if "username" in session:
return render_template('index.html', last_year=lastYear(), last_month=lastMonth(),dataset=data, label=labels, percent=percent,
month_index=month_index, month_data=month_data)
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
#Check if email already exists in the registratiion page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything Login (routes to render the page, checks if the username exists, and verifies the password through jQuery AJAX requests)
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
@app.route('/totalyear', methods=["GET"])
def total_year():
total_year=totalYear()
file1=pd.read_json('total_year.json',orient='index')
year_index=np.array(file1['year'])
year_data=np.array(file1['total'])
return render_template("total_year.html",year_index=year_index, year_data=year_data)
@app.route('/totalmonth', methods=["GET"])
def total_month():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
num=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
data=data1['total']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Mean Squared Error (MSE): {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totalmonth', methods=["POST"])
def total_month_num():
total_month=totalMonth()
file1=pd.read_json('total_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_month"))
predict_rs, fitted_data=predict(data.model_month,num)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['total'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("total_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num=num)
def check_stationary():
total_month=totalMonth()
data1=total_month[['month_year','total']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','total']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','total']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Mean Squared Error (MSE): {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["GET"])
def total_date():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
num=30
# Fit model
model_date=fit_model_date()
data.model=model_date
predict_rs_date, fitted_data_date=predict_date(model_date,30)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
#Test model
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def fit_model_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
data=data1['total']
result1 = fit_model_fast(data)
return result1
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=30
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','total']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Mean Squared Error (MSE): {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/totaldate', methods=["POST"])
def total_date_num():
total_date=totalDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['total'])
#Get data
if request.method == "POST":
num = int(request.form.get("num_date"))
predict_rs_date, fitted_data_date=predict_date(data.model,num)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['total'])
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("total_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=data.model, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=totalDate()
data1=total_date[['date','total']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=6
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['total', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','total']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','total']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','total']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
return mse, rmse, mae, mape
@app.route('/revenueyear', methods=["GET"])
def revenue_year():
sale_year=saleYear()
year_index=np.array(sale_year['year'])
year_data=np.array(sale_year['quantity'])
return render_template("revenue_year.html",year_index=year_index, year_data=year_data)
@app.route('/revenuemonth', methods=["GET"])
def revenue_month():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
num_sale=6
# Fit model
model=fit_model()
data.model_month=model
predict_rs, fitted_data=predict(model,6)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=model, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def fit_model():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
data=data1['quantity']
stationary=check_stationary()
p=stationary[1]
if (p<0.05):
result1 = fit_model_stationary(data)
else:
result1 = fit_model_non_stationary(data)
return result1
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','quantity']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','quantity']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Mean Squared Error (MSE): {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuemonth', methods=["POST"])
def revenue_month_num():
total_month=saleMonth()
file1=pd.read_json('sale_month.json',orient='index')
month_index=np.array(file1['month_year'])
month_data=np.array(file1['quantity'])
#Get data
if request.method == "POST":
num_sale= int(request.form.get("sale_month"))
predict_rs, fitted_data=predict(data.model_month,num_sale)
pred_index=np.array(predict_rs['month_year'])
pred_data=np.array(predict_rs['quantity'])
#Test model
test_rs= test(pred_data[0], fitted_data)
return render_template("revenue_month.html",month_index=month_index, month_data=month_data, stationary=check_stationary(), model=data.model_month, pred_index=pred_index, pred_data=pred_data, test_rs=test_rs, num_sale=num_sale)
def check_stationary():
total_month=saleMonth()
data1=total_month[['month_year','quantity']]
data1.set_index('month_year', inplace=True)
result=stationary(data1)
return result
def predict(model,num_predict):
if num_predict==0:
num_predict=6
fitted_month, confint_month = model.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data2=total_day[['date','quantity']]
data2.set_index('date', inplace=True)
date = pd.date_range(data2.index[-1], periods=num_predict, freq='MS')
fitted_seri_month = pd.Series(fitted_month, index=date)
dff=pd.DataFrame(fitted_seri_month)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['month_year'] = pd.to_datetime(dff['date']).dt.to_period('M')
pred=dff[['month_year','quantity']]
return pred, fitted_month
def test(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=np.sqrt(mse)
mae=np.mean(np.abs(e))
mape=np.mean(abs(e/y))
# print('Mean squared error (MSE): {}'.format(mse))
# print('Root Mean Square Error: {}'.format(rmse))
# print('Mean Absolute Error: {}'.format(mae))
# print('Mean Absolute Percentage Error: {}'.format(mape))
return mse, rmse, mae, mape
@app.route('/revenuedate', methods=["GET"])
def revenue_date():
total_date=saleDate()
date_index=np.array(total_date['date'])
date_data=np.array(total_date['quantity'])
num=30
# Fit model
model_date=fit_model_date()
data.sale_model=model_date
predict_rs_date, fitted_data_date=predict_date(model_date,30)
pred_index_date=np.array(predict_rs_date['date'])
pred_data_date=np.array(predict_rs_date['quantity'])
#Test model
test_rs= test_date(pred_data_date[0], fitted_data_date)
return render_template("revenue_date.html",date_index=date_index, date_data=date_data, stationary=check_stationary_date(), model_date=model_date, pred_index=pred_index_date, pred_data=pred_data_date, test_rs=test_rs, num=num)
def check_stationary_date():
total_date=saleDate()
data1=total_date[['date','quantity']]
data1.set_index('date', inplace=True)
result=stationary_trend(data1)
return result
def fit_model_date():
total_date=saleDate()
data1=total_date[['date','quantity']]
data1.set_index('date', inplace=True)
data=data1['quantity']
result1 = fit_model_fast(data)
return result1
def predict_date(model_date, num_predict):
if num_predict==0:
num_predict=30
fitted_date, confint_date = model_date.predict(n_periods=num_predict, return_conf_int=True)
df2=df[['quantity', 'date']]
total_day = df2.groupby(['date'], as_index=False).sum().sort_values('date', ascending=True)
data=total_day[['date','quantity']]
data.set_index('date', inplace=True)
date = pd.date_range(data.index[-1], periods=num_predict)
fitted_seri_date = pd.Series(fitted_date, index=date)
dff=pd.DataFrame(fitted_seri_date)
dff=dff.reset_index()
dff.columns=['date','quantity']
dff['date'] = pd.to_datetime(dff['date']).dt.to_period('D')
pred=dff[['date','quantity']]
return pred, fitted_date
def test_date(y, yhat):
e = y-yhat
mse=np.mean(e**2)
rmse=
|
np.sqrt(mse)
|
numpy.sqrt
|
# Copyright 2018 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for distributions."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control.composer.variation import distributions
import numpy as np
RANDOM_SEED = 123
NUM_ITERATIONS = 100
def _make_random_state():
return np.random.RandomState(RANDOM_SEED)
class DistributionsTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self._variation_random_state = _make_random_state()
self._np_random_state = _make_random_state()
def testUniform(self):
lower, upper = [2, 3, 4], [5, 6, 7]
variation = distributions.Uniform(low=lower, high=upper)
for _ in range(NUM_ITERATIONS):
np.testing.assert_array_equal(
variation(random_state=self._variation_random_state),
self._np_random_state.uniform(lower, upper))
def testUniformChoice(self):
choices = ['apple', 'banana', 'cherry']
variation = distributions.UniformChoice(choices)
for _ in range(NUM_ITERATIONS):
self.assertEqual(
variation(random_state=self._variation_random_state),
self._np_random_state.choice(choices))
def testUniformPointOnSphere(self):
variation = distributions.UniformPointOnSphere()
samples = []
for _ in range(NUM_ITERATIONS):
sample = variation(random_state=self._variation_random_state)
self.assertEqual(sample.size, 3)
np.testing.assert_approx_equal(
|
np.linalg.norm(sample)
|
numpy.linalg.norm
|
# -*- coding: utf-8 -*-
# @Author: hliu.Luke
# @Date: 2020-07-31 11:18:22
# @Last Modified by: hliu.Luke
# @Last Modified time: 2020-08-03 15:17:32
import math
import torch
import numpy as np
# efficient version
def precision_recall_ndcg_at_k(k, rankedlist, test_matrix):
idcg_k = 0
dcg_k = 0
n_k = k if len(test_matrix) > k else len(test_matrix)
for i in range(n_k):
idcg_k += 1 / math.log(i + 2, 2)
b1 = rankedlist
b2 = test_matrix
s2 = set(b2)
hits = [ (idx, val) for idx, val in enumerate(b1) if val in s2]
count = len(hits)
#print(hits)
r =
|
np.array(rankedlist)
|
numpy.array
|
import numpy as np
import gym
from gym.spaces import Box, Dict
from mujoco_worldgen import Floor, WorldBuilder, Geom, ObjFromXML, WorldParams, Env
def update_obs_space(env, delta):
spaces = env.observation_space.spaces.copy()
for key, shape in delta.items():
spaces[key] = Box(-np.inf, np.inf, shape, np.float32)
return Dict(spaces)
def rand_pos_on_floor(sim, n=1):
world_size = sim.model.geom_size[sim.model.geom_name2id('floor0')] * 2
new_pos = np.random.uniform(np.array([[0.2, 0.2] for _ in range(n)]),
np.array([world_size[:2] - 0.2 for _ in range(n)]))
return new_pos
class GatherEnv(Env):
def __init__(self, n_food=3, horizon=200, n_substeps=10,
floorsize=4., deterministic_mode=False):
super().__init__(get_sim=self._get_sim,
get_obs=self._get_obs,
action_space=(-1.0, 1.0),
horizon=horizon,
deterministic_mode=deterministic_mode)
self.n_food = n_food
self.horizon = horizon
self.n_substeps = n_substeps
self.floorsize = floorsize
def _get_obs(self, sim):
qpos = sim.data.qpos.copy()
qvel = sim.data.qvel.copy()
qpos_qvel = np.concatenate([qpos, qvel], -1)
return {'qpos': qpos, 'qvel': qvel, 'qpos_qvel': qpos_qvel}
def _get_sim(self, seed):
if self.sim is None:
self.sim = self._get_new_sim(seed)
self.sim.data.qpos[0:2] = rand_pos_on_floor(self.sim)
return self.sim
def _get_new_sim(self, seed):
world_params = WorldParams(size=(self.floorsize, self.floorsize, 2.5),
num_substeps=self.n_substeps)
builder = WorldBuilder(world_params, seed)
floor = Floor()
builder.append(floor)
# Walls
wallsize = 0.1
wall = Geom('box', (wallsize, self.floorsize, 0.5), name="wall1")
wall.mark_static()
floor.append(wall, placement_xy=(0, 0))
wall = Geom('box', (wallsize, self.floorsize, 0.5), name="wall2")
wall.mark_static()
floor.append(wall, placement_xy=(1, 0))
wall = Geom('box', (self.floorsize - wallsize*2, wallsize, 0.5), name="wall3")
wall.mark_static()
floor.append(wall, placement_xy=(1/2, 0))
wall = Geom('box', (self.floorsize - wallsize*2, wallsize, 0.5), name="wall4")
wall.mark_static()
floor.append(wall, placement_xy=(1/2, 1))
# Add agents
obj = ObjFromXML("particle", name="agent0")
floor.append(obj)
obj.mark(f"object0")
# Add food sites
for i in range(self.n_food):
floor.mark(f"food{i}", (.5, .5, 0.05), rgba=(0., 1., 0., 1.))
sim = builder.get_sim()
# Cache constants for quicker lookup later
self.food_ids = np.array([sim.model.site_name2id(f'food{i}') for i in range(self.n_food)])
return sim
class FoodHealthWrapper(gym.Wrapper):
'''
Adds food health to underlying env.
Manages food levels.
Randomizes food positions.
'''
def __init__(self, env, max_food_health=10):
super().__init__(env)
self.unwrapped.max_food_health = self.max_food_health = max_food_health
self.unwrapped.max_food_size = self.max_food_size = 0.1
self.observation_space = update_obs_space(env,
{'food_obs': (self.unwrapped.n_food, 4),
'food_pos': (self.unwrapped.n_food, 3),
'food_health': (self.unwrapped.n_food, 1)})
def reset(self):
obs = self.env.reset()
# Reset food healths
self.unwrapped.food_healths = np.array([self.max_food_health
for _ in range(self.unwrapped.n_food)])
# Randomize food positions
new_pos = rand_pos_on_floor(self.unwrapped.sim, self.unwrapped.n_food)
sites_offset = (self.unwrapped.sim.data.site_xpos -
self.unwrapped.sim.model.site_pos).copy()
self.unwrapped.sim.model.site_pos[self.unwrapped.food_ids, :2] = \
new_pos - sites_offset[self.unwrapped.food_ids, :2]
# Reset food size
self.unwrapped.sim.model.site_size[self.unwrapped.food_ids] = self.max_food_size
return self.observation(obs)
def observation(self, obs):
# Add food positions and healths to observations
food_pos = self.unwrapped.sim.data.site_xpos[self.unwrapped.food_ids]
food_health = self.unwrapped.food_healths
obs['food_pos'] = food_pos
obs['food_health'] = np.expand_dims(food_health, 1)
obs['food_obs'] = np.concatenate([food_pos, np.expand_dims(food_health, 1)], 1)
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
assert np.all(self.unwrapped.food_healths >= 0), \
f"There is a food health below 0: {self.unwrapped.food_healths}"
obs = self.observation(obs)
return obs, rew, done, info
class ProcessEatFood(gym.Wrapper):
"""
Manage food health. Resize food based on health.
Expects a binary vector as input detailing which
"""
def __init__(self, env, eat_thresh=0.7):
super().__init__(env)
self.n_food = self.unwrapped.n_food
self.eat_thresh = eat_thresh
def reset(self):
return self.env.reset()
def observation(self, obs):
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
obs = self.observation(obs)
# Eat food that is close enough
agent_food_diff = obs['food_pos'] - np.expand_dims(obs['qpos'], axis=0)
dist_to_food =
|
np.linalg.norm(agent_food_diff, axis=-1)
|
numpy.linalg.norm
|
"""
Contains function that implement the Clustermatch coefficient
(https://doi.org/10.1093/bioinformatics/bty899).
"""
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Iterable, Union
import numpy as np
from numpy.typing import NDArray
from numba import njit, get_num_threads
from numba.typed import List
from clustermatch.pytorch.core import unravel_index_2d
from clustermatch.sklearn.metrics import adjusted_rand_index as ari
from clustermatch.scipy.stats import rank
from clustermatch.utils import chunker
@njit(cache=True, nogil=True)
def _get_perc_from_k(k: int) -> list[float]:
"""
It returns the percentiles (from 0.0 to 1.0) that separate the data into k
clusters. For example, if k=2, it returns [0.5]; if k=4, it returns [0.25,
0.50, 0.75].
Args:
k: number of clusters. If less than 2, the function returns an empty
list.
Returns:
A list of percentiles (from 0.0 to 1.0).
"""
return [(1.0 / k) * i for i in range(1, k)]
@njit(cache=True, nogil=True)
def run_quantile_clustering(data: NDArray, k: int) -> NDArray[np.int16]:
"""
Performs a simple quantile clustering on one dimensional data (1d). Quantile
clustering is defined as the procedure that forms clusters in 1d data by
separating objects using quantiles (for instance, if the median is used, two
clusters are generated with objects separated by the median). If the
data contains all the same values (zero variance), this implementation can
return fewer clusters than specified by k.
Args:
data: a 1d numpy array with numerical values.
k: the number of clusters to split the data into.
Returns:
A 1d array with the data partition.
"""
data_sorted = np.argsort(data, kind="quicksort")
data_rank = rank(data, data_sorted)
data_perc = data_rank / len(data)
percentiles = [0.0] + _get_perc_from_k(k) + [1.0]
cut_points = np.searchsorted(data_perc[data_sorted], percentiles, side="right")
current_cluster = 0
part = np.zeros(data.shape, dtype=np.int16) - 1
for i in range(len(cut_points) - 1):
lim1 = cut_points[i]
lim2 = cut_points[i + 1]
part[data_sorted[lim1:lim2]] = current_cluster
current_cluster += 1
return part
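# Editor's illustrative sketch (not part of the original module): splitting a
# small 1d vector into k=2 quantile clusters should separate the values below
# the median from those above it.
_demo_part = run_quantile_clustering(np.array([1.0, 5.0, 2.0, 4.0, 3.0, 6.0]), 2)
# expected grouping: {1, 2, 3} in one cluster and {4, 5, 6} in the other,
# i.e. something like array([0, 1, 0, 1, 0, 1], dtype=int16)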
@njit(cache=True, nogil=True)
def _get_range_n_clusters(
n_features: int, internal_n_clusters: Iterable[int] = None
) -> NDArray[np.uint16]:
"""
Given the number of features it returns a tuple of k values to cluster those
features into. By default, it generates a tuple of k values from 2 to
int(np.round(np.sqrt(n_features))) (inclusive). For example, for 25 features,
it will generate this tuple: (2, 3, 4, 5).
Args:
n_features: a positive number representing the number of features that
will be clustered into different groups/clusters.
internal_n_clusters: allows forcing a different list of cluster numbers. It
must be a list of integers. Repeated or invalid values will be dropped,
such as values less than 2 (a singleton partition is not allowed).
Returns:
A numpy array with integer values representing numbers of clusters.
"""
if internal_n_clusters is not None:
# remove k values that are invalid
clusters_range_list = list(
set([int(x) for x in internal_n_clusters if 1 < x < n_features])
)
else:
# default behavior if no internal_n_clusters is given: return range from
# 2 to sqrt(n_features)
n_sqrt = int(np.round(np.sqrt(n_features)))
n_sqrt = min((n_sqrt, 10))
clusters_range_list = list(range(2, n_sqrt + 1))
return np.array(clusters_range_list, dtype=np.uint16)
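# Editor's illustrative sketch (not part of the original module): with 25
# features and no explicit k list, the default range is 2 .. sqrt(25)
# (and never more than 10).
_demo_ks = _get_range_n_clusters(25, None)
# expected: array([2, 3, 4, 5], dtype=uint16)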
@njit(cache=True, nogil=True)
def _get_parts(data: NDArray, range_n_clusters: tuple[int]) -> NDArray[np.int16]:
"""
Given a 1d data array, it computes a partition for each k value in the given
range of clusters. This function only supports numerical data, and it
always runs run_quantile_clustering with the different k values.
If partitions with only one cluster are returned (singletons), then the
returned array will have negative values.
Args:
data: a 1d data vector. It is assumed that there are no nans.
range_n_clusters: a tuple with the number of clusters.
Returns:
A numpy array with shape (number of clusters, data rows) with
partitions of data.
"""
parts = np.zeros((len(range_n_clusters), data.shape[0]), dtype=np.int16)
for idx in range(len(range_n_clusters)):
k = range_n_clusters[idx]
parts[idx] = run_quantile_clustering(data, k)
# remove singletons
partitions_ks = np.array([len(np.unique(p)) for p in parts])
parts[partitions_ks == 1, :] = -1
return parts
def cdist_parts_basic(x: NDArray, y: NDArray) -> NDArray[float]:
"""
It implements the same functionality as scipy.spatial.distance.cdist, but
for clustering partitions, and instead of a distance it returns the adjusted
Rand index (ARI). In other words, it mimics this function call:
cdist(x, y, metric=ari)
Args:
x: a 2d array with m_x clustering partitions in rows and n objects in
columns.
y: a 2d array with m_y clustering partitions in rows and n objects in
columns.
Returns:
A 2d array with m_x rows and m_y columns and the ARI between each
partition pair. Each ij entry is equal to ari(x[i], y[j]) for each i
and j.
"""
res = np.zeros((x.shape[0], y.shape[0]))
for i in range(res.shape[0]):
for j in range(res.shape[1]):
res[i, j] = ari(x[i], y[j])
return res
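# Editor's illustrative sketch (not part of the original module): with one
# partition per side, cdist_parts_basic returns a 1x1 matrix holding the ARI
# of the two partitions; identical partitions give 1.0.
_demo_partition = np.array([[0, 0, 1, 1]], dtype=np.int16)
_demo_ari_matrix = cdist_parts_basic(_demo_partition, _demo_partition)  # array([[1.]])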
def cdist_parts_parallel(
x: NDArray, y: NDArray, executor: ThreadPoolExecutor
) -> NDArray[float]:
"""
It parallelizes cdist_parts_basic function.
Args:
x: same as in cdist_parts_basic
y: same as in cdist_parts_basic
executor: an pool executor where jobs will be submitted.
Returns:
Same as in cdist_parts_basic.
"""
res = np.zeros((x.shape[0], y.shape[0]))
inputs = list(chunker(np.arange(res.shape[0]), 1))
tasks = {executor.submit(cdist_parts_basic, x[idxs], y): idxs for idxs in inputs}
for t in as_completed(tasks):
idx = tasks[t]
res[idx, :] = t.result()
return res
@njit(cache=True, nogil=True)
def get_coords_from_index(n_obj: int, idx: int) -> tuple[int]:
"""
Given the number of objects and an index, it returns the row/column
position of the pairwise matrix. For example, if there are n_obj objects
(such as genes), a condensed 1d array can be created with pairwise
comparisons between genes, as well as a squared symmetric matrix. This
function receives the number of objects and the index of the condensed
array, and returns the coordinates of the squared symmetric matrix.
Args:
n_obj: the number of objects.
idx: the index of the condensed pairwise array across all n_obj objects.
Returns:
A tuple (i, j) with the coordinates of the squared symmetric matrix
equivalent to the condensed array.
"""
b = 1 - 2 * n_obj
x = np.floor((-b - np.sqrt(b ** 2 - 8 * idx)) / 2)
y = idx + x * (b + x + 2) / 2 + 1
return int(x), int(y)
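# Editor's illustrative sketch (not part of the original module): for n_obj=4
# the condensed indexes 0..5 map to the upper-triangular coordinates
# (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), matching scipy's condensed form.
_demo_coords = [get_coords_from_index(4, i) for i in range(6)]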
def to_numpy(x):
"""
Converts x into a numpy array. It is used to convert pandas Series and
DataFrames into numpy objects.
"""
if x is None:
return x
func = getattr(x, "to_numpy", None)
if not callable(func):
return x
return x.to_numpy()
def get_chunks(
iterable: Union[int, Iterable], n_threads: int, ratio: float = 1
) -> Iterable[Iterable[int]]:
"""
It splits elements in an iterable in chunks according to the number of
CPU cores available for parallel processing.
Args:
iterable: an iterable to be split in chunks. If it is an integer, it
will split the iterable given by np.arange(iterable).
n_threads: number of threads available for parallelization.
ratio: a ratio that allows increasing the number of splits given
n_threads. For example, with ratio=1, the function will just split
the iterable in n_threads chunks. If ratio is larger than 1, then
it will split in n_threads * ratio chunks.
Returns:
Another iterable with chunks according to the arguments given. For
example, if iterable is [0, 1, 2, 3, 4, 5] and n_threads is 2, it will
return [[0, 1, 2], [3, 4, 5]].
"""
if isinstance(iterable, int):
iterable = np.arange(iterable)
n = len(iterable)
expected_n_chunks = n_threads * ratio
res = list(chunker(iterable, int(np.ceil(n / expected_n_chunks))))
while len(res) < expected_n_chunks <= n:
# look for an element in res that can be split in two
idx = 0
while len(res[idx]) == 1:
idx = idx + 1
new_chunk = get_chunks(res[idx], 2)
res[idx] = new_chunk[0]
res.insert(idx + 1, new_chunk[1])
return res
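# Editor's illustrative sketch (not part of the original module):
# get_chunks(6, 2) splits np.arange(6) into two chunks, [0, 1, 2] and [3, 4, 5];
# get_chunks(6, 2, ratio=2) would produce four smaller chunks instead.
_demo_chunks = get_chunks(6, 2)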
def cm(
x: NDArray,
y: NDArray = None,
internal_n_clusters: Iterable[int] = None,
return_parts: bool = False,
n_chunks_threads_ratio: int = 3,
) -> tuple[NDArray[float], NDArray[np.uint64], NDArray[np.int16]]:
"""
This is the main function that computes the Clustermatch coefficient between
two arrays. This implementation only supports numerical data for
optimization purposes, but the original implementation can also work with
categorical data (https://github.com/sinc-lab/clustermatch).
To control the number of threads used, set the NUMBA_NUM_THREADS variable
to an integer. For example, NUMBA_NUM_THREADS=2 will use 2 cores.
Args:
x: a 1d or 2d numerical array with the data. NaNs are not supported.
If it is 2d, then the coefficient is computed for each pair of rows.
y: an optional 1d numerical array. If x is 1d and y is given, it computes
the coefficient between x and y.
internal_n_clusters: a list of integer values indicating the number of
clusters used to split x and y.
return_parts: if True, for each object pair, it returns the partitions
that maximized the coefficient.
n_chunks_threads_ratio: allows modifying how pairwise comparisons are
split across different threads. It's given as the ratio parameter of
function get_chunks.
Returns:
If return_parts is False, only Clustermatch coefficients are returned.
In that case, if x is 2d, a np.ndarray of size n x n is
returned with the coefficient values, where n is the number of rows in x.
If only a single coefficient was computed (for example, x and y were
given as 1d arrays each), then a single scalar is returned.
If return_parts is True, then it returns a tuple with three values:
1) the coefficients, 2) the partition indexes that maximized the coefficient
for each object pair, and 3) the partitions for all objects.
cm_values: if x is 2d, then it is a 1d condensed array of pairwise
coefficients. It has size (n * (n - 1)) / 2, where n is the number
of rows in x. If x and y are given, and they are 1d, then this is a
scalar. The Clustermatch coefficient is always between 0 and 1
(inclusive). If any of the two variables being compared has no
variation (all values are the same), the coefficient is not defined
(np.nan).
max_parts: an array with (n * (n - 1)) / 2 rows (one for each object
pair) and two columns. It has the indexes pointing to each object's
partition (parts, see below) that maximized the ARI. If
cm_values[idx] is nan, then max_parts[idx] will be meaningless.
parts: a 3d array that contains all the internal partitions generated
for each object in data. parts[i] has the partitions for object i,
whereas parts[i,j] has the partition j generated for object i. The
third dimension is the number of columns in x (if 2d) or elements in
x/y (if 1d). For example, if you want to access the pair of
partitions that maximized the Clustermatch coefficient given x and y
(a pair of objects), then max_parts[0] and max_parts[1] have the
partition indexes in parts, respectively: parts[0][max_parts[0]]
points to the partition for x, and parts[1][max_parts[1]] points to
the partition for y.
"""
x = to_numpy(x)
y = to_numpy(y)
if x.ndim == 1 and y is not None:
assert x.shape == y.shape
X = np.zeros((2, x.shape[0]))
X[0, :] = x
X[1, :] = y
elif x.ndim == 2:
X = x
else:
raise ValueError("Wrong combination of parameters x and y")
# get number of cores to use
default_n_threads = get_num_threads()
if internal_n_clusters is not None:
_tmp_list = List()
for x in internal_n_clusters:
_tmp_list.append(x)
internal_n_clusters = _tmp_list
# get matrix of partitions for each object pair
range_n_clusters = _get_range_n_clusters(X.shape[1], internal_n_clusters)
# store a set of partitions per row (object) in X as a multidimensional
# array:
# - 1st dim: number of objects/rows in X
# - 2nd dim: number of partitions per object
# - 3rd dim: number of features per object (columns in X)
parts = np.zeros(
(X.shape[0], range_n_clusters.shape[0], X.shape[1]), dtype=np.int16
)
# cm_values stores the clustermatch coefficients
n = X.shape[0]
n_comp = (n * (n - 1)) // 2
cm_values = np.full(n_comp, np.nan)
# for each object pair being compared, max_parts has the indexes of the
# partitions that maximized the ARI
max_parts = np.zeros((n_comp, 2), dtype=np.uint64)
with ThreadPoolExecutor(max_workers=default_n_threads) as executor:
# pre-compute the internal partitions for each object in parallel
inputs = get_chunks(n, default_n_threads, n_chunks_threads_ratio)
def compute_parts(idxs):
return np.array([_get_parts(X[i], range_n_clusters) for i in idxs])
for idx, ps in zip(inputs, executor.map(compute_parts, inputs)):
parts[idx] = ps
# Below, there are two layers of parallelism: 1) parallel execution
# across object pairs and 2) the cdist_parts_parallel function, which
# also runs several threads to compare partitions using ari. In 2) we
# need to disable parallelization in case len(cm_values) > 1 (that is,
# we have several object pairs to compare), because parallelization is
# already performed at this level. Otherwise, more threads than
# specified by the user are started.
cdist_parts_enable_threading = True if n_comp == 1 else False
cdist_func = None
map_func = executor.map
if cdist_parts_enable_threading:
map_func = map
def cdist_func(x, y):
return cdist_parts_parallel(x, y, executor)
else:
cdist_func = cdist_parts_basic
# compute coefficients
def compute_coef(idx_list):
"""
Given a list of indexes, each representing a pair of
objects/rows/genes, it computes the Clustermatch coefficient for
each of them. This function is supposed to be used to parallelize
processing.
Args:
idx_list: a list of indexes (integers), each of them
representing a pair of objects.
Returns:
Returns a tuple with two arrays. These two arrays are the same
arrays returned by the main cm function (cm_values and
max_parts) but for a subset of the data.
"""
n_idxs = len(idx_list)
max_ari_list = np.full(n_idxs, np.nan, dtype=float)
max_part_idx_list = np.zeros((n_idxs, 2), dtype=np.uint64)
for idx, data_idx in enumerate(idx_list):
i, j = get_coords_from_index(n, data_idx)
# get partitions for the pair of objects
obji_parts, objj_parts = parts[i], parts[j]
# compute ari only if partitions are not marked as "missing"
# (negative values)
if obji_parts[0, 0] < 0 or objj_parts[0, 0] < 0:
continue
# compare all partitions of one object to all the partitions
# of the other object, and get the maximum ARI
comp_values = cdist_func(
obji_parts,
objj_parts,
)
max_flat_idx = comp_values.argmax()
max_idx = unravel_index_2d(max_flat_idx, comp_values.shape)
max_part_idx_list[idx] = max_idx
max_ari_list[idx] =
|
np.max((comp_values[max_idx], 0.0))
|
numpy.max
|
import os, re
import numpy as np
import matplotlib.pyplot as plt
def prune( d, pattern=None, types=None ):
"""
@author: <NAME>
Delete dictionary keys with specified name pattern or types
Default types are: functions and modules.
>>> prune( {'a': 0, 'a_': 0, '_a': 0, 'a_a': 0, 'b': prune} )
{'a_a': 0}
"""
if pattern == None:
pattern = '(^_)|(_$)|(^.$)'
if types is None:
types = type( re ), type( re.sub )
grep = re.compile( pattern )
d2 = d.copy()
for k in d.keys():
if grep.search( k ) or type( d[k] ) in types:
del( d2[k] )
return d2
def parse( fd, prune_pattern=None, prune_types=None ):
"""
@author: <NAME> (modified by <NAME>)
parse vars in python file into new dict. ignore types and modules
through prune types.
inputs
fd (string or file) : full_path or file
returns
parameters (dict) : containing parameter and its value.
"""
d = {}
if isinstance(fd, str):
fd = open( os.path.expanduser( fd ) )
exec(fd.read(), d)
d = prune( d, prune_pattern, prune_types )
return d
def mw_to_m0( mw ):
""" converts moment magnitude to moment using eq. 9.73 in shearer 2009
inputs
mw (float) : moment magnitude
return
m0 (float): seismic moment
"""
return 10**(1.5*mw+9.1)
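# Editor's illustrative sketch (not part of the original module): a moment
# magnitude of 6.0 corresponds to a seismic moment of
# 10**(1.5*6.0 + 9.1) ~= 1.26e18 N*m.
_demo_m0 = mw_to_m0(6.0)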
def poly_area(x,y):
""" vectorized implementation of shoelace formula.
inputs
x (ndarray) : x coordinates of polygon
y (ndarray) : y coordinates of polygon
returns
area (float) : area of polygons in units of (x,y)
"""
return 0.5*np.absolute(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
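# Editor's illustrative sketch (not part of the original module): the shoelace
# formula applied to the unit square (vertices in counter-clockwise order)
# gives an area of 1.0.
_demo_area = poly_area(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1]))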
def poly_area_bbox(x,y):
""" compute area based on bounding box around set of polygons.
inputs
x (ndarray) : x coordinates of polygon
y (ndarray) : y coordinates of polygon
returns
area (float) : area of polygon in units of (x,y)
"""
minx = np.min(x)
maxx = np.max(x)
miny =
|
np.min(y)
|
numpy.min
|
import os
import numpy as np
from pathlib import Path
from typing import Callable, List, Dict
from haco.DIDrive_core.utils.data_utils.data_writter import write_json, write_episode_lmdb
from haco.DIDrive_core.utils.others.image_helper import save_image, is_image
from haco.DIDrive_core.data import BenchmarkDatasetSaver
class CICTBenchmarkDatasetSaver(BenchmarkDatasetSaver):
def __init__(self, save_dir: str, obs_cfg: Dict, post_process_fn: Callable = None):
super().__init__(save_dir, obs_cfg, post_process_fn)
def save_episodes_data(self, episodes_data: List, start_episode: int):
"""
Save data from several episodes sampled from the collector, with 'env_param' and 'data' keys
saved in each episode.
:Arguments:
- episodes_data (List): Saved data of episodes.
- start_episode (int): Start count of episode to save.
"""
for episode, episode_data in enumerate(episodes_data):
data = list()
episode_path = Path(self._save_dir).joinpath('episode_%05d' % (start_episode + episode))
CICTBenchmarkDatasetSaver._make_episode_path(episode_path, episode_data['env_param'])
for idx, frame_data in enumerate(episode_data['data']):
observations = frame_data['obs']
actions = frame_data['action']
if 'real_steer' not in actions:
actions['real_steer'] = actions['steer']
actions['real_throttle'] = actions['throttle']
actions['real_brake'] = actions['brake']
measurements = [
observations['tick'],
observations['timestamp'],
observations['forward_vector'],
observations['acceleration'],
observations['location'],
observations['speed'],
observations['command'],
observations['velocity'],
observations['angular_velocity'],
observations['rotation'],
actions['steer'],
actions['throttle'],
actions['brake'],
actions['real_steer'],
actions['real_throttle'],
actions['real_brake'],
observations['tl_state'],
observations['tl_dis'],
]
measurements = [x if x.shape != () else np.float32([x]) for x in measurements]
measurements =
|
np.concatenate(measurements, 0)
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 22 11:27:32 2019
@author: pjdudenas
"""
import fabio
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size':22})
class reduction:
def __init__(self,name):
self.name = name
def geometry(self,SDD,center_x, center_y, xpixels = 1475, ypixels = 1679,
pixel_size = .172,wavelength = .123894):
self.SDD = SDD
self.center_x = center_x
self.center_y = center_y
self.xpixels = xpixels
self.ypixels = ypixels
self.pixel_size = pixel_size
self.qpx = 2*np.pi*(pixel_size*10e6)/(self.SDD*10e6*wavelength)
self.qp = (np.arange(1,xpixels+1)-center_x)*self.qpx
self.qz = -(np.arange(1,ypixels+1)-center_y)*self.qpx
self.xran = np.arange(self.center_x,self.xpixels)
self.yran = np.arange(0,self.center_y+1)
self.wavelength = wavelength
def load(self):
self.data = np.array(fabio.open(self.name).data,dtype=float)
self.data[self.data < 1] = 1
def raw_plot(self,size=(12,9)):
fig, ax = plt.subplots(figsize=size)
plt.imshow(np.log(self.data),cmap='jet')
plt.colorbar()
plt.show()
def plot(self,size=(9,9),show_cbar='False'):
fig, ax = plt.subplots(figsize=size)
plt.imshow(np.log(self.data),cmap='jet',extent=[self.qp[0],self.qp[-1],self.qz[-1],self.qz[0]],
aspect='auto')
plt.xlim([-2,2])
plt.ylim([0,4])
plt.yticks(np.arange(1,5))
plt.xticks(np.arange(-2,3))
# tick_loc, tick_label = plt.yticks()
# ax.set_yticklabels(map(str,(np.abs(tick_loc))))
plt.xlabel(r'$q_p$ $[nm^{-1}]$')
plt.ylabel(r'$q_z$ $[nm^{-1}]$')
if show_cbar == 'True':
plt.colorbar()
plt.show()
def qp_linecut(self,ypixel1=1200,ypixel2=1215):
I = np.mean(self.data[ypixel1:ypixel2,self.xran],axis=0)
qp = self.qp[self.xran]
# print(self.xran.shape)
return qp, I
def qz_linecut(self,xpixel1=650,xpixel2=660):
I = np.mean(self.data[self.yran,xpixel1:xpixel2],axis=1)
qz = self.qz[self.yran]
return qz, I
def SRFconvert(self,alphai):
self.alphai = alphai*np.pi/180
ypos = (self.center_y - np.arange(1,self.ypixels+1))*self.pixel_size
ypos = ypos.reshape(len(ypos),1)
xpos = (np.arange(1,self.xpixels+1)-self.center_x)*self.pixel_size
xpos = xpos.reshape((1,len(xpos)))
gamma = np.arctan(xpos/self.SDD)
delta = np.arctan(ypos/np.sqrt(self.SDD**2+xpos**2))
qx = 2*np.pi/self.wavelength*(-np.cos(delta)*np.sin(gamma));
qy = 2*np.pi/self.wavelength*(np.cos(self.alphai)*(np.cos(delta)*np.cos(gamma)-1)+np.sin(self.alphai)*np.sin(delta));
self.qz = 2*np.pi/self.wavelength*(np.cos(self.alphai)*np.sin(delta) + np.sin(self.alphai)*(1 - np.cos(delta)*np.cos(gamma)));
self.q =
|
np.sqrt(qx**2+qy**2+self.qz**2)
|
numpy.sqrt
|
"""
GUI code modified from https://github.com/miili/StreamPick
For earthquake PKiKP coda quality evaluation and stacking
"""
import os
import pickle
import pandas as pd
import numpy as np
# GUI import
import PyQt5
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sys
import signal
import scipy
import gpar
from gpar.util import util
from itertools import cycle
#figure plot import
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.transforms import offset_copy
from matplotlib.widgets import RectangleSelector
import matplotlib.colors as mcolors
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import shiftgrid
# from mpl_toolkits.axesgrid1 import make_axes_locatable
#obspy import
from obspy.taup import TauPyModel
import obspy
from obspy.core.trace import Trace
from obspy.core.stream import Stream
from obspy.core import read
from obspy.core import AttribDict
signal.signal(signal.SIGINT, signal.SIG_DFL)
# color = list(mcolors.cnames.values())
color = ['red', 'blue', 'green','yellow','cyan','magenta','purple']
#class for event first evaluation
class glanceEQ(QtWidgets.QMainWindow):
def __init__(self, array=None, parent=None, ap=None):
if ap is None:
self.qApp = QtWidgets.QApplication(sys.argv)
else:
self.qApp = ap
self.KeepGoing = False
if isinstance(array, str):
ar = util.loadArray(array)
elif isinstance(array, gpar.arrayProcess.Array):
ar = array
else:
msg = 'Define Array instance = gpar.arrayProcess.Array() or a path to a pickle file'
raise ValueError(msg)
self.array = ar
self.eve_type = ['A','B','C','D']
self._shortcuts = {'eve_next': 'n',
'eve_prev': 'p',
'trim_apply': 'w',
'gain_up': 'u',
'gain_down': 'd',
'strip': 's',
'A':'a',
'B':'b',
'C':'c',
'D':'d'}
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = None
self._initEqList()
self._stripDF = pd.DataFrame()
self._badDF = pd.DataFrame()
self._btype = 'beam'
self._method = 'all'
self.trinWin = [{'name':'N200-C200','noise':200.0,'coda':200.0,
'stime':400.0,'etime':1800,
'smooth':4.0,'model':'ak135'}]
self._current_win = None
self._current_strip = False
self._eventCycle = cycle(self._eqlist)
self._eventInfo(next(self._eventCycle))
QMainWindow.__init__(self)
self.setupUI()
def setupUI(self):
self.main_widget = QtWidgets.QWidget(self)
self._initMenu()
self._createStatusBar()
self._initPlots()
l = QVBoxLayout(self.main_widget)
l.addLayout(self.btnbar)
l.addLayout(self.btnbar2)
l.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.setGeometry(300, 300, 1200, 800)
self.setWindowTitle('Array Analysis: %s'%self.array.name)
self.show()
def _killLayout():
pass
def _initEqList(self):
self._eqlist = []
for _eve in self._events:
self._eqlist.append(_eve.ID)
self._eqlist.sort()
def _initPlots(self):
self.fig = Figure(facecolor='.86',dpi=100, frameon=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocusPolicy(PyQt5.QtCore.Qt.StrongFocus)
self._drawFig()
# connect the events
self.fig.canvas.mpl_connect('scroll_event', self._pltOnScroll)
self.fig.canvas.mpl_connect('motion_notify_event', self._pltOnDrag)
self.fig.canvas.mpl_connect('button_release_event', self._pltOnButtonRelease)
def _initMenu(self):
# Next and Prev Earthquake
nxt = QtWidgets.QPushButton('Next >>',
shortcut=self._shortcuts['eve_next'], parent=self.main_widget)
nxt.clicked.connect(self._pltNextEvent)
nxt.setToolTip('shortcut <b>n</b>')
nxt.setMaximumWidth(150)
prv = QPushButton('Prev >>',
shortcut=self._shortcuts['eve_prev'], parent=self.main_widget)
prv.clicked.connect(self._pltPrevEvent)
prv.setToolTip('shortcut <b>p</b>')
prv.setMaximumWidth(150)
# Earthquake drop-down
self.evecb = QComboBox(self)
for eve in self._eqlist:
self.evecb.addItem(eve)
self.evecb.activated.connect(self._pltEvent)
self.evecb.setMaximumWidth(1000)
self.evecb.setMinimumWidth(80)
# coda strip button
self.codabtn = QtWidgets.QPushButton('Strip',
shortcut=self._shortcuts['strip'],parent=self.main_widget)
self.codabtn.setToolTip('shortcut <b>s</b>')
self.codabtn.clicked.connect(self._appStrip)
self.codacb = QComboBox(self)
for med in ['all', 'coda','twoline']:
self.codacb.addItem(med)
self.codacb.activated.connect(self._selectMethod)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.wincb = QComboBox(self)
self.wincb.activated.connect(self._changeStrip)
self._updateWindow()
# edit/delete coda selected window
winEdit = QtWidgets.QPushButton('Coda Window')
winEdit.resize(winEdit.sizeHint())
winEdit.clicked.connect(self._editTimeWindow)
winDelt = QtWidgets.QPushButton('Delete')
winDelt.resize(winDelt.sizeHint())
winDelt.clicked.connect(self._deleteWin)
# Coda level
_radbtn = []
for _o in self.eve_type:
_radbtn.append(QRadioButton(_o.upper(), shortcut=self._shortcuts[_o.upper()]))
_radbtn[-1].setToolTip('Level: '+_o)
self.levelGrp = QButtonGroup()
self.levelGrp.setExclusive(True)
levelbtn = QHBoxLayout()
for _i, _btn in enumerate(_radbtn):
self.levelGrp.addButton(_btn, _i)
levelbtn.addWidget(_btn)
# plot slide beam figure button
self.sbcb = QComboBox(self)
for btype in ['beam', 'slide', 'vespetrum','strip']:
self.sbcb.addItem(btype)
self.sbcb.activated.connect(self._updatePlot)
self.vepcb = QComboBox(self)
for scale in ['log10', 'log','sqrt','beam']:
self.vepcb.addItem(scale)
self.vepcb.activated.connect(self._updatePlot )
self.vepcb.setEnabled(False)
self.codacb.setMaximumWidth(100)
self.codacb.setMinimumWidth(80)
self.ampmin = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=1)
self.ampmax = QDoubleSpinBox(decimals=1, maximum=5, minimum=-2, singleStep=.5, value=3)
self.ampmin.valueChanged.connect(self._updatePlot)
self.ampmax.valueChanged.connect(self._updatePlot)
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
# self._initAmp()
self.sbcb.activated.connect(self._activeAmp)
self.ttbtn = QtWidgets.QPushButton('Phases', parent=self.main_widget)
self.ttbtn.setCheckable(True)
self.ttbtn.clicked.connect(self._updatePlot)
# Arrange buttons
vline = QFrame()
vline.setFrameStyle(QFrame.VLine | QFrame.Raised)
self.btnbar = QHBoxLayout()
self.btnbar.addWidget(prv)
self.btnbar.addWidget(nxt)
self.btnbar.addWidget(QLabel('Event'))
self.btnbar.addWidget(self.evecb)
##
self.btnbar.addWidget(vline)
self.btnbar.addWidget(self.codabtn)
self.btnbar.addWidget(self.codacb)
self.btnbar.addWidget(self.wincb)
self.btnbar.addWidget(winEdit)
self.btnbar.addWidget(winDelt)
self.btnbar.addStretch(1)
self.btnbar2 = QHBoxLayout()
self.btnbar2.addWidget(QLabel('Level: '))
self.btnbar2.addLayout(levelbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('TYPE'))
self.btnbar2.addWidget(self.sbcb)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('Scale'))
self.btnbar2.addWidget(self.vepcb)
self.btnbar2.addWidget(QLabel('AMP'))
self.btnbar2.addWidget(self.ampmin)
self.btnbar2.addWidget(self.ampmax)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.ttbtn)
self.btnbar2.addStretch(1)
#Menubar
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save', self._saveFile)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save as', self._saveFileFormat)
fileMenu.addSeparator()
fileMenu.addAction(QIcon().fromTheme('document-open'),
'Load array', self._openArray)
fileMenu.addAction(QtGui.QIcon().fromTheme('document-open'),
'Load Strip Pickle File', self._openFile)
fileMenu.addSeparator()
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save Plot', self._savePlot)
fileMenu.addSeparator()
quit = QAction(QIcon().fromTheme('application-exit')," &Exit", self)
fileMenu.addAction(quit)
fileMenu.triggered[QAction].connect(self.closeArray)
def _hardExist(self):
self.deleteLater()
def _activeAmp(self):
if self.sbcb.currentText() == 'vespetrum':
self.ampmin.setEnabled(True)
self.ampmax.setEnabled(True)
self.vepcb.setEnabled(True)
if self.vepcb.currentText() == 'beam':
self.ampmax.setMaximum(100000)
# self.ampmax.setValue(1000)
self.ampmax.setSingleStep(500)
# self.ampmin.setValue(10)
self.ampmin.setMaximum(100000)
self.ampmin.setSingleStep(500)
elif self.vepcb.currentText() == 'sqrt':
self.ampmax.setMaximum(300)
# self.ampmax.setValue(30)
self.ampmax.setSingleStep(5)
# self.ampmin.setValue(3)
self.ampmin.setMaximum(300)
self.ampmin.setSingleStep(5)
elif self.vepcb.currentText() == 'log':
self.ampmax.setMaximum(12)
# # self.ampmax.setValue(7)
self.ampmax.setSingleStep(1)
# # self.ampmin.setValue(2)
self.ampmin.setMaximum(12)
self.ampmin.setSingleStep(1)
elif self.vepcb.currentText() == 'log10':
self.ampmax.setSingleStep(0.5)
self.ampmin.setSingleStep(0.5)
self.ampmax.setMaximum(5)
self.ampmin.setMaximum(5)
else:
self.ampmin.setEnabled(False)
self.ampmax.setEnabled(False)
self.vepcb.setEnabled(False)
def _createStatusBar(self):
"""
Creates the status bar
"""
sb =QStatusBar()
sb.setFixedHeight(18)
self.setStatusBar(sb)
self.statusBar().showMessage('Ready')
def _selectMethod(self, index):
self._method = self.codacb.currentText()
self.sbcb.setCurrentIndex(3)
self._updatePlot()
def _changeStrip(self,index):
if index == len(self.trinWin):
return self._newTrim()
else:
return self._appStrip()
def _newTrim(self):
"""
Create a new strip window
"""
newWin = self.defWindow(self)
if newWin.exec_():
self.trinWin.append(newWin.getValues())
self._updateWindow()
self.wincb.setCurrentIndex(len(self.trinWin)-1)
self._appStrip()
def _editTimeWindow(self):
"""
Edit existing coda selection window
"""
_i = self.wincb.currentIndex()
this_window = self.trinWin[_i]
editWindow = self.defWindow(self, this_window)
if editWindow.exec_():
self.trinWin[_i] = editWindow.getValues()
self._updateWindow()
self.wincb.setCurrentIndex(_i)
self._appStrip()
def _deleteWin(self):
"""
Delete window
"""
pass
_i = self.wincb.currentIndex()
def _updateWindow(self):
self.wincb.clear()
self.wincb.setCurrentIndex(-1)
for _i, _f in enumerate(self.trinWin):
self.wincb.addItem('Noise %.2f sec - Coda %.2f sec' %(_f['noise'], _f['coda']))
self.wincb.addItem('Create new Window')
def _appStrip(self, button=True, draw=True):
"""
Apply coda strip
"""
_method = self.codacb.currentText()
_j = self.wincb.currentIndex()
self._eventInfo(self._current_id)
self._current_strip = True
spts = int(self.trinWin[_j]['smooth'] / self._current_delta )
codaStrip(self._current_event, method=_method, window=spts,
siglen=self.trinWin[_j]['coda'], noise=self.trinWin[_j]['noise'],beamphase=self.beamphase,
model=self.trinWin[_j]['model'], stime=self.trinWin[_j]['stime'], etime=self.trinWin[_j]['etime'],)
self._btype = 'strip'
self.sbcb.setCurrentIndex(3)
self._setCodaStrip()
self._updatePlot()
def _pltEvent(self):
"""
Plot event from DropDown Menu
"""
_i = self.evecb.currentIndex()
while next(self._eventCycle) != self._eqlist[_i]:
pass
self._eventInfo(self._eqlist[_i])
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip=True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
self._drawFig()
def _pltPrevEvent(self):
"""
Plot previous events
"""
_j = self.evecb.currentIndex()
for _i in range(len(self._eqlist) - 1):
prevEvent = next(self._eventCycle)
self._eventInfo(prevEvent)
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
if _j == 0:
_n = len(self.evecb) - 1
self.evecb.setCurrentIndex(_n)
else:
self.evecb.setCurrentIndex(_j-1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _pltNextEvent(self):
_id = self._current_event.ID
level = self.eve_type[self.levelGrp.checkedId()]
if level == 'D':
self._current_strip = True
self._setCodaStrip()
else:
# if len(self._stripDF) != 0:
# existDF = self._stripDF[(self._stripDF.ID == _id)]
# else:
# existDF = pd.DataFrame()
# if len(existDF) == 0:
if not self._current_strip:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice is QMessageBox.Yes:
self._current_strip = True
self._appStrip()
return
self._eventInfo(next(self._eventCycle))
self._current_strip = False
_id = self._current_event.ID
if len(self._stripDF) != 0:
existDF = self._stripDF[self._stripDF.ID == _id]
else:
existDF = pd.DataFrame()
if len(existDF) != 0:
level = existDF.Level.iloc[0]
ind = self.eve_type.index(level)
self.levelGrp.button(ind).setChecked(True)
self._current_strip = True
else:
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == _id]
if len(_badDF) != 0:
self.levelGrp.button(3).setChecked(True)
self._current_strip = True
_i = self.evecb.currentIndex()
if _i == len(self.evecb) - 1:
self.evecb.setCurrentIndex(0)
else:
self.evecb.setCurrentIndex(_i+1)
if self._btype == 'strip':
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._drawFig()
def _eventInfo(self, eqid):
"""
Copies the array process result from the current Earthquake object
"""
for eve in self._events:
if eve.ID == eqid:
event = eve
self._current_event = event
self.beamphase = event.beamphase
self._current_id = eqid
if not hasattr(event, 'beam'):
return
self._current_beam = event.beam
filts = {}
for tr in self._current_beam:
filts[tr.stats.station] = tr.stats.channel
self._current_filts = filts
self._current_ID = event.ID
self._current_dis = event.dis
self._current_p = event.rayp
self._current_bb = event.bb
self._current_bakAz = event.baz
self._current_delta = event.delta
if hasattr(event, 'slideSt'):
self._current_slide = event.slideSt
if hasattr(event, 'energy'):
self._current_energy = event.energy
self._current_time = event.slantTime
self._current_K = event.slantK
self._current_type = event.slantType
def _setCodaStrip(self):
if not self._current_strip:
return
event = self._current_event
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) !=0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()
if len(existDF) !=0:
choice = QMessageBox.question(self, 'Replace stripping',
"Do you want to replace existed stripping?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = existDF.index
self._stripDF.drop(index,axis=0,inplace=True)
self._stripDF.reset_index(inplace=True, drop=True)
else:
return
if len(_badDF) != 0:
choice = QMessageBox.question(self, 'Bad Event',
"Want to replace it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = _badDF.index
self._badDF.drop(index,axis=0,inplace=True)
self._badDF.reset_index(inplace=True, drop=True)
else:
return
level = self.eve_type[self.levelGrp.checkedId()]
ID = event.ID
lat = event.lat
lon = event.lon
dep = event.dep
mw = event.mw
dis = event.dis
bb = event.bb
bakAzi = event.baz
delta = event.delta
if level =='D':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,'Level':'D'}
msg = ('%s is Bad Event'%self._current_ID)
gpar.log(__name__, msg, level='info', pri=True)
self._badDF = self._badDF.append(newRow, ignore_index=True)
else:
if self._method == 'all':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,
'winName':win['name'], 'win':win,
'Level':level, 'delta': delta,
'codaResSt':event.codaResSt,
'codaSt':event.codaSt,
'crms':event.codaMod,
'twoResSt':event.twoResSt,
'twoSt':event.twoSt,
'trms':event.twoMod}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
elif self._method == 'coda':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'winName':win['name'], 'win':win,
'BB':bb,'bakAzi':bakAzi,
'Level':level, 'delta': delta,
'codaResSt':event.codaResSt,
'codaSt':event.codaSt,
'crms':event.codaMod,
}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
elif self._method == 'twoline':
newRow = {'ID': ID, 'lat':lat,
'lon':lon,'dep':dep,
'Mw':mw,'Del':dis,
'BB':bb,'bakAzi':bakAzi,
'Level':level, 'delta': delta,
'winName':win['name'], 'win':win,
'twoResSt':event.twoResSt,
'twoSt':event.twoSt,
'trms':event.twoMod}
self._stripDF = self._stripDF.append(newRow, ignore_index=True)
def _drawFig(self):
self.fig.clear()
a = u"\u00b0"
if self._btype == 'beam':
num_plots = len(self._current_beam)
for _i, tr in enumerate(self._current_beam):
ax = self.fig.add_subplot(num_plots, 1, _i+1)
if hasattr(tr.stats, 'channel'):
label = tr.stats.channel
else:
label=None
time = np.arange(tr.stats.npts) * tr.stats.delta + tr.stats.sac.b
ax.plot(time, tr.data, 'k', label=label)
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
arrival = self._current_event.arrivals[self.beamphase]['TT']# - self._current_event.time
ax.vlines(arrival, ax.get_ylim()[0],ax.get_ylim()[1],'r', label=self.beamphase)
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for name, tt in _arr.items():
if name == self.beamphase:
continue
ax.vlines(tt['TT'], ax.get_ylim()[0],ax.get_ylim()[1],'b',label=name)
ax.legend()
if _i == 0:
ax.set_xlabel('Seconds')
self.fig.suptitle('%s - %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._btype, self._current_event.dep, self._current_event.dis, a))
elif self._btype == 'slide':
self.fig.suptitle('%s - %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._btype, self._current_event.dep, self._current_event.dis, a))
nfilts = len(self._current_slide.keys())
ax = self.fig.subplots(4, nfilts, sharex='col', sharey='row')
ax = ax.reshape(4,nfilts)
for ind, (name,st) in enumerate(self._current_slide.items()):
for _i, tr in enumerate(st):
if hasattr(tr.stats, 'channel'):
label = tr.stats.channel
else:
label=None
time = np.arange(tr.stats.npts) * tr.stats.delta + tr.stats.sac.b
ax[_i,ind].plot(time, tr.data, 'k', label=None)
ax[_i, ind].set_xlim([np.min(time), np.max(time)])
if label == 'Amplitude':
peak = np.max(tr.data) + 1
ax[_i,ind].set_ylim([-1, peak])
elif label == 'Slowness':
ax[_i,ind].set_ylim([0, 15])
rp = self._current_event.rayp
ax[_i, ind].hlines(rp, np.min(time), np.max(time), 'r', 'dashed')
elif label == 'Back Azimuth':
ax[_i, ind].set_ylim([0,360])
elif label == 'coherence':
ax[_i, ind].set_ylim([0,1])
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
arrival = self._current_event.arrivals[self.beamphase]['TT']# - self._current_event.time
ax[_i,ind].vlines(arrival, ax[_i,ind].get_ylim()[0],ax[_i,ind].get_ylim()[1],'r',label=self.beamphase)
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for pname, tt in _arr.items():
if pname == self.beamphase:
continue
ax[_i,ind].vlines(tt['TT'], ax[_i,ind].get_ylim()[0],ax[_i,ind].get_ylim()[1],'b',label=pname)
ax[_i,ind].legend()
# ax[_i,ind].set_aspect(aspect=0.3)
if _i == 3:
ax[_i,ind].set_xlabel('Seconds')
if _i == 0:
ax[_i,ind].set_title(name)
if ind == 0:
ax[_i,ind].set_ylabel(label)
elif self._btype == 'vespetrum':
num = len(self._current_energy)
extent=[np.min(self._current_time),np.max(self._current_time),np.min(self._current_K),np.max(self._current_K)]
vmin = float(self.ampmin.cleanText())
vmax = float(self.ampmax.cleanText())
if not hasattr(self._current_event, 'arrivals'):
self._current_event.getArrival()
for ind, _row in self._current_energy.iterrows():
# abspow = _row.POWER
name = _row.FILT
if self.vepcb.currentText() == 'log10':
abspow = np.log10(np.abs(_row.POWER))
elif self.vepcb.currentText() == 'log':
abspow = np.log(np.abs(_row.POWER))
elif self.vepcb.currentText() == 'sqrt':
abspow = np.sqrt(np.abs(_row.POWER))
else:
abspow = np.abs(_row.POWER)
ax = self.fig.add_subplot(1, num, ind+1)
ax.imshow(abspow, extent=extent, aspect='auto', cmap='Reds', vmin=vmin, vmax=vmax, origin='lower')
arrival = self._current_event.arrivals[self.beamphase]['TT']
ax.vlines(arrival, ax.get_ylim()[0],ax.get_ylim()[1],'k',label=self.beamphase)
rp = self._current_event.rayp
ax.hlines(rp, ax.get_xlim()[0],ax.get_xlim()[1], 'b')
ax.hlines(-rp, ax.get_xlim()[0],ax.get_xlim()[1], 'b')
if self.ttbtn.isChecked():
_arr = self._current_event.arrivals
# del _arr[self.beamphase]
for name, tt in _arr.items():
if name == self.beamphase:
continue
ax.vlines(tt['TT'], ax.get_ylim()[0],ax.get_ylim()[1],'b',label=name)
ax.legend()
ax.set_xlabel('Seconds')
if ind == 0:
ax.set_ylabel(self._current_type)
ax.set_title(name)
if self._current_type == 'slowness':
title = '%s - %s\nSlant Stack at a Backazimuth of %.1f %sN\nDep:%s Distance: %s%s' \
%(self._btype, self._current_ID, self._current_event.baz,a,
self._current_event.dep, self._current_event.dis, a)
elif self._current_type == 'theta':
title = '%s - %s\nSlant Stack at a slowness of %.2f s/deg\nDep:%s Distance: %s%s' \
%(self._btype, self._current_ID, self._current_event.rayp,
self._current_event.dep, self._current_event.dis, a)
self.fig.suptitle(title)
elif self._btype == 'strip':
_i = self.wincb.currentIndex()
win = self.trinWin[_i]
if len(self._stripDF) != 0:
existDF = self._stripDF[(self._stripDF.ID == self._current_event.ID) & (self._stripDF.winName == win['name'])]
else:
existDF = pd.DataFrame()
if len(self._badDF) != 0:
_badDF = self._badDF[self._badDF.ID == self._current_event.ID]
else:
_badDF = pd.DataFrame()
if len(existDF) == 0 and len(_badDF) == 0:
choice = QMessageBox.question(self, 'Stripping?',
"Haven't stripping yet, want to do it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
self._appStrip()
else:
self._btype = 'beam'
self.sbcb.setCurrentIndex(0)
self._updatePlot()
elif len(_badDF) != 0:
choice = QMessageBox.question(self, 'Bad event!',
"Want to reevalua it?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
index = _badDF.index
self._badDF.drop(index,axis=0,inplace=True)
self._badDF.reset_index(inplace=True, drop=True)
self.sbcb.setCurrentIndex(0)
self._updatePlot()
else:
self.sbcb.setCurrentIndex(0)
self._updatePlot()
elif len(existDF) != 0:
trinwin = existDF.win.iloc[0]
stime = trinwin['stime']
etime = trinwin['etime']
delta = self._current_beam[0].stats.delta
# npts = int((etime - stime)/delta) + 1
npts = int((etime - stime)/delta)
# time = np.linspace(stime, etime, npts)
time = stime + np.arange(npts) * delta
sind = int(stime / delta)
# eind = int(etime / delta)
if self._method == 'all':
codamode = existDF.crms.iloc[0]
twomode = existDF.trms.iloc[0]
nfilter = len(codamode)
codaSt = existDF.codaSt.iloc[0]
twoSt = existDF.twoSt.iloc[0]
cRes = existDF.codaResSt.iloc[0]
tRes = existDF.twoResSt.iloc[0]
timeR = np.arange(cRes[0].stats.npts)*cRes[0].stats.delta - trinwin['noise']
data_time = np.arange(twoSt[0].stats.npts) * delta + (twoSt[0].stats.starttime - self._current_beam[0].stats.starttime)
ax = self.fig.subplots(2, nfilter)
if nfilter == 1:
ax = ax.reshape(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_coda = codaSt[ind].data
time_coda = stime + np.arange(len(data_coda)) * delta
ax[0,ind].plot(time_coda,np.log10(data_coda),'r', label='coda')
data_two = twoSt[ind].data
ax[0,ind].plot(data_time, data_two,'b', label='twoline')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_c = "Coda: Mean RMS = %s"%(codamode['RMS'].iloc[ind])
label_t = "Twoline: Mean RMS = %s"%(twomode['RMS'].iloc[ind])
ax[1,ind].plot(timeR,cRes[ind].data, 'r', label=label_c)
ax[1,ind].plot(timeR, tRes[ind].data, 'b',label=label_t)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%twomode['FILT'].iloc[ind])
if ind == 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
elif self._method == 'coda':
codamode = existDF.crms.iloc[0]
nfilter = len(codamode)
codaSt = existDF.codaSt.iloc[0]
cRes = existDF.codaResSt.iloc[0]
timeR = np.arange(cRes[0].stats.npts)*cRes[0].stats.delta - trinwin['noise']
ax = self.fig.subplots(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_coda = codaSt[ind].data
time_coda = stime + np.arange(len(data_coda)) * delta
ax[0,ind].plot(time_coda,np.log10(data_coda),'r', label='coda')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_c = "Coda: Mean RMS = %s"%(codamode['RMS'].iloc[ind])
ax[1,ind].plot(timeR,cRes[ind].data, 'r', label=label_c)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%codamode['FILT'].iloc[ind])
if ind == 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
elif self._method == 'twoline':
twomode = existDF.trms.iloc[0]
nfilter = len(twomode)
twoSt = existDF.twoSt.iloc[0]
tRes = existDF.twoResSt.iloc[0]
timeR = np.arange(tRes[0].stats.npts)*tRes[0].stats.delta - trinwin['noise']
data_time = np.arange(twoSt[0].stats.npts) * delta + (twoSt[0].stats.starttime - self._current_beam[0].stats.starttime)
ax = self.fig.subplots(2, nfilter)
for ind in range(nfilter):
data = np.abs(scipy.signal.hilbert(self._current_beam[ind].data))[sind:sind+npts]
ax[0,ind].plot(time,np.log10(data),'k', label='beam')
data_two = twoSt[ind].data
ax[0,ind].plot(data_time, data_two,'b', label='twoline')
ax[0,ind].set_xlim([stime, etime])
ax[0,ind].set_ylim([-1, 5])
ax[0,ind].set_xlabel('Seconds')
ax[0,ind].legend()
label_t = "Twoline: Mean RMS = %s"%(twomode['RMS'].iloc[ind])
ax[1,ind].plot(timeR, tRes[ind].data, 'b',label=label_t)
ax[1,ind].legend()
ax[1,ind].set_xlabel('Seconds')
ax[1,ind].set_xlim([-trinwin['noise']/2, trinwin['noise']/2+trinwin['coda']])
ax[0,ind].set_title('Filter: %s'%twomode['FILT'].iloc[ind])
if ind == 0:
ax[0,ind].set_ylabel('log10(Amp)')
ax[1,ind].set_ylabel('Amp')
self.fig.suptitle('Coda Strip for %s using %s method in win %s\nDep:%s Distance: %s%s'
%(self._current_event.ID, self._method, trinwin['name'],
self._current_event.dep, self._current_event.dis, a))
self._canvasDraw()
#def _plotTT(self):
# if self.ttbtn.isChecked() is False:
def _updatePlot(self):
self._activeAmp()
self._btype = self.sbcb.currentText()
self._drawFig()
def _canvasDraw(self):
"""
Redraws the canvas and re-set mouse focus
"""
# if isinstance(st, obspy.core.stream.Stream):
# delta = st[0].stats.delta
# elif isinstance(st, obspy.core.trace.Trace):
# delta = st.stats.delta
for _i, _ax in enumerate(self.fig.get_axes()):
_ax.set_xticklabels(_ax.get_xticks())
self.fig.canvas.draw()
self.canvas.setFocus()
def _pltOnScroll(self, event):
"""
Scrolls/Redraws the plot along x axis
"""
if event.inaxes is None:
return
if event.key == 'control':
axes = [event.inaxes]
else:
axes = self.fig.get_axes()
for _ax in axes:
left = _ax.get_xlim()[0]
right = _ax.get_xlim()[1]
extent_x = right - left
dxzoom = .2 * extent_x
aspect_left = (event.xdata - _ax.get_xlim()[0]) / extent_x
aspect_right = (_ax.get_xlim()[1] - event.xdata) / extent_x
up = _ax.get_ylim()[1]
down = _ax.get_ylim()[0]
extent_y = up - down
dyzoom = 0.5 * extent_y
aspect_down = (0 - _ax.get_ylim()[0]) / extent_y
aspect_up = _ax.get_ylim()[1] / extent_y
if event.button == 'up':
left += dxzoom * aspect_left
right -= dxzoom * aspect_right
down += dyzoom * aspect_down
up -= dyzoom * aspect_up
elif event.button == 'down':
left -= dxzoom * aspect_left
right += dxzoom * aspect_right
down -= dyzoom * aspect_down
up += dyzoom * aspect_up
else:
return
_ax.set_xlim([left, right])
_ax.set_ylim([down, up])
self._canvasDraw()
def _pltOnDrag(self, event):
"""
Drags/redraws the plot upon drag
"""
if event.inaxes is None:
return
if event.key == 'control':
axes = [event.inaxes]
else:
axes = self.fig.get_axes()
if event.button == 1:
if self._plt_drag is None:
self._plt_drag = event.xdata
return
for _ax in axes:
_ax.set_xlim([_ax.get_xlim()[0] +
(self._plt_drag - event.xdata), _ax.get_xlim()[1] + (self._plt_drag - event.xdata)])
else:
return
self._canvasDraw()
def _pltOnButtonRelease(self, event):
"""
On Button Release Reset drag variable
"""
self._plt_drag = None
# def _pltOnButtonPress(self, event):
# """
# This function is using for zoom in relative phase region
# """
def _saveFile(self):
if self.savefile is None:
return self._saveFileFormat()
savefile = str(self.savefile)
if os.path.splitext(savefile)[1].lower() == '.pkl':
self._savePickle(savefile)
elif os.path.splitext(savefile)[1].lower() == '.csv':
self._saveCSV(savefile)
def _saveFileFormat(self):
files_types = "Pickle (*.pkl);; CSV (*.csv)"
self.savefile,_ = QFileDialog.getSaveFileName(self,
'Save as', os.getcwd(), files_types)
self.savefile = str(self.savefile)
if os.path.splitext(self.savefile)[1].lower() == '.pkl':
self._savePickle(self.savefile)
elif os.path.splitext(self.savefile)[1].lower() == '.csv':
self._saveCSV(self.savefile)
def _savePickle(self, filename):
self._stripDF.to_pickle(filename)
name = os.path.splitext(filename)
badname = name[0]+'.D'+name[1]
if len(self._badDF) != 0:
self._badDF.to_pickle(badname)
def _saveCSV(self, filename):
_stripDF = self._stripDF
_stripDF = _stripDF.drop(['codaSt','twoSt','twoResSt','codaResSt'], axis=1, errors='ignore')
_stripDF.to_csv(filename,index=False,sep=',')
if len(self._badDF) != 0:
_badDF = self._badDF
name = os.path.splitext(filename)
badname = name[0] +'.D' +name[1]
_badDF.to_csv(badname, index=False, sep=',')
def _openFile(self):
filename,_ = QFileDialog.getOpenFileName(self,'Load Pickle File',
os.getcwd(), 'Pickle Format (*.pkl)', '20')
if filename:
filename = str(filename)
self._stripDF = pd.read_pickle(filename)
name = os.path.splitext(filename)
badname = name[0]+'.D'+name[1]
if os.path.exists(badname):
self._badDF = pd.read_pickle(badname)
self._pltEvent()
self.savefile = str(filename)
def _openArray(self):
filename,_ = QFileDialog.getOpenFileName(self, 'Load array',
os.getcwd(), 'Pickle Format (*.pkl)', '20')
if filename:
filename = str(filename)
ar = util.loadArray(filename)
self._refreshArray(ar)
def _refreshArray(self, ar):
self.array = ar
self._plt_drag = None
# init events in the array
self._events = ar.events #defines list self._events
self.savefile = ar.name+'.strip.pkl'
self._initEqList()
self._stripDF = pd.DataFrame()
self._badDF = pd.DataFrame()
self._btype = 'beam'
self._method = 'all'
self.trinWin = [{'name':'N200-C200','noise':200.0,'coda':200.0,
'stime':400.0,'etime':1800,'model':'ak135'}]
self._current_win = None
self._current_strip = False
self._eventCycle = cycle(self._eqlist)
self._eventInfo(next(self._eventCycle))
self.setWindowTitle('Array Analysis: %s'%self.array.name)
self.evecb.clear()
for eve in self._eqlist:
self.evecb.addItem(eve)
self._drawFig()
def _savePlot(self):
# path = os.getcwd()
# path = os.path.join(path,self.array.name,self._current_event.ID)
file_types = "Image Format (*.png *.pdf *.ps *.eps);; ALL (*)"
filename,_ = QFileDialog.getSaveFileName(self, 'Save Plot',
os.getcwd(), file_types)
if not filename:
return
filename = str(filename)
formats = os.path.splitext(filename)[1][1:].lower()
if formats not in ['png', 'pdf', 'ps', 'eps']:
formats = 'png'
filename += '.' +formats
self.fig.savefig(filename)
def closeArray(self,event):
if len(self._stripDF) > 0 and self.savefile is None:
ask = QMessageBox.question(self, 'Save stripping?',
'Do you want to save your coda data?',
QMessageBox.Save |
QMessageBox.Discard |
QMessageBox.Cancel, QMessageBox.Save)
if ask == QMessageBox.Save:
self._saveFileFormat()
self.close()
elif ask == QMessageBox.Cancel:
event.ignore()
class defWindow(QDialog):
def __init__(self, parent=None, windowvalue=None):
"""
Coda strip window dialog
"""
QDialog.__init__(self, parent)
self.setWindowTitle('Create new coda strip window')
self.noisewin = QDoubleSpinBox(decimals=1, maximum=400, minimum=20, singleStep=10, value=10)
self.codawin = QDoubleSpinBox(decimals=1, maximum=400, minimum=20, singleStep=10, value=10)
self.stime = QDoubleSpinBox(decimals=1, maximum=600, minimum=0, singleStep=50, value=50)
self.etime = QDoubleSpinBox(decimals=1, maximum=2400, minimum=1600, singleStep=50, value=50)
self.smooth = QDoubleSpinBox(decimals=1, maximum=20, minimum=1, singleStep=1, value=4)
self.winName = QLineEdit('Window Name')
self.winName.selectAll()
self.model = QLineEdit('ak135')
self.model.selectAll()
grid = QGridLayout()
grid.addWidget(QLabel('Window Name'), 0, 0)
grid.addWidget(self.winName, 0, 1)
grid.addWidget(QLabel('Noise Win.'), 1, 0)
grid.addWidget(self.noisewin, 1, 1)
grid.addWidget(QLabel('Coda Win.'), 2, 0)
grid.addWidget(self.codawin, 2, 1)
grid.addWidget(QLabel('Start Time.'), 3, 0)
grid.addWidget(self.stime, 3, 1)
grid.addWidget(QLabel('End Time.'), 4, 0)
grid.addWidget(self.etime, 4, 1)
grid.addWidget(QLabel('Smooth.'), 5, 0)
grid.addWidget(self.smooth, 5, 1)
grid.addWidget(QLabel('Model.'), 6, 0)
grid.addWidget(self.model, 6, 1)
grid.setVerticalSpacing(10)
btnbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
btnbox.accepted.connect(self.accept)
btnbox.rejected.connect(self.reject)
layout = QVBoxLayout()
layout.addWidget(QLabel('Define noise window and coda window for stripping'))
layout.addLayout(grid)
layout.addWidget(btnbox)
if windowvalue is not None:
self.winName.setText(windowvalue['name'])
self.noisewin.setValue(windowvalue['noise'])
self.codawin.setValue(windowvalue['coda'])
self.stime.setValue(windowvalue['stime'])
self.etime.setValue(windowvalue['etime'])
self.smooth.setValue(windowvalue['smooth'])
self.model.setText(windowvalue['model'])
self.setLayout(layout)
self.setSizeGripEnabled(False)
def getValues(self):
"""
Return window dialog values as a dictionary
"""
return dict(name=str(self.winName.text()),
noise=float(self.noisewin.cleanText()),
coda=float(self.codawin.cleanText()),
stime=float(self.stime.cleanText()),
etime=float(self.etime.cleanText()),
smooth=float(self.smooth.cleanText()),
model=str(self.model.text()))
# class for event stacking in arrays
class stackArray(QtWidgets.QMainWindow):
def __init__(self, arraylist=None, parent=None, ap=None):
if ap is None:
self.qApp = QApplication(sys.argv)
else:
self.qApp = ap
if isinstance(arraylist, str):
arlist = pd.read_csv(arraylist, delimiter=r'\s+')
elif isinstance(arraylist, pd.DataFrame):
arlist = arraylist
else:
msg = 'Define array list in DataFrame or a path to a csv file'
raise ValueError(msg)
self._shortcuts = {'arr_next': 'n',
'arr_prev': 'p',
'cancel': 'c',
'accept': 'a',
'stack': 's'}
self._list = arlist
self._initArrayList()
self._arrayCycle = cycle(self._namelist)
self.dis = [{'name':'All-Dis',
'mindis': 50.0,
'maxdis': 75.0,
'step':25.0,
'overlap':0,
'write':False}]
self._arrayInfo(next(self._arrayCycle))
self._initReg()
QMainWindow.__init__(self)
self.setupUI()
def setupUI(self):
self.main_widget = QWidget(self)
self._initMenu()
self._createStatusBar()
self._initPlots()
l = QVBoxLayout(self.main_widget)
l.addLayout(self.btnbar)
l.addLayout(self.btnbar2)
l.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.setGeometry(300, 300, 1200, 800)
self.setWindowTitle('Array Stack')
self.show()
def _killLayout(self):
pass
def _initPlots(self):
self.fig = Figure(dpi=100, constrained_layout=True)
self.canvas = FigureCanvas(self.fig)
self.canvas.setFocusPolicy(PyQt5.QtCore.Qt.StrongFocus)
self._drawFig()
self.fig.canvas.mpl_connect('key_press_event', self._selectRegOnPress)
def _initMenu(self):
# Next and Prev array
nxt = QtWidgets.QPushButton('Next >>',
shortcut=self._shortcuts['arr_next'], parent=self.main_widget)
nxt.clicked.connect(self._pltNextArray)
nxt.setToolTip('shortcut <b>n</b>')
nxt.setMaximumWidth(150)
prv = QtWidgets.QPushButton('Prev >>',
shortcut=self._shortcuts['arr_prev'], parent=self.main_widget)
prv.clicked.connect(self._pltPrevArray)
prv.setToolTip('shortcut <b>p</b>')
prv.setMaximumWidth(150)
# Array drop-down
self.arcb = QComboBox(self)
for arr in self._namelist:
self.arcb.addItem(arr)
self.arcb.activated.connect(self._pltArray)
self.arcb.setMaximumWidth(1000)
self.arcb.setMinimumWidth(80)
# filter selection
self.filtcb = QComboBox(self)
self.filtcb.addItem('all')
for filt in self._current_filter:
self.filtcb.addItem(filt)
self.filtcb.activated.connect(self._drawStack)
self.filtcb.setMaximumWidth(1000)
self.filtcb.setMinimumWidth(80)
# Select region
# Stacking earthquakes in array
self.stbtn = QtWidgets.QPushButton('Stack',
shortcut=self._shortcuts['stack'], parent=self.main_widget)
self.stbtn.setCheckable(True)
self.stbtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.stbtn.setToolTip('shortcut <b>s</b>')
self.stbtn.clicked.connect(self._drawStack)
self.nbtn = QtWidgets.QPushButton('Norm',
parent=self.main_widget)
self.nbtn.setCheckable(True)
self.nbtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.nbtn.clicked.connect(self._drawStack)
self.ebtn = QtWidgets.QPushButton('ERR',
parent=self.main_widget)
self.ebtn.setCheckable(True)
self.ebtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.ebtn.clicked.connect(self._drawStack)
# Select distance
self.discb = QComboBox(self)
self.discb.activated.connect(self._changeStack)
self._updateWindow()
disEdit = QtWidgets.QPushButton('Edit')
disEdit.resize(disEdit.sizeHint())
disEdit.clicked.connect(self._editDis)
disDelt = QtWidgets.QPushButton('Delete')
disDelt.resize(disEdit.sizeHint())
disDelt.clicked.connect(self._deleteDis)
# Select region
self.regcb = QComboBox(self)
self.regcb.activated.connect(self._changeRegion)
self._updateRegion()
regEdit = QtWidgets.QPushButton('Edit')
regEdit.resize(regEdit.sizeHint())
regEdit.clicked.connect(self._editRegion)
regDelt = QtWidgets.QPushButton('Delete')
regDelt.resize(regDelt.sizeHint())
regDelt.clicked.connect(self._deleteRegion)
#button to plot all regions in one plot
self.allbtn = QPushButton('All Region')
self.allbtn.setCheckable(True)
self.allbtn.setStyleSheet('QPushButton:checked {background-color: lightgreen;}')
self.allbtn.clicked.connect(self._stackAll)
#reset region button
self.rsbtn = QtWidgets.QPushButton('Reset')
self.rsbtn.clicked.connect(self._resetReg)
self.btnbar = QHBoxLayout()
self.btnbar.addWidget(prv)
self.btnbar.addWidget(nxt)
self.btnbar.addWidget(QLabel('Array'))
self.btnbar.addWidget(self.arcb)
vline = QFrame()
vline.setFrameStyle(QFrame.VLine | QFrame.Raised)
self.btnbar.addWidget(vline)
self.btnbar.addWidget(self.stbtn)
self.btnbar.addWidget(QLabel('Step'))
self.btnbar.addWidget(self.discb)
self.btnbar.addWidget(disEdit)
self.btnbar.addWidget(disDelt)
self.btnbar.addStretch(1)
self.btnbar2 = QHBoxLayout()
self.btnbar2.addWidget(QLabel('Region'))
self.btnbar2.addWidget(self.regcb)
self.btnbar2.addWidget(regEdit)
self.btnbar2.addWidget(regDelt)
self.btnbar2.addWidget(self.allbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.rsbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.nbtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(self.ebtn)
self.btnbar2.addWidget(vline)
self.btnbar2.addWidget(QLabel('Filter'))
self.btnbar2.addWidget(self.filtcb)
self.btnbar2.addStretch(1)
#Menubar
menubar = self.menuBar()
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(QtGui.QIcon().fromTheme('document-save'),
'Save Reg', self._saveFile)
def _saveFile(self):
if len(self._region) >= 2:
_region = self._region[1:]
for _reg in _region:
name = _reg['name']
array = self._current_array['name']
_df = self.regDf[name]
savename = os.path.join(array,name+'.pkl')
_df.to_pickle(savename)
def _initArrayList(self):
self._arlist = pd.DataFrame()
for _ind, _ar in self._list.iterrows():
name = _ar.NAME
tmp = os.path.join(name,_ar.FILE)
tmp_df = pd.read_pickle(tmp)
newRow = {'NAME':name,'DF':tmp_df,
'LAT':_ar.LAT, 'LON':_ar.LON}
self._arlist = self._arlist.append(newRow, ignore_index=True)
self._namelist = self._list.NAME.tolist()
def _initReg(self):
self._region = [{'name':'global',
'latmin':-90.0, 'latmax':90.0,
'lonmin': -180.0, 'lonmax': 180.0}]
self.stackSt = {}
self.stdSt = {}
win = self._current_array_df['win'].iloc[0]
window = [win['noise'], win['coda']]
self.regDf = {'global':self._current_array_df}
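# stackTR (imported elsewhere in this module) is assumed to stack the stripped
# traces into distance bins between mindis and maxdis, using the given step and
# overlap, and to return the stacked stream together with its standard-deviation
# stream; the same call pattern is reused in _calStack() below.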
_stackSt, _stdSt = stackTR(self._current_array_df,
pklname=None,win=window,
mindis=self.dis[0]['mindis'],
maxdis=self.dis[0]['maxdis'],
step=self.dis[0]['step'],
overlap=self.dis[0]['overlap'],
write=self.dis[0]['write'])
self.stackSt = {'global': _stackSt}
self.stdSt = {'global': _stdSt}
def _arrayInfo(self, name):
ardf = self._arlist[self._arlist.NAME == name].iloc[0]
array = {'name':ardf.NAME,'lat':ardf.LAT,'lon':ardf.LON}
self._current_array = array
self._current_array_df = ardf.DF
self._current_filter = self._current_array_df.iloc[0].crms.FILT
# self._initReg()
def _createStatusBar(self):
sb = QStatusBar()
sb.setFixedHeight(18)
self.setStatusBar(sb)
self.statusBar().showMessage('Ready')
def _drawFig(self):
self.fig.clear()
# self.fig.add_subplot(1, 1, 1)
# _ax = self.fig.get_axes()
_ax = self.fig.add_subplot(1, 1, 1)
m = Basemap(projection='cyl', lon_0=0, lat_0=0.0,
area_thresh=10000,ax=_ax)
x0, y0 = m(self._current_array['lon'], self._current_array['lat'])
m.drawcoastlines(ax=_ax)
m.drawmapboundary(ax=_ax)
m.fillcontinents(color='lightgray',lake_color='white',ax=_ax)
parallels = np.arange(-90.0, 90.0, 60)
# labels = [left, right, top, bottom]
m.drawparallels(parallels,labels=[True, False, False, False],ax=_ax)
meridians = np.arange(-180.0, 180.0, 60.0)
m.drawmeridians(meridians,labels=[False, False, False, True],ax=_ax)
m.scatter(x0, y0, marker='*',c='r',s=100,alpha=0.7,ax=_ax,zorder=10)
if self.allbtn.isChecked() and len(self._region) < 2:
x, y = m(self._current_array_df.lon.tolist(), self._current_array_df.lat.tolist())
m.scatter(x, y, marker='o', c='blue', s=50, alpha=0.7,ax=_ax,zorder=10)
elif self.allbtn.isChecked() and len(self._region) >= 2:
_region = self._region[1:]
_current_df = self._current_array_df
_rest_df = _current_df.copy()
for _j, _reg in enumerate(_region):
_name = _reg['name']
_df = self.regDf[_name]
_rest_df = _rest_df[~((_rest_df['lat']>_reg['latmin']) &
(_rest_df['lat']<_reg['latmax']) &
(_rest_df['lon']>_reg['lonmin']) &
(_rest_df['lon']<_reg['lonmax']))]
x, y = m(_df.lon.tolist(), _df.lat.tolist())
m.scatter(x, y, marker='o', c=color[_j], s=50, alpha=0.7,ax=_ax,zorder=10)
x, y = m(_rest_df.lon.tolist(), _rest_df.lat.tolist())
m.scatter(x, y, marker='o', c='k', s=50, alpha=0.7,ax=_ax,zorder=10)
elif self.allbtn.isChecked() is False:
_i = self.regcb.currentIndex()
if _i == 0:
x, y = m(self._current_array_df.lon.tolist(), self._current_array_df.lat.tolist())
m.scatter(x, y, marker='o', c='blue', s=50, alpha=0.7,ax=_ax,zorder=10)
self._pltRectangle()
else:
_reg = self._region[_i]
_name = _reg['name']
_df = self.regDf[_name]
x, y = m(_df.lon.tolist(), _df.lat.tolist())
m.scatter(x, y, marker='o', c=color[_i-1], s=50, alpha=0.7,ax=_ax,zorder=10)
self.fig.suptitle('Earthquakes in array %s'%self._current_array['name'])
self._canvasDraw()
def _canvasDraw(self):
for _i, _ax in enumerate(self.fig.get_axes()):
_ax.set_xticklabels(_ax.get_xticks())
self.fig.canvas.draw()
self.canvas.setFocus()
def _pltArray(self):
_i = self.arcb.currentIndex()
while next(self._arrayCycle) != self._namelist[_i]:
pass
self._arrayInfo(self._namelist[_i])
self.filtcb.clear()
self.filtcb.addItem('all')
for filt in self._current_filter:
self.filtcb.addItem(filt)
self._initReg()
self._drawFig()
# self._resetReg()
def _pltPrevArray(self):
_j = self.arcb.currentIndex()
for _i in range(len(self._namelist) - 1):
prevarray = next(self._arrayCycle)
self._arrayInfo(prevarray)
self.filtcb.clear()
self.filtcb.addItem('all')
for filt in self._current_filter:
self.filtcb.addItem(filt)
self.arcb.setCurrentIndex(_j-1)
self._initReg()
self._drawFig()
# self._resetReg()
def _pltNextArray(self):
_i = self.arcb.currentIndex()
self._arrayInfo(next(self._arrayCycle))
self.filtcb.clear()
self.filtcb.addItem('all')
for filt in self._current_filter:
self.filtcb.addItem(filt)
self.arcb.setCurrentIndex(_i+1)
# self._resetReg()
self._initReg()
self._drawFig()
def _calStack(self):
_i = self.discb.currentIndex()
self._arrayInfo(self._current_array['name'])
savefile = None
win = self._current_array_df['win'].iloc[0]
window = [win['noise'], win['coda']]
_j = self.regcb.currentIndex()
_reg = self._region[_j]
if self.dis[_i]['write']:
savefile = _reg['name'] + '.'+self.dis[_i]['name'] + '.sac'
_current_df = self._current_array_df
_df = _current_df[(_current_df['lat']>_reg['latmin']) &
(_current_df['lat']<_reg['latmax']) &
(_current_df['lon']>_reg['lonmin']) &
(_current_df['lon']<_reg['lonmax'])]
_df.reset_index(inplace=True,drop=True)
self.regDf[_reg['name']] = _df
_stackSt, _stdSt = stackTR(_df,
pklname=savefile,win=window,
mindis=self.dis[_i]['mindis'],
maxdis=self.dis[_i]['maxdis'],
step=self.dis[_i]['step'],
overlap=self.dis[_i]['overlap'],
write=self.dis[_i]['write'])
self.stackSt[_reg['name']] = _stackSt
self.stdSt[_reg['name']] = _stdSt
# if self.stbtn.isChecked():
# self._drawStack()
# else:
# self._drawFig()
def _drawStack(self):
self.fig.clear()
if self.stbtn.isChecked() is False:
self._drawFig()
return
# self.fig.add_subplot(121)
_i = self.discb.currentIndex()
this_dis = self.dis[_i]
win = self._current_array_df['win'].iloc[0]
window = [win['noise'], win['coda']]
step_forward = this_dis['step'] * (1 - this_dis['overlap'])
n = int((this_dis['maxdis'] - this_dis['mindis'])/step_forward)
ftype = self.filtcb.currentText()
if ftype == 'all':
current_filter = self._current_filter
else:
current_filter = [ftype]
n_filt = len(current_filter)
if self.allbtn.isChecked() is False or len(self._region) < 2:
_i = self.regcb.currentIndex()
_current_df = self._current_array_df
_name = self._region[_i]['name']
_df = self.regDf[_name]
_stackSt = self.stackSt[_name]
_stdSt = self.stdSt[_name]
#n = len(_stackSt)
# gs = self.fig.add_gridspec(n,2)
gs = gridspec.GridSpec(ncols=n_filt+1, nrows=n, figure=self.fig)
_ax = self.fig.add_subplot(gs[:,0])
m = Basemap(projection='cyl', lon_0=0, lat_0=0.0,
area_thresh=10000,ax=_ax)
x0, y0 = m(self._current_array['lon'], self._current_array['lat'])
alon = _current_df[(~_current_df.lon.isin(_df.lon)) & (~_current_df.lat.isin(_df.lat))].lon.tolist()
alat = _current_df[(~_current_df.lon.isin(_df.lon)) & (~_current_df.lat.isin(_df.lat))].lat.tolist()
x, y = m(alon, alat)
xt, yt = m(_df.lon.tolist(), _df.lat.tolist())
m.drawcoastlines(ax=_ax)
m.drawmapboundary(ax=_ax)
m.fillcontinents(color='lightgray',lake_color='white',ax=_ax)
parallels = np.arange(-90.0, 90.0, 60)
m.drawparallels(parallels,ax=_ax)
meridians = np.arange(-180.0, 180.0, 60.0)
m.drawmeridians(meridians,ax=_ax)
if _i == 0:
c = 'blue'
else:
c = color[_i-1]
m.scatter(x0, y0, marker='*',c='r',s=100,alpha=0.7,ax=_ax,zorder=10)
m.scatter(x, y, marker='o', c='k', s=50, alpha=0.7,ax=_ax,zorder=10)
m.scatter(xt, yt, marker='o', c='r', s=50, alpha=0.7,ax=_ax,zorder=10)
# self.fig.add_subplot(122)
delta = _stackSt[0].stats.delta
npts = _stackSt[0].stats.npts
time = np.arange(npts)*delta + _stackSt[0].stats.sac.b
for ind, f in enumerate(current_filter):
_st = _stackSt.select(station=f).copy()
_st.sort(['channel'])
_std_st = _stdSt.select(station=f).copy()
_std_st.sort(['channel'])
delta = _st[0].stats.delta
sind = int(window[0]/delta)
eind = sind + int(window[1]/delta)
for i in range(n):
_ax_st = self.fig.add_subplot(gs[i,ind+1])
if i == n-1:
_ax_st.set_xlabel('Time (s)')
if i == 0:
_ax_st.set_title('Filter: %s'%f)
peak, data = norm(_st[i].data, sind, eind)
if self.nbtn.isChecked():
_ax_st.plot(time, data,'darkred', label=_st[i].stats.channel)
if self.ebtn.isChecked():
_ax_st.errorbar(time, data, yerr=2*_std_st[i].data,
marker='.',mew=0.1, ecolor='red', linewidth=0.2, markersize=0.2,
capsize=0.1, alpha=0.5)
_ax_st.set_ylim([-0.1, 1.1])
else:
_ax_st.plot(time, _st[i].data,'darkred', label=_st[i].stats.channel)
if self.ebtn.isChecked():
_ax_st.errorbar(time, _st[i].data, yerr=2*_std_st[i].data,
marker='.',mew=0.1, ecolor='red', linewidth=0.2, markersize=0.2,
capsize=0.1, alpha=0.5)
peak = peak + 0.1
_ax_st.set_ylim([-0.1, peak])
_ax_st.hlines(0,time[0],time[-1],'k')
_ax_st.set_xlim([-window[0], window[0]+window[1]])
_ax_st.legend()
else:
_region = self._region[1:]
_current_df = self._current_array_df
#_i = self.discb.currentIndex()
#this_dis = self.dis[_i]
#step_forward = this_dis['step'] * (1 - this_dis['overlap'])
#n = int((this_dis['maxdis'] - this_dis['mindis'])/step_forward) + 1
gs = self.fig.add_gridspec(n,n_filt+1)
_ax = self.fig.add_subplot(gs[:,0])
m = Basemap(projection='cyl', lon_0=0.0, lat_0=0.0,
area_thresh=10000,ax=_ax)
x0, y0 = m(self._current_array['lon'], self._current_array['lat'])
x, y = m(self._current_array_df.lon.tolist(), self._current_array_df.lat.tolist())
m.drawcoastlines(ax=_ax)
m.drawmapboundary(ax=_ax)
m.fillcontinents(color='lightgray',lake_color='white',ax=_ax)
parallels = np.arange(-90.0, 90.0, 60)
m.drawparallels(parallels,ax=_ax)
meridians = np.arange(-180.0, 180.0, 60.0)
m.drawmeridians(meridians,ax=_ax)
x0, y0 = m(self._current_array['lon'], self._current_array['lat'])
m.scatter(x0, y0, marker='*',c='r',s=100,alpha=0.7,ax=_ax,zorder=10)
for _i, _reg in enumerate(_region):
_name = _reg['name']
# print(_name)
_df = self.regDf[_name]
x, y = m(_df.lon.tolist(), _df.lat.tolist())
m.scatter(x, y, marker='o', c=color[_i], s=50, alpha=0.7,ax=_ax,zorder=10)
_stackSt = self.stackSt[_name]
_stdSt = self.stdSt[_name]
delta = _stackSt[0].stats.delta
npts = _stackSt[0].stats.npts
time = np.arange(npts)*delta + _stackSt[0].stats.sac.b
for ind, f in enumerate(current_filter):
_st = _stackSt.select(station=f).copy()
_st.sort(['channel'])
_std_st = _stdSt.select(station=f).copy()
_std_st.sort(['channel'])
delta = _st[0].stats.delta
sind = int(window[0]/delta)
eind = sind + int(window[1]/delta)
for i in range(n):
_ax_st = self.fig.add_subplot(gs[i,ind+1])
if i == n-1:
_ax_st.set_xlabel('Time (s)')
if i == 0:
_ax_st.set_title('Filter: %s'%f)
peak, data = norm(_st[i].data, sind, eind)
label = _name+':'+_st[i].stats.channel
if self.nbtn.isChecked():
_ax_st.plot(time, data,color=color[_i],label=label)
if self.ebtn.isChecked():
_ax_st.errorbar(time, data, yerr=2*_std_st[i].data,
marker='.',mew=0.1, ecolor=color[_i], linewidth=0.2, markersize=0.2,
capsize=0.1, alpha=0.5)
_ax_st.set_ylim([-0.1, 1.1])
else:
_ax_st.plot(time, _st[i].data,color='dark'+color[_i],label=label)
if self.ebtn.isChecked():
_ax_st.errorbar(time, _st[i].data, yerr=2*_std_st[i].data,
marker='.',mew=0.1, ecolor=color[_i], linewidth=0.2, markersize=0.2,
capsize=0.1, alpha=0.5)
peak = peak+0.1
_ax_st.set_ylim([-0.1, peak])
_ax_st.hlines(0,time[0],time[-1],'k')
_ax_st.set_xlim([-window[0], window[0]+window[1]])
_ax_st.legend()
self._canvasDraw()
def _stackAll(self):
if self.stbtn.isChecked():
self._drawStack()
else:
self._drawFig()
def _line_select_callback(self, eclick, erelease):
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
# msg= 'Startposition: (%f, %f)\tendposition: (%f, %f)'%(x1, y1, x2, y2)
# gpar.log(__name__,msg,level='info',pri=True)
def _pltRectangle(self):
_ax = self.fig.get_axes()[0]
self._RS = RectangleSelector(_ax, self._line_select_callback,
drawtype='box', useblit=True,
button=[1],
minspanx=1, minspany=1,
interactive=True,
state_modifier_keys={'move': ' ','center': 'ctrl',
'square': 'shift','clear': self._shortcuts['cancel']})
def _selectRegOnPress(self,event):
if event.key is not None:
event.key = event.key.lower()
if event.inaxes is None:
return
if event.key == self._shortcuts['accept'] and self._RS.active:
extents=self._RS.extents
_value = dict(name='name',lonmin=extents[0],lonmax=extents[1],latmin=extents[2],latmax=extents[3])
self._newReg(_value)
def _newReg(self, value=None):
newReg = self.defReg(regionValue=value)
if newReg.exec_():
self._region.append(newReg.getValues())
self._updateRegion()
self.regcb.setCurrentIndex(len(self._region)-1)
self._calStack()
# self._appStack()
def _editRegion(self):
_i =self.regcb.currentIndex()
this_region = self._region[_i]
editRegion = self.defReg(self, this_region)
if editRegion.exec_():
self._region[_i] = editRegion.getValues()
self._updateRegion()
self.regcb.setCurrentIndex(_i)
self._calStack()
self._drawStack()
def _deleteRegion(self):
_i = self.regcb.currentIndex()
name = self._region[_i]['name']
if name == 'global':
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText("Warning Text")
msg.setInformativeText("Global region is not deletebale")
msg.exec_()
return
else:
self._region.pop(_i)
self._updateRegion()
def _changeRegion(self, index):
if index == len(self._region):
self._newReg()
if self.stbtn.isChecked():
self._drawStack()
else:
self._drawFig()
else:
if self.stbtn.isChecked():
self._drawStack()
else:
self._drawFig()
def _updateRegion(self):
self.regcb.clear()
self.regcb.setCurrentIndex(-1)
for _i, _f in enumerate(self._region):
self.regcb.addItem('Region: %s'%(_f['name']))
self.regcb.addItem('Create new region')
def _resetReg(self):
self._region = [{'name':'global',
'latmin':-90.0, 'latmax':90.0,
'lonmin': -180.0, 'lonmax': 180.0}]
self.regDf = {'global':self._current_array_df}
self._updateRegion()
self._drawFig()
def _changeStack(self,index):
if index == len(self.dis):
self._newDis()
if self.stbtn.isChecked():
self._drawStack()
else:
self._drawFig()
else:
self._calStack()
if self.stbtn.isChecked():
self._drawStack()
else:
self._drawFig()
def _newDis(self):
newDis = self.defDisStep(self)
if newDis.exec_():
self.dis.append(newDis.getValues())
self._updateWindow()
self.discb.setCurrentIndex(len(self.dis)-1)
self._calStack()
def _editDis(self):
_i = self.discb.currentIndex()
this_window = self.dis[_i]
editWindow = self.defDisStep(self, this_window)
if editWindow.exec_():
self.dis[_i] = editWindow.getValues()
self._updateWindow()
self.discb.setCurrentIndex(_i)
self._calStack()
def _deleteDis(self):
pass
_i = self.discb.currentIndex()
def _updateWindow(self):
self.discb.clear()
self.discb.setCurrentIndex(-1)
for _i, _f in enumerate(self.dis):
self.discb.addItem('Step %.2f deg - overlap %.2f ' %(_f['step'], _f['overlap']))
self.discb.addItem('Create new distance stack')
class defReg(QDialog):
def __init__(self, parent=None, regionValue=None):
QDialog.__init__(self, parent)
self.setWindowTitle('Assign Name for the Region')
self.Name = QLineEdit('Name')
self.Name.selectAll()
self.latmin = QDoubleSpinBox(decimals=1, maximum=90.0, minimum=-90.0, singleStep=5, value=0)
self.latmax = QDoubleSpinBox(decimals=1, maximum=90.0, minimum=-90.0, singleStep=5, value=0)
self.lonmin = QDoubleSpinBox(decimals=1, maximum=180.0, minimum=-180.0, singleStep=5, value=0)
self.lonmax = QDoubleSpinBox(decimals=1, maximum=180.0, minimum=-180.0, singleStep=5, value=0)
# self.saveTr = ['True', 'False']
grid = QGridLayout()
grid.addWidget(QLabel('Region Name'), 0, 0)
grid.addWidget(self.Name, 0, 1)
grid.addWidget(QLabel('Min. Lat'), 1, 0)
grid.addWidget(self.latmin, 1, 1)
grid.addWidget(QLabel('Max. Lat'), 2, 0)
grid.addWidget(self.latmax, 2, 1)
grid.addWidget(QLabel('Min. Lon'), 3, 0)
grid.addWidget(self.lonmin, 3, 1)
grid.addWidget(QLabel('Max. Lon'), 4, 0)
grid.addWidget(self.lonmax, 4, 1)
grid.setVerticalSpacing(10)
btnbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
btnbox.accepted.connect(self.accept)
btnbox.rejected.connect(self.reject)
layout = QVBoxLayout()
layout.addWidget(QLabel('Define the latitude/longitude bounds of the region'))
layout.addLayout(grid)
layout.addWidget(btnbox)
if regionValue is not None:
self.Name.setText(regionValue['name'])
self.latmin.setValue(regionValue['latmin'])
self.latmax.setValue(regionValue['latmax'])
self.lonmin.setValue(regionValue['lonmin'])
self.lonmax.setValue(regionValue['lonmax'])
self.setLayout(layout)
self.setSizeGripEnabled(False)
def getValues(self):
return dict(name=str(self.Name.text()),
latmin=float(self.latmin.cleanText()),
latmax=float(self.latmax.cleanText()),
lonmin=float(self.lonmin.cleanText()),
lonmax=float(self.lonmax.cleanText()))
class defDisStep(QDialog):
def __init__(self, parent=None, stepvalue=None):
QDialog.__init__(self, parent)
self.setWindowTitle('Create new stacking distance step')
self.mindis = QDoubleSpinBox(decimals=1, maximum=180, minimum=0, singleStep=1, value=50)
self.maxdis = QDoubleSpinBox(decimals=1, maximum=180, minimum=0, singleStep=1, value=75)
self.step = QDoubleSpinBox(decimals=1, maximum=100, minimum=0.1, singleStep=0.1, value=0.1)
self.overlap = QDoubleSpinBox(decimals=2, maximum=1.0, minimum=0.0, singleStep=0.1, value=0.1)
self.Name = QLineEdit('Name')
self.Name.selectAll()
self.saveTr = ['True', 'False']
grid = QGridLayout()
grid.addWidget(QLabel('Name'), 0, 0)
grid.addWidget(self.Name, 0, 1)
grid.addWidget(QLabel('Min. Dis'), 1, 0)
grid.addWidget(self.mindis, 1, 1)
grid.addWidget(QLabel('Max. Dis'), 2, 0)
grid.addWidget(self.maxdis, 2, 1)
grid.addWidget(QLabel('Step'), 3, 0)
grid.addWidget(self.step, 3, 1)
grid.addWidget(QLabel('Overlap'), 4, 0)
grid.addWidget(self.overlap, 4, 1)
_savebtn = [QRadioButton("Yes"), QRadioButton("No")]
self.saveGrp = QButtonGroup()
self.saveGrp.setExclusive(True)
sbtn = QHBoxLayout()
for _i, _btn in enumerate(_savebtn):
self.saveGrp.addButton(_btn, _i)
sbtn.addWidget(_btn)
grid.addWidget(QLabel('Save Stack'), 5, 0)
grid.addLayout(sbtn, 5, 1)
btnbox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
btnbox.accepted.connect(self.accept)
btnbox.rejected.connect(self.reject)
layout = QVBoxLayout()
layout.addWidget(QLabel('Define distance steps and overlap for stacking'))
layout.addLayout(grid)
layout.addWidget(btnbox)
if stepvalue is not None:
self.Name.setText(stepvalue['name'])
self.mindis.setValue(stepvalue['mindis'])
self.maxdis.setValue(stepvalue['maxdis'])
self.step.setValue(stepvalue['step'])
self.overlap.setValue(stepvalue['overlap'])
self.setLayout(layout)
self.setSizeGripEnabled(False)
def getValues(self):
savefile = self.saveTr[self.saveGrp.checkedId()]
return dict(name=str(self.Name.text()),
mindis=float(self.mindis.cleanText()),
maxdis=float(self.maxdis.cleanText()),
step=float(self.step.cleanText()),
overlap=float(self.overlap.cleanText()),
write=(savefile == 'True'))
def codaStrip(eve, method='all',
siglen=200, noise=200,beamphase='PKiKP',
phase_list=['P','PP','PcP','ScP','PKiKP','SP','ScS'],
model='ak135', stime=400.0, etime=1800.0,
window=10, write=False):
"""
Function to remove background coda noise for events
"""
if phase_list is None:
phase_list = eve.phase_list
if not hasattr(eve, 'arrivals'):
eve.getArrival(phase_list=phase_list,model=model)
if not hasattr(eve, 'beam'):
msg = ('Earthquake object has not calculate the beamforming yet')
gpar.log(__name__,msg,level='error',pri=True)
st = eve.beam
delta = st[0].stats.delta
noi_sind = int((eve.arrivals['P']['TT'] - 300.0)/delta)
noi_win = int(100.0/delta)
filts=[]
starttime = st[0].stats.starttime
tt1 = eve.arrivals[beamphase]['TT'] - noise #- starttime
tt2 = eve.arrivals[beamphase]['TT'] + siglen #- starttime
# tari = eve.arrivals[beamphase]['UTC'] - starttime
tari = eve.arrivals[beamphase]['TT']
n_tr = len(st)
npts = st[0].stats.npts
if npts%2 !=0:
npts = npts-1
data = np.empty((n_tr,npts))
# mean = np.empty((n_tr, 1))
for ind, tr in enumerate(st):
npt = tr.stats.npts
if npt%2 != 0:
tdata = tr.data[:-1]
else:
tdata = tr.data
tmp_data = np.abs(scipy.signal.hilbert(tdata))
# tmp_data = moving_ave(tmp_data, window)
# mean[ind,:] = np.mean(tmp_data[noi_sind:noi_sind+noi_win])
# print(mean)
# data[ind,:] = tmp_data - mean
# print(data[ind,:])
data[ind,:] = tmp_data
filts.append(tr.stats.channel)
data = moving_ave(data, window)
# data = np.abs(scipy.signal.hilbert(tr.data))
sig_pts = int(siglen/delta) + 1
noi_pts = int(noise/delta) + 1
noi_ind1 = int(tt1/delta)
sig_ind = int(tari/delta)
noi_ind2 = int(tt2/delta)
time_before = tt1 + np.arange(int(noise/delta)+1) * delta
# data_before = np.empty((n_tr, noi_pts))
data_before = data[:, noi_ind1: noi_ind1 + noi_pts]
# print(np.min(data_before))
data_sig = data[:, sig_ind:sig_ind + sig_pts]
time_after = tt2 + np.arange(int(noise/delta)+1) * delta
data_after = data[:, noi_ind2: noi_ind2+noi_pts]
# print(np.min(data_after))
sind = int(stime/delta)
# npts = int((etime - stime)/delta) + 1
npts = int((etime - stime)/delta)
# time = np.matrix(np.linspace(stime, etime, npts))
time = stime + np.arange(npts) * delta
obs_data = data[:, sind: sind+npts]
ind = int((tari - stime)/delta)
res_ind = int((tt1 - stime)/delta)
pts = int((noise*2+siglen)/delta)+1
if method == 'all':
#fitting coda model
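# The coda model evaluated below has the form A(t) = exp(lnA - B*ln(t) - C*t),
# i.e. a power-law decay t**(-B) times an exponential decay exp(-C*t); one
# (lnA, B, C) triplet is fitted per filter band by codaFit().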
coda_par = codaFit(np.append(time_before,time_after),np.append(data_before,data_after,axis=1))
# print(coda_par)
#getting predict noise signal in linear scale
coda_data = np.asarray(np.exp(np.transpose(coda_par[0,:]) - np.transpose(coda_par[1,:]) \
*np.log(time) - np.transpose(coda_par[2,:])*time))
# coda_data = np.asarray(coda_data)
#getting residual signal after removing the predict noise
# coda_res = moving_ave(obs_data, window) - coda_data
coda_res = obs_data - coda_data
res = np.mean(coda_res[:,ind:ind+sig_pts],axis=-1)
#store coda model information
_df = pd.DataFrame(columns=['FILT','lnA','B','C','RMS'])
_df['FILT'] = filts
_df['lnA'] = np.asarray(coda_par)[0]
_df['B'] = np.asarray(coda_par)[1]
_df['C'] = np.asarray(coda_par)[2]
_df['RMS'] = res
# codamod = {'RMS':res,'lnA':coda_par[0][0],'B':coda_par[1][0],'C':coda_par[2][0]}
# eve.codaMod = codamod
eve.codaMod = _df
codaSt = obspy.core.stream.Stream()
resSt = obspy.core.stream.Stream()
for i in range(n_tr):
_tr = obspy.core.trace.Trace()
_tr.stats.delta = delta
_tr.stats.npts = npts
_tr.stats.starttime = starttime + stime
_tr.stats.channel = filts[i]
_tr.data = coda_data[i,:]
codaSt.append(_tr)
_tr = obspy.core.trace.Trace()
_tr.stats.delta = delta
_tr.stats.npts = pts
_tr.stats.starttime = eve.arrivals[beamphase]['UTC'] - noise
_tr.stats.channel = filts[i]
_tr.data = coda_res[i,res_ind:res_ind+pts]
resSt.append(_tr)
eve.codaSt = codaSt
eve.codaResSt = resSt
#fitting twoline model
#For multi-filters all calculations are using np.matrix
#all np.matrix are converted back to np.array to store
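# Two-line model, as implemented below: straight lines are fitted to the noise
# windows before and after the signal (apparently in log10 amplitude, given the
# 10**model used for the residual); the segment spanning the signal window is
# the straight line joining the two fits at tari and tt2, and the residual is
# the smoothed envelope minus 10**model.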
twoline_par_before = twoLineFit(time_before, data_before)
twoline_par_after = twoLineFit(time_after, data_after)
y1 = twoline_par_before[0,:] + twoline_par_before[1,:] * tari
y2 = twoline_par_after[0,:] + twoline_par_after[1,:] * tt2
k = (y2 - y1)/(tt2 - tari)
b = y2 - k * tt2
t1 = np.matrix(np.linspace(tt1,tari, int(noise/delta)+1))
d1 = np.asarray(np.transpose(twoline_par_before[0,:]) + np.transpose(twoline_par_before[1,:]) * t1)
t2 = np.matrix(np.linspace(tari+delta, tt2, int(siglen/delta)))
d2 = np.asarray(np.transpose(k) * t2 + np.transpose(b))
t3 = np.matrix(np.linspace(tt2+delta,tt2+noise, int(noise/delta)))
d3 = np.asarray(np.transpose(twoline_par_after[0,:]) + np.transpose(twoline_par_after[1,:]) * t3)
two_data = np.append(d1,d2,axis=-1)
two_data = np.append(two_data,d3,axis=-1)
two_res = moving_ave(obs_data[:,res_ind: res_ind+pts], window) - 10**two_data
res = np.mean(two_res[:,int(int(noise)/delta):int(int(noise)/delta)+sig_pts],axis=-1)
_df = pd.DataFrame(columns=['FILT','kn1','bn1','kn2','bn2','RMS'])
_df['FILT'] = filts
_df['kn1'] = np.asarray(twoline_par_before)[1]
_df['bn1'] = np.asarray(twoline_par_before)[0]
_df['kn2'] = np.asarray(twoline_par_after)[1]
_df['bn2'] = np.asarray(twoline_par_after)[0]
_df['RMS'] = res
# twomod = {'kn1':twoline_par_before[1][0],'bn1':twoline_par_before[0][0],
# 'kn2':twoline_par_after[1][0],'bn2':twoline_par_after[0][0],'RMS':res}
eve.twoMod = _df
twoSt = obspy.core.stream.Stream()
resSt = obspy.core.stream.Stream()
pts = int((noise*2+siglen)/delta)+1
# res_ind = int((tt1 - stime)/delta)
for i in range(n_tr):
_tr = obspy.core.trace.Trace()
_tr.stats.delta = delta
_tr.stats.starttime = starttime + tt1
_tr.stats.channel = filts[i]
_tr.data = two_data[i]
twoSt.append(_tr)
_trr = obspy.core.trace.Trace()
_trr.stats.delta = delta
_trr.stats.starttime = eve.arrivals[beamphase]['UTC'] - noise
_trr.stats.channel = filts[i]
_trr.stats.npts = pts
_trr.data = two_res[i]
resSt.append(_trr)
eve.twoSt = twoSt
eve.twoResSt = resSt
elif method == 'coda':
#fitting coda model
coda_par = codaFit(np.append(time_before,time_after),np.append(data_before,data_after,axis=1))
#getting predict noise signal in linear scale
coda_data = np.asarray(np.exp(np.transpose(coda_par[0,:]) - np.transpose(coda_par[1,:]) \
*np.log(time) - np.transpose(coda_par[2,:])*time))
#getting residual signal after removing the predict noise
coda_res = moving_ave(obs_data, window) - coda_data
res = np.mean(coda_res[:,ind:ind+sig_pts],axis=-1)
#store coda model information
_df = pd.DataFrame(columns=['FILT','lnA','B','C','RMS'])
_df['FILT'] = filts
_df['lnA'] = np.asarray(coda_par)[0]
_df['B'] =
|
np.asarray(coda_par)
|
numpy.asarray
|
import numpy as np
import math
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import xarray as xr
#
# Plot various fields from LLC4320 Hawaii/North Pacific cutout region.
# Shows how to read files, what associated grid is, what times different
# file name indices correspond to etc....
#
# Fields directories
droot='/nobackup1b/users/jahn/hinpac/grazsame3/run/run.0354';
sdflds=['offline-0604', 'offline']
sdfld=['THETA','SALT','UVEL','VVEL','WVEL']
##########################
# Read in Grid variables #
##########################
nz=40;nx=1080;ny=2700;
fxc="%s/XC.data"%(droot)
fxg="%s/XG.data"%(droot)
fyc="%s/YC.data"%(droot)
fyg="%s/YG.data"%(droot)
dtyp='>f4';nr=nx*ny;
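# '>f4' denotes big-endian 32-bit floats, the byte order these MITgcm binary
# grid/field files are assumed to use.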
RS = lambda phi: np.reshape(phi,(ny,nx))
xc=np.fromfile(fxc, dtype=dtyp,count=nr);
xg=np.fromfile(fxg, dtype=dtyp,count=nr);
yc=np.fromfile(fyc, dtype=dtyp,count=nr);
yg=np.fromfile(fyg, dtype=dtyp,count=nr);
print('West limit %10.5fE' % min(xc))
print('East limit %10.5fE' % max(xc))
print('North limit %10.5fN' % max(yc))
print('South limit %10.5fN' % min(yc))
xc=RS(xc);xg=RS(xg);yc=RS(yc);yg=RS(yg);
# Show horizontal Grid variables
fig=plt.figure(figsize=(20, 8), dpi= 80, facecolor='w', edgecolor='k')
plt.subplot(1,4,1); plt.imshow(xc,origin='lower',cmap='gist_ncar');plt.title('XC - lon cell center');cbar=plt.colorbar()
ax=plt.subplot(1,4,2); plt.imshow(xg,origin='lower',cmap='gist_ncar');plt.title('XG - lon cell corner (SW)');cbar=plt.colorbar()
ax=plt.subplot(1,4,3); plt.imshow(yc,origin='lower',cmap='gist_ncar');plt.title('YC - lat cell center');cbar=plt.colorbar()
ax=plt.subplot(1,4,4); plt.imshow(yg,origin='lower',cmap='gist_ncar');plt.title('YG - lat cell corner (SW)');cbar=plt.colorbar()
plt.savefig('grid-plots.png', bbox_inches='tight')
# Show how latitudinal grid spacing decreases with latitude and longitudinal spacing is constant
fig=plt.figure(figsize=(20, 8), dpi= 80, facecolor='w', edgecolor='k')
plt.subplot(1,2,1);
plt.plot(yc[1:-1,1]-yc[0:-2,1],yc[1:-1,1]);plt.plot(yc[1:-1,-1]-yc[0:-2,-1],yc[1:-1,-1]);
plt.title('Latitudinal spacing versus latitude')
plt.ylabel('Latitude');plt.xlabel('Latitudinal spacing');
plt.subplot(1,2,2);
plt.plot(xc[1,1:-1]-xc[1,0:-2],xc[1,0:-2]);plt.xlim((0.0195,0.022))
plt.title('Longitudinal spacing versus longitude')
plt.ylabel('Longitude');plt.xlabel('Longitudinal spacing');
plt.savefig('grid-line-plots.png', bbox_inches='tight')
# Show vertical levels information
fdrf="%s/DRF.data"%(droot)
drf=np.fromfile(fdrf, dtype=dtyp,count=nz);
zf=[0]
zf=-np.concatenate((zf,np.cumsum(drf)))
zc=0.5*( zf[0:-1]+zf[1:] )
fig=plt.figure(figsize=(20, 8), dpi= 80, facecolor='w', edgecolor='k')
plt.subplot(1,2,1);
plt.plot(zf,'x');
plt.title('Cell interface depths');plt.ylabel('Depth (m)')
plt.subplot(1,2,2);
plt.plot(zc,'.');
plt.title('Cell center depths');plt.ylabel('Depth (m)');
plt.savefig('vert-grid-line-plots.png', bbox_inches='tight')
###################################################################################################
# Read and plot physical fields (velocity components, temperature, salinity, sea-surface height). #
###################################################################################################
# Get times and create table of iteration numbers plus dates and times
import os
import time
itvalLo=144
itvalHi=1259856
itList=np.arange(itvalLo,itvalHi+144,144)
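# Each iteration index is converted to seconds with a factor of 25 below, i.e.
# the model time step is assumed to be 25 s; a stride of 144 iterations then
# corresponds to 144*25 s = 1 hour between snapshots.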
os.environ['TZ']='UTC'
tVals=[]
for i in itList:
ts=time.gmtime(time.mktime(time.strptime('2011/09/11:UTC','%Y/%m/%d:%Z'))+25.*i)
tstr=time.strftime('%Y-%m-%dT%H:%M:%S', ts)
tVals.append(tstr)
tsLo=time.gmtime(time.mktime(time.strptime('2011/09/11:UTC','%Y/%m/%d:%Z'))+25.*itList[0])
tstr=time.strftime('%Y-%m-%dT%H:%M:%S', tsLo)
print("Initial time and time step number ", tstr, itList[0])
tsHi=time.gmtime(time.mktime(time.strptime('2011/09/11:UTC','%Y/%m/%d:%Z'))+25.*itList[-1])
tstr=time.strftime('%Y-%m-%dT%H:%M:%S', tsHi)
print("Final time and time step number ", tstr, itList[-1])
tsNumList=xr.DataArray(itList,coords={'Time':tVals},dims=('Time'))
# Get a particular timestep number
tN=10
print("Time step number: ",tsNumList[tN].values)
print("Corresponding time: ",tsNumList[tN].coords)
#
# Get and plot fields
#
itVal=tsNumList[tN].values
nrin=nz
lp=10
cm='gist_ncar'
def PLT():
phi=
|
np.fromfile(fn, dtype='>f4',count=nx*ny*nrin)
|
numpy.fromfile
|
# BSD 3-Clause License
#
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Run this module first thing, to test your installation of romcomma.
**Contents**:
**predict**: Prediction using a GaussianBundle.
**test_input**: A rudimentary test input, for installation testing.
"""
from romcomma import distribution, function, data, model
from romcomma.typing_ import NP
from numpy import zeros, eye, pi, full, atleast_2d
from pathlib import Path
from scipy.stats import ortho_group
EFFECTIVELY_ZERO = 1.0E-64
BASE_PATH = Path('X:\\comma_group1\\Rom\\dat\\TestFunctions\\Scalar.RBF')
def scalar_function_of_normal(store_name: str, N: int, M: int, X_std: float, noise_std: float, CDF_scale: NP.Array=None, CDF_loc: NP.Array=None,
pre_function_with_parameters: function.CallableWithParameters = None,
function_with_parameters: function.CallableWithParameters = None) -> data.Store:
X_marginal = distribution.Univariate('norm', loc=0, scale=X_std)
X_dist = distribution.Multivariate.Independent(M=M, marginals=X_marginal)
noise_dist = (distribution.Multivariate.Normal(mean=
|
zeros(1, dtype=float)
|
numpy.zeros
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 8 19:20:14 2018
@author: nemec
"""
import numpy as np
from multiprocessing import Pool
#calculating the lifetime of the spots according to the chosen decay rate
def decay(spot_area,time,D):
t = spot_area/D +time
return t
#calculate the meridional flow
def merflow(lat):
if abs(lat-90) <= 75:
u = 22*np.sin(2.4*(90-lat)*np.pi/180)*7.0922e-3
if abs(lat-90) > 75:
u = 0
return u
#calculate the differential rotation
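#the profile below is the standard solar differential-rotation law: a constant
#term plus sin^2(latitude) and sin^4(latitude) terms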
def diffrot(lat):
rot = 0.1813 - 2.3*np.sin((90-lat)*np.pi/180)**2.-1.62*np.sin((90-lat)*np.pi/180)**4.
return rot
#define the decay rate
D = 30.9 #MHS per day
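#MHS is taken here to mean millionths of the solar hemisphere per day, the
#conventional unit for sunspot decay rates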
#setting up the grid on which to mask the spots
#in this case pixels have size of 0.1 by 0.1 degree
factor = 10.
xvalues = np.around(np.linspace(0,359,num=int(360*factor)),decimals=1)
yvalues = np.around(np.linspace(0,180,num=int(180*factor)),decimals=1)
x,y = np.meshgrid(xvalues,yvalues)
#for doing the sin/cos calculations
conv = np.pi/180.
# =============================================================================
# if you want to experiment with random parameters for the positions and areas,
# just comment out the line where the input file is read in and define the
# coordinates and areas yourself
# =============================================================================
#reading in the file
data = np.loadtxt("AR-mod.txt")
#defining the coordinates
long_pos = data[:, 2]
long_neg = data[:,4]
#need to redefine the grid so that the north pole is at +90 degrees and the south pole at -90 degrees
lat_pos = 90-data[:,1]
lat_neg = 90 -data[:,3]
#define the area at the time of emergence
spot = data[:,5]
#define which part of the input should then be used
start = 70724
end = 74519
grid_max = 359
#only use this for calculations that should not be run parallel!
#positivx= open("positivx3.txt","w")
#positivy= open("positivy3.txt","w")
#negativx= open("negativx3.txt","w")
#negativy= open("negativy3.txt","w")
#for i in range(start,end):
#starting doing the spot masking parallel
def f(i):
#for i in range(start,end):
#print(i)
positivx= open("positivx{}.txt".format(i),"w")
positivy= open("positivy{}.txt".format(i),"w")
negativx= open("negativx{}.txt".format(i),"w")
negativy= open("negativy{}.txt".format(i),"w")
spot_area = spot[i]
time = data[i,0]
t = decay(spot_area,time,D)
phi_pos = 90-lat_pos[i]
phi_neg = 90-lat_neg[i]
#print(t)
if int(t-time) == 0:
area = spot[i]/(30.81*np.pi/2.*np.pi/2.)
r = area**(1./2.)
#define positive polarity patch
x_min_pos = long_pos[i]-r/2.*1./np.cos(phi_pos*conv)
x_max_pos = long_pos[i]+r/2.*1./np.cos(phi_pos*conv)
y_min_pos = lat_pos[i]-r/2.
y_max_pos = lat_pos[i]+r/2.
#define negative polarity patch
x_min_neg = long_neg[i]-r/2.*1./np.cos(phi_neg*conv)
x_max_neg = long_neg[i]+r/2.*1./np.cos(phi_neg*conv)
y_min_neg = lat_neg[i]-r/2.
y_max_neg = lat_neg[i]+r/2.
if x_min_pos < 0 and x_max_pos >0:
x_min_pos1= grid_max+x_min_pos
x_pos_pos = x[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos = y[np.where((x >= x_min_pos1) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
x_pos_pos1 = x[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos1 = y[np.where((x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos1:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos1:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
if x_min_pos < 0. and x_max_pos <0:
x_min_pos2= grid_max+x_min_pos
x_max_pos2 = grid_max+x_max_pos
x_pos_pos2 = x[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos2 = y[np.where((x >= x_min_pos2) & (x<=x_max_pos2) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos2:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos2:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
#
if x_min_pos > 0.:
x_pos_pos3 = x[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos3 = y[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos3:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos3:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
if x_min_neg < 0. and x_max_neg >0:
x_min_neg1= grid_max+x_min_neg
x_pos_neg1 = x[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
y_pos_neg= y[np.where((x >= x_min_neg1) & (y>=y_min_neg) & (y <=y_max_neg))]
for item in x_pos_neg1:
negativx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_neg:
negativy.write("%f \t %f \t %f\n" % (item,r,time))
x_pos_neg1 = x[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
y_pos_neg1 = y[np.where((x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
for item in x_pos_neg1:
negativx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_neg1:
negativy.write("%f \t %f \t %f\n" % (item,r,time))
if x_min_neg < 0. and x_max_neg <0:
x_min_neg2= grid_max+x_min_neg
x_max_neg2 = grid_max +x_max_neg
x_pos_neg2 = x[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
y_pos_neg2 = y[np.where((x >= x_min_neg2) & (x<=x_max_neg2) & (y>=y_min_neg) & (y <=y_max_neg))]
for item in x_pos_neg2:
negativx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_neg2:
negativy.write("%f \t %f \t %f\n" % (item,r,time))
if x_min_neg > 0.:
x_pos_neg3 = x[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
y_pos_neg3 = y[np.where((x >= x_min_neg) & (x<=x_max_neg) & (y>=y_min_neg) & (y <=y_max_neg))]
for item in x_pos_neg3:
negativx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_neg3:
negativy.write("%f \t %f \t %f\n" % (item,r,time))
if x_max_pos >grid_max and x_min_pos <grid_max:
x_max_pos4= x_max_pos-grid_max
x_pos_pos = x[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos = y[np.where((x >= x_min_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
x_pos_pos4 = x[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos4 = y[np.where((x<=x_max_pos4) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos4:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos4:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
if x_max_pos >grid_max and x_min_pos >grid_max:
x_min_pos5= x_min_pos-grid_max
x_max_pos5 =x_max_pos-grid_max
x_pos_pos5 = x[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos5 = y[np.where((x >= x_min_pos5) & (x<=x_max_pos5) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos5:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos5:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
#
if x_max_pos > grid_max:
x_pos_pos6 = x[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
y_pos_pos6 = y[np.where((x >= x_min_pos) & (x<=x_max_pos) & (y>=y_min_pos) & (y <=y_max_pos))]
for item in x_pos_pos6:
positivx.write("%f \t %f \t %f\n" % (item,r,time))
for item in y_pos_pos6:
positivy.write("%f \t %f \t %f\n" % (item,r,time))
if x_max_neg >grid_max and x_min_neg <grid_max:
x_max_neg4= x_max_neg-grid_max
x_pos_neg = x[
|
np.where((x >= x_min_neg) & (y>=y_min_neg) & (y <=y_max_neg))
|
numpy.where
|
import scipy
import numpy
try:
import cupy
except:
import numpy as cupy
from apricot import FacilityLocationSelection
from sklearn.datasets import load_digits
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
digits_data = load_digits()
X_digits = digits_data.data
norm = lambda x: numpy.sqrt((x*x).sum(axis=1)).reshape(x.shape[0], 1)
cosine = lambda x: numpy.dot(x, x.T) / (norm(x).dot(norm(x).T))
X_digits_sparse = scipy.sparse.csr_matrix(cosine(X_digits))
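# X_digits_sparse is the dense cosine self-similarity of the digits stored as a
# CSR matrix, presumably consumed by precomputed-similarity tests further down
# this file.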
X_digits_corr_cupy = cupy.corrcoef(cupy.array(X_digits), rowvar=True) ** 2
X_digits_cosine_cupy = cupy.array(cosine(X_digits))
digits_corr_ranking = [424, 1647, 396, 339, 1030, 331, 983, 1075, 1482,
1539, 1282, 493, 885, 823, 1051, 236, 537, 1161, 345, 1788, 1432, 1634,
1718, 1676, 146, 1286, 655, 1292, 556, 533, 1545, 520, 1711, 1428, 620,
1276, 305, 438, 1026, 183, 2, 384, 1012, 798, 213, 1291, 162, 1206, 227,
1655, 233, 1508, 410, 1295, 1312, 1350, 514, 938, 579, 1066, 82, 164, 948,
1588, 1294, 1682, 943, 517, 959, 1429, 762, 898, 1556, 881, 1470, 1549, 1325,
1568, 937, 347, 1364, 126, 732, 1168, 241, 573, 731, 815, 864, 1639, 1570, 411,
1086, 696, 870, 1156, 353, 160, 1381, 326]
digits_corr_gains = [736.794, 114.2782, 65.4154, 61.3037, 54.5428, 38.7506, 34.097,
32.6649, 30.2813, 27.8395, 24.1843, 17.568, 16.6615, 15.2973, 13.2629, 9.7685,
9.5183, 7.9484, 7.8094, 7.0224, 6.623, 6.061, 6.0469, 5.323, 5.167, 5.0563,
4.8848, 4.7694, 4.4766, 4.4577, 4.3198, 3.9347, 3.5501, 3.3284, 3.0123, 2.9994,
2.9739, 2.8233, 2.7572, 2.558, 2.5281, 2.4412, 2.4328, 2.3452, 2.2498, 2.2457,
2.2127, 2.1542, 2.1416, 2.0876, 2.0715, 2.0482, 2.0053, 1.9996, 1.9912, 1.973,
1.8029, 1.7865, 1.7284, 1.7255, 1.7201, 1.7169, 1.6614, 1.6445, 1.6147, 1.5874,
1.5827, 1.5822, 1.5784, 1.5164, 1.4876, 1.4319, 1.4288, 1.3736, 1.3485, 1.3039,
1.2872, 1.2771, 1.2587, 1.2391, 1.2279, 1.2006, 1.1654, 1.1491, 1.1445, 1.137,
1.1122, 1.0785, 1.0771, 1.0402, 1.0321, 1.0192, 1.0158, 0.9734, 0.9627, 0.9612,
0.9401, 0.9291, 0.912, 0.8924]
digits_euclidean_ranking = [945, 392, 1507, 793, 1417, 1039, 97, 1107, 1075,
867, 360, 186, 1584, 1422, 885, 1084, 1327, 1696, 991, 146, 181, 765,
175, 1513, 1120, 877, 1201, 1764, 1711, 1447, 1536, 1286, 438, 612, 6,
514, 410, 1545, 384, 1053, 1485, 983, 310, 51, 654, 1312, 708, 157, 259,
1168, 117, 1634, 1537, 1188, 1364, 1713, 579, 582, 69, 200, 1678, 798, 183,
520, 1011, 1295, 1291, 938, 1276, 501, 696, 948, 925, 558, 269, 1066, 573,
762, 1294, 1588, 732, 1387, 1568, 1026, 1156, 79, 1222, 1414, 864, 1549,
1236, 213, 411, 151, 233, 924, 126, 345, 1421, 1562]
digits_euclidean_gains = [7448636.0, 384346.0, 250615.0, 224118.0, 166266.0,
127456.0, 122986.0, 109483.0, 93463.0, 67173.0, 55997.0, 54721.0, 51497.0,
47765.0, 45073.0, 33857.0, 30100.0, 25043.0, 22260.0, 19700.0, 19135.0,
17545.0, 17000.0, 15462.0, 15315.0, 14996.0, 14819.0, 13244.0, 12529.0,
12474.0, 11702.0, 11639.0, 11612.0, 11266.0, 11187.0, 9722.0, 9244.0,
8645.0, 8645.0, 8461.0, 8404.0, 8115.0, 7998.0, 7351.0, 7153.0, 6992.0,
6956.0, 6919.0, 6711.0, 6684.0, 6526.0, 6348.0, 6099.0, 5969.0, 5460.0,
5433.0, 5163.0, 5141.0, 5090.0, 4900.0, 4842.0, 4683.0, 4165.0, 4104.0,
4099.0, 4099.0, 3998.0, 3959.0, 3912.0, 3807.0, 3703.0, 3675.0, 3670.0,
3636.0, 3564.0, 3407.0, 3395.0, 3196.0, 3188.0, 3168.0, 3156.0, 3144.0,
3093.0, 3078.0, 3059.0, 2997.0, 2944.0, 2891.0, 2886.0, 2865.0, 2804.0,
2779.0, 2756.0, 2748.0, 2709.0, 2696.0, 2651.0, 2637.0, 2619.0, 2602.0]
digits_cosine_ranking = [424, 615, 1545, 1385, 1399, 1482, 1539, 1075, 331, 493,
885, 236, 345, 1282, 1051, 823, 537, 1788, 1549, 834, 1634, 1009, 1718, 655,
1474, 1292, 1185, 396, 1676, 2, 183, 533, 1536, 438, 1276, 305, 1353, 620,
1026, 983, 162, 1012, 384, 91, 227, 798, 1291, 1655, 1485, 1206, 410, 556,
1161, 29, 1320, 1295, 164, 514, 1294, 1711, 579, 938, 517, 1682, 1325, 1222,
82, 959, 520, 1066, 943, 1556, 762, 898, 732, 1086, 881, 1588, 1470, 1568, 1678,
948, 1364, 62, 937, 1156, 1168, 241, 573, 347, 908, 1628, 1442, 126, 815, 411,
1257, 151, 23, 696]
digits_cosine_gains = [1418.7103, 47.8157, 25.4947, 21.0313, 19.7599, 19.0236,
16.3013, 13.5381, 11.811, 9.0032, 6.2765, 5.9886, 5.2185, 4.6696, 4.1744,
4.0718, 3.0075, 2.8132, 2.5777, 2.2983, 2.2391, 2.2223, 2.0622, 1.9568,
1.9192, 1.7356, 1.7038, 1.6463, 1.6003, 1.5979, 1.3458, 1.3415, 1.288,
1.1595, 1.0048, 0.9198, 0.8886, 0.8454, 0.8446, 0.829, 0.8162, 0.799,
0.7805, 0.7723, 0.7717, 0.7681, 0.7533, 0.7227, 0.7017, 0.6899, 0.6895,
0.6448, 0.6397, 0.6334, 0.6014, 0.5881, 0.5677, 0.5628, 0.5534, 0.5527,
0.5428, 0.5415, 0.5384, 0.5249, 0.5232, 0.498, 0.4944, 0.4877, 0.4799,
0.4788, 0.4775, 0.4663, 0.4641, 0.4589, 0.4447, 0.4437, 0.4408, 0.4382,
0.4312, 0.4266, 0.4238, 0.4184, 0.4168, 0.4058, 0.4, 0.3983, 0.3892,
0.3855, 0.3837, 0.3818, 0.3765, 0.3524, 0.3519, 0.3471, 0.3331, 0.3289,
0.3268, 0.324, 0.3197, 0.3173]
def test_digits_corr_small_greedy():
model = FacilityLocationSelection(10, 'corr', 10)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[:10])
assert_array_almost_equal(model.gains, digits_corr_gains[:10], 4)
def test_digits_corr_small_greedy_rank_initialized():
model = FacilityLocationSelection(10, 'corr', 10, initial_subset=digits_corr_ranking[:5])
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[5:15])
assert_array_almost_equal(model.gains, digits_corr_gains[5:15], 4)
def test_digits_corr_small_greedy_bool_initialized():
mask = numpy.zeros(X_digits.shape[0], dtype=bool)
mask[digits_corr_ranking[:5]] = True
model = FacilityLocationSelection(10, 'corr', 10, initial_subset=mask)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[5:15])
assert_array_almost_equal(model.gains, digits_corr_gains[5:15], 4)
def test_digits_corr_small_pivot():
model = FacilityLocationSelection(10, 'corr', 5)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[:10])
assert_array_almost_equal(model.gains, digits_corr_gains[:10], 4)
def test_digits_corr_small_pivot_rank_initialized():
model = FacilityLocationSelection(10, 'corr', 5, initial_subset=digits_corr_ranking[:5])
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[5:15])
assert_array_almost_equal(model.gains, digits_corr_gains[5:15], 4)
def test_digits_corr_small_pivot_bool_initialized():
mask = numpy.zeros(X_digits.shape[0], dtype=bool)
mask[digits_corr_ranking[:5]] = True
model = FacilityLocationSelection(10, 'corr', 5, initial_subset=mask)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[5:15])
assert_array_almost_equal(model.gains, digits_corr_gains[5:15], 4)
def test_digits_corr_small_pq():
model = FacilityLocationSelection(10, 'corr', 1)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[:10])
assert_array_almost_equal(model.gains, digits_corr_gains[:10], 4)
def test_digits_corr_small_pq_rank_initialized():
model = FacilityLocationSelection(10, 'corr', 1, initial_subset=digits_corr_ranking[:5])
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[5:15])
assert_array_almost_equal(model.gains, digits_corr_gains[5:15], 4)
def test_digits_corr_small_pq_bool_initialized():
mask = numpy.zeros(X_digits.shape[0], dtype=bool)
mask[digits_corr_ranking[:5]] = True
model = FacilityLocationSelection(10, 'corr', 1, initial_subset=mask)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking[5:15])
assert_array_almost_equal(model.gains, digits_corr_gains[5:15], 4)
def test_digits_corr_small_truncated():
model = FacilityLocationSelection(15, 'corr', 1)
model.fit(X_digits)
assert_array_equal(model.ranking[:10], digits_corr_ranking[:10])
assert_array_almost_equal(model.gains[:10], digits_corr_gains[:10], 4)
def test_digits_corr_small_truncated_pivot():
model = FacilityLocationSelection(15, 'corr', 5)
model.fit(X_digits)
assert_array_equal(model.ranking[:10], digits_corr_ranking[:10])
assert_array_almost_equal(model.gains[:10], digits_corr_gains[:10], 4)
def test_digits_corr_large_greedy():
model = FacilityLocationSelection(100, 'corr', 100)
model.fit(X_digits)
assert_array_equal(model.ranking, digits_corr_ranking)
assert_array_almost_equal(model.gains, digits_corr_gains, 4)
def test_digits_corr_large_pivot():
model = FacilityLocationSelection(100, 'corr', 50)
model.fit(X_digits)
|
assert_array_equal(model.ranking, digits_corr_ranking)
|
numpy.testing.assert_array_equal
|
def xmaslight():
# This is the code from my
#NOTE THE LEDS ARE GRB COLOUR (NOT RGB)
# Here are the libraries I am currently using:
import time
import re
import math
# You are welcome to add any of these:
import random
import numpy as np
# import scipy
# import sys
    # If you want to have user changeable values, they need to be entered from the command line
    # so import sys and use sys.argv[0] etc
# some_value = int(sys.argv[0])
# IMPORT THE COORDINATES (please don't break this bit)
coordfilename = "coords.txt"
fin = open(coordfilename,'r')
coords_raw = fin.readlines()
coords_bits = [i.split(",") for i in coords_raw]
coords = []
for slab in coords_bits:
new_coord = []
for i in slab:
new_coord.append(int(re.sub(r'[^-\d]','', i)))
coords.append(new_coord)
#set up the pixels (AKA 'LEDs')
PIXEL_COUNT = len(coords) # this should be 500
pixels = neopixel.NeoPixel(board.D18, PIXEL_COUNT, auto_write=False)
# YOU CAN EDIT FROM HERE DOWN
# I get a list of the coordinates which is not overly useful here other than to set the max and min coordinates
xs = []
ys = []
zs = []
for i in coords:
xs.append(i[0])
ys.append(i[1])
zs.append(i[2])
slow = 0
ballradius = 220
# the eight colours in GRB order
# if you are turning a lot of them on at once, keep their brightness down please
colourA = [0,50,0] # red
colourB = [40,60,0] # orange
colourC = [45, 45, 0] # yellow
colourD = [38, 0, 0] # green
colourE = [38, 0, 38] # teal
colourF = [0, 0, 38] # blue
colourG = [0, 13, 38] # indigo
colourH = [0, 25, 38] # violet
run = 1
coordmat = np.asmatrix(np.array(coords) + np.array([0.,0.,220]),
dtype=np.float64).transpose() # Put LED coordinates into appropriate numpy matrix form to prepare for rotations.
cnt = 0
while run == 1:
time.sleep(slow)
LED = 0
while LED < len(coords):
# Check which octant LED lives in to generate colored octahedron
if coordmat[0, LED]**2 + coordmat[1, LED]**2 + coordmat[2, LED]**2 < ballradius**2:
if coordmat[0, LED] < 0:
if coordmat[1, LED] < 0:
if coordmat[2, LED] < 0:
pixels[LED] = colourA
else:
pixels[LED] = colourB
else:
if coordmat[2, LED] < 0:
pixels[LED] = colourC
else:
pixels[LED] = colourD
else:
if coordmat[1, LED] < 0:
if coordmat[2,LED] < 0:
pixels[LED] = colourE
else:
pixels[LED] = colourF
else:
if coordmat[2, LED] < 0:
pixels[LED] = colourG
else:
pixels[LED] = colourH
LED += 1
# use the show() option as rarely as possible as it takes ages
# do not use show() each time you change a LED but rather wait until you have changed them all
pixels.show() ## <NAME> had to comment this out since he doesn't have LEDs. Won't work until hardware is available.
# now we get ready for the next cycle
# We do this similarly to how Matt did his translating plane effect: use a static spatial coloring function,
# but rotate all of the LEDs!
#Do rotate-y stuff here
#Rotation Matrix
# Small scalar amount (in radians) to rotate for one timestep of animation (plays role of "inc" variable in Matt's original code)
theta = 0.2
# UNIT vector axis about which to rotate for one timestep of animation
if cnt%100 == 0: #Switch up the rotation axis every so often to keep things interesting
ux = random.uniform(-1.0, 1.0)
uy = random.uniform(-1.0, 1.0)
uz = random.uniform(-1.0, 1.0)
length = math.sqrt(ux**2+uy**2+uz**2)
ux = ux / length
uy = uy / length
uz = uz / length
u = np.matrix(
[
[ux],
[uy],
[uz]
]
)
UX = np.matrix( #Cross Product Matrix
[
[0., -uz, uy],
[uz, 0., -ux],
[-uy, ux, 0.]
]
)
UXU = np.matmul(u,u.transpose()) #Generate Outer Product
I = np.matrix( #Identity Matrix
[
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]
]
)
# Setup rotation matrix using R = \cos(\theta) I + \sin(\theta) UX + (1 - \cos(\theta)) UXU (Rodrigues' Rotation Formula)
RotMat = np.cos(theta) * I + np.sin(theta) * UX + (1 - np.cos(theta)) * UXU
coordmat =
|
np.matmul(RotMat,coordmat)
|
numpy.matmul
|
# Copyright (c) ASU GitHub Project.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
################################################################################
from __future__ import print_function
import math
import os
import random
import copy
import scipy
import string
import numpy as np
import torch
import torch.utils.data
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from skimage.transform import resize
try: # SciPy >= 0.19
from scipy.special import comb
except ImportError:
from scipy.misc import comb
from PIL import Image, ImageDraw, ImageFont
def bernstein_poly(i, n, t):
"""
The Bernstein polynomial of n, i as a function of t
"""
return comb(n, i) * (t ** (n - i)) * (1 - t) ** i
def bezier_curve(points, nTimes=1000):
"""
Given a set of control points, return the
bezier curve defined by the control points.
Control points should be a list of lists, or list of tuples
such as [ [1,1],
[2,3],
[4,5], ..[Xn, Yn] ]
nTimes is the number of time steps, defaults to 1000
See http://processingjs.nihongoresources.com/bezierinfo/
"""
nPoints = len(points)
xPoints = np.array([p[0] for p in points])
yPoints = np.array([p[1] for p in points])
t = np.linspace(0.0, 1.0, nTimes)
polynomial_array = np.array([bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])
xvals = np.dot(xPoints, polynomial_array)
yvals = np.dot(yPoints, polynomial_array)
return xvals, yvals
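# --- Editor's note: a minimal usage sketch for bezier_curve() (not part of the
# original file; the control points below are arbitrary). With the Bernstein
# convention used above, the first returned sample lies at the last control
# point and the last sample at the first control point.
def _bezier_curve_example():
    points = [[0, 0], [1, 2], [2, 0]]              # three control points -> quadratic curve
    xvals, yvals = bezier_curve(points, nTimes=5)  # five samples along the curve
    return xvals, yvals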
def data_augmentation(x, y, prob=0.5):
# augmentation by flipping
cnt = 3
while random.random() < prob and cnt > 0:
degree = random.choice([0, 1, 2])
x = np.flip(x, axis=degree)
y = np.flip(y, axis=degree)
cnt = cnt - 1
return x, y
def elastic_transform(image):
alpha = 991
sigma = 8
random_state =
|
np.random.RandomState(None)
|
numpy.random.RandomState
|
#!/usr/bin/env python
import tensorflow as tf
import numpy as np
from align.detect_face import create_mtcnn, detect_face
from scipy import misc
from align.align_dataset_mtcnn import *
def initialize_mtcnn(gpu_memory_fraction, rect_minsize = 100, mtcnn_thresholds = [0.6, 0.7, 0.7], scale_factor = 0.709):
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = create_mtcnn(sess, None)
minsize = rect_minsize
threshold = mtcnn_thresholds
factor = scale_factor
return pnet, rnet, onet, minsize, threshold, factor
def align_image(img, pnet, rnet, onet, minsize, threshold, factor, image_height = 160, image_width = 160, view_mtcnn_scores = False, NED_threshold = 0.3, filter_sideways = True, keep_aspect_ratio = False, discard_border_landmark_faces = False, margin = 32, visualize = False):
#distance_info = {}
if img.ndim<2:
#create_dir(parent_dir+'/failed_only_two_channels')
#img = cv2.imread(image_path)
#cv2.imwrite(parent_dir+'/failed_only_two_channels/'+filename+'.png', img)
#print('Unable to align "%s"' % image_path)
return [], None, [], []
elif img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
bounding_boxes, landmark_points, final_info = detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
if not np.all(landmark_points):
return [], None, [], []
nrof_faces = bounding_boxes.shape[0]
best_face_shape = (1,1,1)
best_face_index = 0
best_face_cropped = None
best_face_bb = [0,0,0,0]
best_face_conf = 0
if nrof_faces>0:
for i in range(nrof_faces):
det = bounding_boxes[i, 0:4]
conf = bounding_boxes[i, 4]
img_size = np.asarray(img.shape)[0:2]
if nrof_faces>1:
bounding_box_size = (det[2]-det[0])*(det[3]-det[1])
img_center = img_size / 2
offsets = np.vstack([ (det[0]+det[2])/2-img_center[1], (det[1]+det[3])/2-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
index = np.argmax(bounding_box_size-offset_dist_squared*2.0) # some extra weight on the centering
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0]-margin/2, 0)
bb[1] =
|
np.maximum(det[1]-margin/2, 0)
|
numpy.maximum
|
import numpy as np
import numpy.random as npr
from test_util import *
from funkyyak import grad
|
npr.seed(1)
|
numpy.random.seed
|
import numpy as np
from torchvision.transforms import Compose
def get_transforms(cfg):
transforms = []
if cfg is not None:
for trf_cfg in cfg:
if trf_cfg.name in globals():
trf_cls = globals()[trf_cfg.name]
trf = trf_cls(**dict(trf_cfg.args or {}))
transforms.append(trf)
else:
raise ValueError("Transofrm {} does not exist!"
"".format(trf_cfg.name))
return Compose(transforms)
class ClockwiseRotation(object):
def __init__(self, deg):
self.deg = deg
def __repr__(self):
return "{0.__class__.__name__}(def={0.deg})".format(self)
def __call__(self, events):
"""
:param np.ndarray events: [num_events, 4] array containing (x, y, ts, p) values
:return: np.ndarray [num_events, 4]
"""
x, y, ts, p = np.split(events, 4, axis=-1)
# Compute the center of the events cloud
xc = (x.max() - x.min()) / 2
yc = (y.max() - y.min()) / 2
# Apply rotation
angle = np.radians(self.deg)
x_rot = ((x - xc) * np.cos(angle)) - ((y - yc) * np.sin(angle)) + xc
y_rot = ((x - xc) * np.sin(angle)) + ((y - yc) * np.cos(angle)) + yc
# Translate events so that the top-left most event is in (0,0)
x_left =
|
np.min(x_rot)
|
numpy.min
|
import mmcv
import numpy as np
from terminaltables import AsciiTable
from .bbox_overlaps import bbox_overlaps
from .class_names import get_classes
def average_precision(recalls, precisions, mode='area'):
"""Calculate average precision (for single or multiple scales).
Args:
recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
mode (str): 'area' or '11points', 'area' means calculating the area
under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1]
Returns:
float or ndarray: calculated average precision
"""
no_scale = False
if recalls.ndim == 1:
no_scale = True
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
if mode == 'area':
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
for i in range(mpre.shape[1] - 1, 0, -1):
mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
for i in range(num_scales):
ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
ap[i] = np.sum(
(mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
elif mode == '11points':
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
if no_scale:
ap = ap[0]
return ap
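# --- Editor's note: a small, hypothetical usage sketch for average_precision()
# (not part of the original file; the precision/recall values are made up).
# It only illustrates the two modes described in the docstring above.
def _average_precision_example():
    recalls = np.array([0.2, 0.6, 1.0])
    precisions = np.array([1.0, 0.8, 0.6])
    ap_area = average_precision(recalls, precisions, mode='area')       # area under the PR curve
    ap_11pt = average_precision(recalls, precisions, mode='11points')   # mean precision at recall 0, 0.1, ..., 1
    return ap_area, ap_11pt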
def tpfp_imagenet(det_bboxes,
gt_bboxes,
gt_ignore,
default_iou_thr,
area_ranges=None):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bbox (ndarray): the detected bbox
gt_bboxes (ndarray): ground truth bboxes of this image
gt_ignore (ndarray): indicate if gts are ignored for evaluation or not
default_iou_thr (float): the iou thresholds for medium and large bboxes
area_ranges (list or None): gt bbox area ranges
Returns:
tuple: two arrays (tp, fp) whose elements are 0 and 1
"""
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_gts), each row is tp or fp
# of a certain scale.
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1
gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1
iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
default_iou_thr)
# sort all detections by scores in descending order
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore, dtype=bool)
else:
gt_areas = gt_w * gt_h
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
max_iou = -1
matched_gt = -1
# find best overlapped available gt
for j in range(num_gts):
# different from PASCAL VOC: allow finding other gts if the
                # best overlapped ones are already matched by other det bboxes
if gt_covered[j]:
continue
elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
max_iou = ious[i, j]
matched_gt = j
# there are 4 cases for a det bbox:
# 1. it matches a gt, tp = 1, fp = 0
# 2. it matches an ignored gt, tp = 0, fp = 0
# 3. it matches no gt and within area range, tp = 0, fp = 1
# 4. it matches no gt but is beyond area range, tp = 0, fp = 0
if matched_gt >= 0:
gt_covered[matched_gt] = 1
if not (gt_ignore[matched_gt] or gt_area_ignore[matched_gt]):
tp[k, i] = 1
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1)
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
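# --- Editor's note (illustration only, not part of the original file): the
# adaptive ImageNet-style threshold above, min(w*h / ((w+10)*(h+10)), default),
# loosens the IoU criterion for small ground-truth boxes.
def _adaptive_iou_thr_example(default_iou_thr=0.5):
    small = min(20 * 20 / ((20 + 10) * (20 + 10)), default_iou_thr)      # 400/900 ~ 0.44 -> looser than default
    large = min(200 * 100 / ((200 + 10) * (100 + 10)), default_iou_thr)  # ~0.87 -> clipped back to 0.5
    return small, large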
def tpfp_default(det_bboxes, gt_bboxes, gt_ignore, iou_thr, area_ranges=None):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bbox (ndarray): the detected bbox
gt_bboxes (ndarray): ground truth bboxes of this image
gt_ignore (ndarray): indicate if gts are ignored for evaluation or not
iou_thr (float): the iou thresholds
Returns:
tuple: (tp, fp), two arrays whose elements are 0 and 1
"""
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_gts), each row is tp or fp of
# a certain scale
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
# if there is no gt bboxes in this image, then all det bboxes
# within area range are false positives
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0] + 1) * (
det_bboxes[:, 3] - det_bboxes[:, 1] + 1)
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(det_bboxes, gt_bboxes)
ious_max = ious.max(axis=1)
ious_argmax = ious.argmax(axis=1)
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered =
|
np.zeros(num_gts, dtype=bool)
|
numpy.zeros
|
# USEFUL FUNC.(TOOL) IN IMSNG MODULE
# CREATED IN 19.03.03 BY <NAME>
# UPDATE : 20.01.03
#============================================================
# MODULE
#------------------------------------------------------------
import os, sys, glob
import numpy as np
from astropy.io import ascii, fits
import astropy.coordinates as coord
import astropy.units as u
from multiprocessing import Process, Pool
import multiprocessing as mp
import time
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.nddata import Cutout2D
import matplotlib.pyplot as plt
from imsng import phot_tbd
#============================================================
def timename():
'''
CONVERT 'TIME' TO YYMMDD, HHMMSS FORM.
INPUT : NONE
	OUTPUT : STRING FORM OF 'YYMMDD', 'HHMMSS'
'''
import numpy as np
import time
now = time.gmtime(time.time())
y, m, d = now.tm_year, now.tm_mon, now.tm_mday
ho, mi, se = now.tm_hour, now.tm_min, now.tm_sec
yy = str(y)[2:]
if len(str(m)) < 2:
mm = '0'+str(m)
else:
mm = str(m)
if len(str(d)) < 2:
dd = '0'+str(d)
else:
dd = str(d)
if len(str(ho)) < 2:
hour = '0'+str(ho)
else:
hour = str(ho)
if len(str(mi)) < 2:
mini = '0'+str(mi)
else:
mini = str(mi)
if len(str(se)) < 2:
sec = '0'+str(se)
else:
sec = str(se)
yymmdd = yy+mm+dd
hhmmss = hour+mini+sec
return yymmdd, hhmmss
#------------------------------------------------------------
def detection(name, ra, dec, time, location):
import numpy as np
import os, glob, sys
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.coordinates import get_sun, get_moon
from astropy.io import ascii
from astropy.table import Table, Column
target = SkyCoord(ra, dec, unit='deg') # defaults to ICRS frame
site = location
del_midnight= np.linspace(-12, +12, 720) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
sunaltaz_night = get_sun(time_night).transform_to(frame_night)
# indx_set = np.where( sunaltaz_night.alt > -18 * u.deg )
indx_rise = np.where( sunaltaz_night.alt < -18 * u.deg )
sunset = del_midnight[np.min(indx_rise)]
sunrise = del_midnight[np.max(indx_rise)]
del_midnight= np.linspace(sunset.value, sunrise.value, 100) * u.hour
time_night = time+del_midnight
frame_night = AltAz(obstime=time_night, location=site)
targetaltaz_night = target.transform_to(frame_night)
return targetaltaz_night
#------------------------------------------------------------
def rts_maker(filename, savepath, obs, obstbl, intbl, date, hhmmss):
from astropy.coordinates import Angle
import numpy as np
import os, glob, sys
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.coordinates import get_sun, get_moon
from astropy.io import ascii
from astropy.table import Table, Column
indx_obs = np.where(obstbl['obs'] == obs)
lat, lon, height = obstbl['lat'][indx_obs], obstbl['lon'][indx_obs], obstbl['height'][indx_obs]
utoff, ul = obstbl['utoff'][indx_obs], obstbl['ul'][indx_obs]
lat = lat * u.deg # North
lon = lon * u.deg # East
height = height * u.m
utoff = utoff * u.hour
ul = 20 # limiting magnitude
location = EarthLocation(lat=lat, lon=lon, height=height)
time = Time('20'+date[0:2]+'-'+date[2:4]+'-'+date[4:6]+' 00:00:00')+utoff
risetime = []
maxtime = []
settime = []
namelist = []
ralist = []
delist = []
for i in range(len(intbl)):
name, ra, dec = intbl['name'][i], intbl['ra'][i], intbl['dec'][i]
targetaltaz_night = detection(name, ra, dec, time, location)
try:
alt_max = np.max(targetaltaz_night.alt)
alt_max_time = targetaltaz_night.obstime[targetaltaz_night.alt == alt_max] + utoff
alt_rise30_time = targetaltaz_night.obstime[targetaltaz_night.alt >= 25 * u.deg][0] + utoff
alt_set30_time = targetaltaz_night.obstime[targetaltaz_night.alt <= 0 * u.deg][0] + utoff
if alt_max >= 30.0 *u.deg:
risetime.append(alt_rise30_time.value[0][11:])
maxtime.append(alt_max_time.value[0][11:])
settime.append(alt_set30_time.value[0][11:])
namelist.append(name)
ralist.append(Angle(str(ra)+'d').to_string(unit=u.hour, sep=':'))
delist.append(Angle(str(dec)+'d').to_string(unit=u.degree, sep=':'))
except:
pass
risetime = np.array(risetime)
maxtime = np.array(maxtime)
settime = np.array(settime)
namelist = np.array(namelist)
ralist = np.array(ralist)
delist = np.array(delist)
targettbl = Table( [namelist, ralist, delist, risetime, maxtime, settime], names=['name', 'ra', 'dec', 'rise', 'transit', 'set'])
ascii.write( targettbl,
output=savepath+date+'/'+date+'-'+hhmmss+'-targetlist-'+obs+'-'+filename+'.txt',
format='fixed_width',
delimiter=None,
overwrite=True)
'''
ascii.write( targettbl,
output=savepath+date+'/'+date+'-'+hhmmss+'-targetlist-'+obs+'-'+filename+'.txt',
delimiter=None,
overwrite=True)
'''
#------------------------------------------------------------
def sendmail(filename, subject, sendID, sendPW, reciver):
'''
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import smtplib
from email.mime.text import MIMEText
import codecs
email_text = codecs.open(filename, 'rb', 'utf-8')
msg = MIMEText(email_text.read())
email_text.close()
msg['Subject'] = subject
msg['From'] = sendID
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(sendID, sendPW)
smtp_gmail.sendmail(sendID, reciver, msg.as_string())
smtp_gmail.quit()
comment = 'Send '+filename+'\n'+'From '+sendID+' To '+reciver; print(comment)
#------------------------------------------------------------
def send_gmail(subject, contents, fromID, fromPW, toIDs, ccIDs=None, path=None):
'''
SEND GMAIL
Security reference
https://cpuu.postype.com/post/23066
Code reference
https://kimdoky.github.io/python/2017/07/21/smtplib_email.html
File attach
https://brunch.co.kr/@jk-lab/31
'''
import os
import smtplib
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
#msg = MIMEBase('mixed')
#msg = MIMEText(contents, 'plain', 'utf-8')
msg = MIMEMultipart()
msg['Subject'] = Header(s=subject, charset="utf-8")
msg['From'] = fromID
msg['To'] = toIDs
if ccIDs != None:
msg['Cc'] = ccIDs
msg.attach(MIMEText(contents, 'plain', 'utf-8'))
# ATTACH TEXT FILE ON MAIL
if path != None:
if type(path) != list:
filelist = []
filelist.append(path)
else:
filelist = path
for file in filelist:
part = MIMEBase("application", "octet-stream")
part.set_payload(open(file, 'rb').read())
part.add_header( 'Content-Disposition',
'attachment; filename="%s"'% os.path.basename(file))
msg.attach(part)
# ACCESS TO GMAIL & SEND MAIL
smtp_gmail = smtplib.SMTP_SSL('smtp.gmail.com', 465)
smtp_gmail.login(fromID, fromPW)
smtp_gmail.sendmail(msg["From"], msg["To"].split(",") + msg["Cc"].split(","), msg.as_string())
smtp_gmail.quit()
comment = 'Send '+str(path)+'\nFrom\t'+fromID+'\nTo'; print(comment); print(toIDs)
#------------------------------------------------------------
def calc_rts(filename, observatory, obsdate, dts, obsinfofile, catname, altlimit=30., moonseperation=30.):
#cal_visibility.py - <NAME>, 2015/02/01
#calculate rise transit set time and moon distance for observation of IMSNG
#to be changed : adding observatory, bar plot, adding object, calculating for all nights of 2015
# Usage : python cal_visibility.py obsdate dts
# python cal_visibility.py 2015/03/21 1
# python 3 ported, 2019-03-05, <NAME>
import mskpy.observing as obs
from astropy.coordinates import Angle
import astropy.units as u
from astropy.io import ascii
from astropy.table import Table, vstack
import ephem
import numpy as np
import string
import os, sys
import astropy.coordinates as coord
	# altitude and moon separation parameters
	# moon separation is a little bit close (2~3 deg)
#altlimit = 25.
#moonseperation = 30.
#observatory = 'LOAO'
#obsdate = '2019/10/06'
#dts = '0'
observatory = str(observatory)
obsdate = str(obsdate)
dts = str(dts)
moonsepcut = 360.-moonseperation
#obs_info = ascii.read("obs_info.dat")
obs_info = ascii.read(obsinfofile)
if type(catname)==str : tdata = ascii.read(catname)
else : tdata = catname
#catname = 'targetlist_test.dat'
# Obseravatory information
indx_obs = np.where(observatory == obs_info['obs'])
obsname = obs_info['obs'][indx_obs][0]
obslat = obs_info['lat'][indx_obs][0]
obslon = obs_info['lon'][indx_obs][0]
obstz = obs_info['utoff'][indx_obs][0]
observ = ephem.Observer()
observ.date = obsdate+' 01:00:00'
observ.lon = str(obslon)
observ.lat = str(obslat)
observ.elevation= obs_info['height'][indx_obs][0]
# Day Time Saving
if int(dts) ==0:
#print ('No day Time saving')
dts = float(dts)
else:
#print ('Ok then I will plus 1 hr to local time.')
dts = float(dts)
# objects from catalog file
objname = tdata['name']
ra = tdata['ra']
dec = tdata['dec']
prior = tdata['sort']
radd = ra
decdd = dec
# Moon distance and information
mcoord = ephem.Moon()
mcoord.compute(obsdate)
#print ('Moon ra, dec \n')
mheader ='Moon ra, dec'
#print (mcoord.ra,mcoord.dec,'\n')
minfo = mheader+' '+str(mcoord.ra)+' '+str(mcoord.dec)+'\n'
mphase = ephem.Moon(obsdate+' 00:00:00')
#print ('Moon phase : '+ "%.2f" % mphase.moon_phase)
mphasestr ='Moon phase : '+ "%.2f" % mphase.moon_phase +'\n'
# Angular distance calculation
def angsep(ra1deg, dec1deg, ra2deg, dec2deg) :
ra1rad = ra1deg*np.pi/180
dec1rad = dec1deg*np.pi/180
ra2rad = ra2deg*np.pi/180
dec2rad = dec2deg*np.pi/180
cos_a = np.sin(dec1rad)*np.sin(dec2rad)+(np.cos(dec1rad)*np.cos(dec2rad)*np.cos(ra1rad-ra2rad))
anglesep = np.arccos(cos_a)*180/np.pi
return anglesep
'''
targets = []
targets.append(rad)
targets.append(decd)
targets.append(objname)
'''
msep = angsep(radd,decdd, np.degrees(mcoord.ra), np.degrees(mcoord.dec))
#sunrise calculation
observ.horizon = '-18'
sunrise = observ.next_rising(ephem.Sun())
sunset = observ.previous_setting(ephem.Sun())
aaa = ephem.Date.tuple(sunset)
#hrr = int(aaa[3]+obstz+dts+24)
hrr = int(aaa[3]+obstz+dts)
mrr = aaa[4]
#print ('sunset : '+str(hrr)+':'+str(mrr))
sunsetstr = '-18 deg sunset : '+str(int(hrr))+':'+str(mrr)+'\n'
sunseti = hrr + mrr/60. + 0.25
bbb = ephem.Date.tuple(sunrise)
hrr = bbb[3]+obstz+dts
mrr = bbb[4]
#print ('sunrise : '+str(int(hrr))+':'+str(mrr))
sunrisestr = '-18 deg sunrise : '+str(int(hrr))+':'+str(mrr)+'\n'
sunriseti = hrr + mrr/60. -0.25
#f = open("rts_vis_"+obsdate[0:4]+obsdate[5:7]+obsdate[8:10]+"_"+observatory+".txt",'w')
f = open(filename,'w')
#g = open("targets.data",'w')
#header = '{:25s} {:12s} {:10s} {:5s} {:5s} {:5s} {:5s} {:1s}'.format('name', 'ra', 'dec', 'rise(LT)', 'transit(LT)', 'set(LT)', 'moon_dist(deg)', 'sort')+'\n'
header = 'name ra dec rise(LT) transit(LT) set(LT) moon_dist(deg) sort \n'
dashline = '#'+'-'*60+'\n'
f.write(obsdate)
f.write('\nobservatory = '+observatory+'\n')
f.write(sunsetstr)
f.write(sunrisestr)
f.write(minfo)
f.write(mphasestr)
f.write('alt limit = '+str(altlimit)+'\n')
f.write('Moon seeperation = '+str(moonseperation)+'\n')
f.write(dashline)
f.write(header)
pobj = []
prt = []
ptt = []
pst = []
telescope = obs.Observer(obslon*u.deg, obslat*u.deg, dts+obstz, obsdate, observatory)
for n in range(len(objname)):
ra_hms = Angle(str(ra[n])+'d').to_string(unit=u.hour, sep=':')[:-2]
de_hms = Angle(str(dec[n])+'d').to_string(unit=u.deg, sep=':')[:-3]
		# altitude limit cut (altlimit, 30 deg by default)
rtscal = obs.rts(radd[n], decdd[n], obsdate, obslon, obslat, float(obstz)+dts, limit=altlimit, precision=1440)
rt = rtscal[0]
tt = rtscal[1]
st = rtscal[2]
if rtscal[0]==None:
#print (objname[n], ra_hms, de_hms, rtscal[0], rtscal[1], rtscal[2],"%.2f" % msep[n])
vis=objname[n]+' '+ra_hms+' '+de_hms+ ' '+str(rtscal[0])+' '+ str(rtscal[1])+' '+ str(rtscal[2])+' '+str(int(msep[n]))+ str(prior[n])+'\n'
#f.write(vis)
#print(vis)
elif sunriseti < rtscal[0] < sunseti and sunriseti < rtscal[2] < sunseti and sunriseti < rtscal[1] < sunseti :
#print ('It can be seen in daytime!')
pass
# moon seperation = 50 deg cut
elif msep[n] < moonseperation :
#print (objname[n]+' too close to Moon < '+str(moonseperation)+' deg')
pass
elif msep[n] > moonsepcut :
#print (objname[n]+' is close to the Moon by ',str(360-msep[n])+' deg')
pass
else:
rtp = "%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp = "%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp = "%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis = '{:25s} {:12s} {:10s} {:5s} {:5s} {:5s} {:5s} {:1s}'.format(objname[n], ra_hms, de_hms, rtp, ttp, stp, str(int(msep[n])), str(prior[n]))+'\n'
f.write(vis)
#print(vis)
#targets = objname[n]+' , '+ra_hms+' hr , '+de_hms+' deg \n'
#g.write(targets)
#print (objname[n], ra_hms, de_hms, rtp, ttp, stp, "%.2f" % msep[n])
f.close()
#g.close()
#os.system('pluma '+"rts_vis_"+obsdate[0:4]+obsdate[5:7]+obsdate[8:10]+"_loao.txt &")
#targetfile ="rts_vis_"+obsdate[0:4]+obsdate[5:7]+obsdate[8:10]+".txt"
#------------------------------------------------------------
def ds9regmaker(name, ra, dec, radius=1.5, color='green', dashlist=8, width=2, font='helvetica', fontsize=10, filename='ds9.reg'):
'''
name = intbl['Object Name']
ra = intbl['RA']
dec = intbl['DEC']
radius=1.5
color='green'
dashlist=8
width=2
font='helvetica'
fontsize=10
filename='ds9.reg'
'''
c = SkyCoord(ra, dec, unit='deg')
hmsdms = c.to_string('hmsdms')
hms, dms = [], []
for hd in hmsdms:
rahms = hd.split(' ')[0].replace('h', ':').replace('m', ':').replace('s', '')
dedms = hd.split(' ')[1].replace('d', ':').replace('m', ':').replace('s', '')
hms.append(rahms)
dms.append(dedms)
f = open(filename, 'w')
head = """# Region file format: DS9 version 4.1\nglobal color={} dashlist={} 3 width={} font="{} {} normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\nfk5
""".format(color, dashlist, width, font, fontsize)
f.write(head)
for r,d,n0 in zip(hms, dms, name):
n = n0.replace('<a HREF="javascript:void(0)">', '').replace('</a>', '')
reg = """circle({},{},{}") # text={}{}{}
""".format(r, d, format(radius, ".3f"), '{', n, '}')
f.write(reg)
f.close()
#------------------------------------------------------------
def rtsmaker(observatory, headname, save_path, obspath, catpath, start, end, altlimit=30., moonseperation=40., sunlimit='-18', numlimit=100):
import pytz
import jdcal
import ephem
#from numpy import *
import numpy as np
import os, sys
import string
import datetime
import astropy.units as u
from astropy.io import ascii
import mskpy.observing as obs
import astropy.coordinates as coord
from astropy import units as u
from astropy.coordinates import SkyCoord
#------------------------------------------------------------#
# INPUT SAMPLE
#------------------------------------------------------------#
'''
observatory = 'SAO'
save_path = './'
obspath = "/home/gw/Research/observatory.txt"
catpath = 'MS181101ab_Preliminary-all_candidates.txt'
start = '2019/04/17'
end = '2019/04/19'
	#altitude limit and moon separation; the moon separation is a little bit close (2~3 deg)
numlimit = 100
altlimit = 30.
moonseperation = 40.
sunlimit = '-18'
'''
#------------------------------------------------------------#
# OBSERVATORY INFO.
#------------------------------------------------------------#
obsinfo = ascii.read(obspath)
obsname = np.copy(obsinfo['name'])
obsindex = np.where(obsname == observatory)[0]
obslat = (np.copy(obsinfo['latitude(N+)'])[obsindex])[0]
obslon = (np.copy(obsinfo['longitude(E+)'])[obsindex])[0]
obsalt = (np.copy(obsinfo['altitude'])[obsindex])[0]
obstz = (np.copy(obsinfo['timezone'])[obsindex])[0]
tz = pytz.timezone(obstz)
#------------------------------------------------------------#
observ = ephem.Observer()
observ.lat = str(obslat)
observ.lon = str(obslon)
observ.elevation= obsalt
observ.horizon = sunlimit
#------------------------------------------------------------#
#objects from catalog file
tdata = ascii.read(catpath)
objname = tdata['name']
ra = tdata['ra']
dec = tdata['dec']
prior = tdata['sort']
rank = tdata['rank']
dist = tdata['dist']
RA = coord.Angle(ra, unit = u.deg)
Dec = coord.Angle(dec, unit = u.deg)
radd = RA.value
rad = RA.hour
decd = Dec.value
decdd = Dec.degree
#angular distance calculation
def angsep(ra1deg, dec1deg, ra2deg, dec2deg) :
ra1rad = ra1deg*np.pi/180
dec1rad = dec1deg*np.pi/180
ra2rad = ra2deg*np.pi/180
dec2rad = dec2deg*np.pi/180
cos_a = np.sin(dec1rad)*np.sin(dec2rad)+(np.cos(dec1rad)*np.cos(dec2rad)*np.cos(ra1rad-ra2rad))
anglesep = np.arccos(cos_a)*180/np.pi
return anglesep
#dates to calculate
fmt = '%Y/%m/%d'
startdt = datetime.datetime.strptime(start, fmt)
enddt = datetime.datetime.strptime(end, fmt)
startmjd = (jdcal.gcal2jd(startdt.year, startdt.month, startdt.day))[1]
endmjd = (jdcal.gcal2jd(enddt.year, enddt.month, enddt.day))[1]
for i in range(int(endmjd-startmjd+1)):
onedaymjd = startmjd+i+1
oneday = jdcal.jd2gcal(2400000.5, onedaymjd)
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2])
dst = tz.dst(onedaydt, is_dst=True)
dst = dst.seconds/3600
onedaydt = datetime.datetime(oneday[0], oneday[1], oneday[2], tzinfo=tz)
onedayutc = onedaydt.astimezone(pytz.utc)
observ.date = onedayutc
# Moon distance and information
mcoord = ephem.Moon()
mcoord.compute(observ)
minfo = 'Moon ra, dec : '+str(mcoord.ra)+' '+str(mcoord.dec)+'\n'
mphase = ephem.Moon(observ.date)
mphasestr = 'Moon phase : '+ "%.2f" % mphase.moon_phase +'\n'
msep = angsep(radd, decdd, np.degrees(mcoord.ra), np.degrees(mcoord.dec))
# SUNSET CALC.
sunset = observ.previous_setting(ephem.Sun())
sunsettu = ephem.Date.tuple(sunset)
sunsetdt = datetime.datetime(sunsettu[0],sunsettu[1],sunsettu[2],sunsettu[3],int(sunsettu[4]),tzinfo=pytz.utc)
sunsetlocal = sunsetdt.astimezone(tz)
sunsetstr = sunlimit+' deg sunset : '+str(sunsetlocal.hour)+':'+str(sunsetlocal.minute)+'\n'
sunsethour = sunsetlocal.hour+sunsetlocal.minute/60.+sunsetlocal.second/3600.
# SUNRISE CALC.
sunrise = observ.next_rising(ephem.Sun())
sunrisetu = ephem.Date.tuple(sunrise)
sunrisedt = datetime.datetime(sunrisetu[0],sunrisetu[1],sunrisetu[2],sunrisetu[3],int(sunrisetu[4]),tzinfo=pytz.utc)
sunriselocal = sunrisedt.astimezone(tz)
sunrisestr = sunlimit+' deg sunrise : '+str(sunriselocal.hour)+':'+str(sunriselocal.minute)+'\n'
sunrisehour = sunriselocal.hour+sunriselocal.minute/60.+sunriselocal.second/3600.
#print (observatory)
#print ('Local mid night in UTC : '+str(observ.date))
#print (minfo,mphasestr,sunsetstr,sunrisestr)
# MAKE RESULT FILE
stryear = str(oneday[0])
strmonth = str(oneday[1])
strday = str(oneday[2]-1)
if int(strmonth) < 10 : strmonth = '0'+strmonth
if int(strday) < 10 : strday = '0'+strday
f = open(save_path+'/'+headname+'-'+stryear+strmonth+strday+"-rts_vis-"+observatory+".txt",'w')
f.write('#\t'+str(observ.date)+' UTC & Day Time Saving +'+str(dst)+'\n')
f.write('#\tObservatory\t= '+observatory+'\n')
f.write('#\t'+sunsetstr)
f.write('#\t'+sunrisestr)
f.write('#\t'+minfo)
f.write('#\t'+mphasestr)
f.write('#\tMoon seperation = '+str(moonseperation)+'\n')
f.write('#\tAltitude limit = '+str(altlimit)+'\n')
f.write('#\tRank : the lower rank, the higher priority\n')
f.write('#------------------------------------------------------- \n')
f.write('name ra dec rise(LT) transit(LT) set(LT) moon_dist(deg) distance(Mpc) rank\n')
numcount = 0
for n in range(len(rad)):
#calculate rise transit set time with altitute limit
param_rts = dict( ra=radd[n],
dec=decdd[n],
date=onedaydt,
lon=obslon,
lat=obslat,
tz=obstz,
limit=altlimit,
precision=1440)
rtscal = obs.rts(**param_rts)
rt = rtscal[0]
tt = rtscal[1]
st = rtscal[2]
if rtscal[0]== None:
#print (objname[n],ra[n],dec[n], rtscal[0], rtscal[1], rtscal[2],"%.2f" % msep[n])
pass
elif sunrisehour < rtscal[0] < sunsethour and sunrisehour < rtscal[2] < sunsethour and sunrisehour < rtscal[1] < sunsethour:
#print (objname[n]+' It can be seen in daytime!')
pass
elif msep[n] < moonseperation or msep[n] > 360-moonseperation:
#print (objname[n]+' too close to Moon < '+str(moonseperation)+' deg')
pass
else:
if numcount < numlimit:
c= SkyCoord(ra=ra[n]*u.degree, dec=dec[n]*u.degree, frame='icrs')
c_ra= c.ra.hms
c_dec= c.dec.dms
nra='%02d:%02d:%.3f' %(c_ra[0], abs(c_ra[1]), abs(c_ra[2]))
ndec='%02d:%02d:%.3f' %(c_dec[0], abs(c_dec[1]), abs(c_dec[2]))
rtp ="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp ="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp ="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis ='{:8s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:4s} {:4s}'.format(objname[n],str(nra),str(ndec),rtp,ttp,stp,str(int(msep[n])),str(int(dist[n])),str(rank[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
if numcount < numlimit:
rtp="%.2d" % int(rt)+':'+"%.2d" % int((rt-int(rt))*60)
ttp="%.2d" % int(tt)+':'+"%.2d" % int((tt-int(tt))*60)
stp="%.2d" % int(st)+':'+"%.2d" % int((st-int(st))*60)
vis='{:24s} {:12s} {:12s} {:5s} {:5s} {:5s} {:3s} {:2s}'.format(objname[n],str(ra[n]),str(dec[n]),rtp,ttp,stp,str(int(msep[n])),str(prior[n]))+'\n'
f.write(vis)
#print (objname[n],ra[n],dec[n], rtp,ttp,stp,"%.2f" % msep[n])
numcount+= 1
else:
pass
'''
f.close()
#-------------------------------------------------------------------------#
def getccdinfo(obs, path_obs):
'''
GET CCD INFORMATION (GAIN, PIXEL SCALE, FOV)
gain, pixscale, fov = getccdinfo(obs, path_obs)
INPUT:
path_obs = '/home/sonic/Research/table/obs.txt'
OUTPUT:
gain, pixscale, fov
'''
obstbl = ascii.read(path_obs)
indx_obs = np.where(obstbl['obs']==obs)
outdict = dict()
gain = obstbl[indx_obs]['gain'][0]
pixscale = obstbl[indx_obs]['pixelscale'][0] * u.arcsecond / u.pixel
fov = obstbl[indx_obs]['fov'][0] * u.deg * u.deg
rdnoise = obstbl[indx_obs]['RDnoise'][0]
outdict['obs'] = obs
outdict['gain'] = gain * u.electron / u.second
outdict['pixelscale'] = pixscale
outdict['fov'] = fov
outdict['rdnoise'] = rdnoise
return outdict
#-------------------------------------------------------------------------#
'''
def wcsremap(inim, tempim, outim='wr.fits'):
# outim = '{}/wr{}'.format(os.path.dirname(inim), os.path.basename(inim))
# com = 'wcsremap -template {} -source {} -outIm {}'.format(inim, tempim, outim)
# com = 'wcsremap -template {} -source {} -outIm {}'.format(tempim, inim, outim)
com = f'wcsremap -template {tempim} -source {inim} -outIm {outim}'.format(tempim, inim, outim)
print(f"""INPUT IMAGE\t: {inim}
TEMP IMAGE\t: {tempim}
OUTPUT IMAGE\t: {outim}""")
print(com)
os.system(com)
'''
#-------------------------------------------------------------------------#
def hotpants(inim, refim, iu=60000, tu=6000000000, tl=-100000):
if os.path.dirname(inim) == '':
interval = './'
else:
interval = '/'
# outim = os.path.dirname(inim)+interval+'hd'+os.path.basename(inim)
outim = '{}{}hd{}'.format(os.path.dirname(inim), interval, os.path.basename(inim))
# convfile = os.path.dirname(inim)+interval+'hc'+os.path.basename(inim)
convfile = '{}{}hc{}'.format(os.path.dirname(inim), interval, os.path.basename(inim))
com = 'hotpants -c t -n i -iu {} -tu {} -tl {} -v 0 -inim {} -tmplim {} -outim {} -oci {}'.format(iu, tu, tl, inim, refim, outim, convfile)
print(com)
os.system(com)
return outim
#-------------------------------------------------------------------------#
def epochimcomb(imlist, outim='imcomb.fits', path_save='.'):
'''
epochimcomb(imlist, outim='imcomb.fits', path_save='.')
imlist = glob.glob('Calib*20181229*.fits')
epochimcomb(imlist)
'''
#------------------------------------------------------------
import numpy as np
from astropy.nddata import fits_ccddata_reader, fits_ccddata_writer
# from astropy.nddata import CCDData
from matplotlib import pyplot as plt
from ccdproc import Combiner
from astropy.time import Time
from astropy.io import fits
from imsng import phot
#------------------------------------------------------------
# EXTRACT INFO. FROM THE FIRST IMAGE
#------------------------------------------------------------
data0 = fits_ccddata_reader(imlist[0], unit='adu')
meta0 = data0.meta
wcs0 = data0.wcs
part = imlist[0].split('-')
#------------------------------------------------------------
# IMAGE COMBINE
#------------------------------------------------------------
comlist = []
dateobslist = []
explist = []
print('{} IMAGE COMBINE START\n'.format(len(imlist)))
for inim in imlist:
print(inim)
hdr = fits.getheader(inim)
dateobslist.append(Time(hdr['DATE-OBS'], format='isot').jd)
explist.append(hdr['EXPTIME'])
comlist.append(fits_ccddata_reader(inim, unit='adu'))
dateobs = Time(np.mean(dateobslist), format='jd')
totexp = np.sum(explist)
try:
comim = '{}-{}-{}-{}-{}-{}-{}-com.fits'.format(part[0], part[1], part[2], dateobs.isot[0:10].replace('-', ''), dateobs.isot[11:19].replace(':', ''), part[5], int(totexp))
except:
print('IMAGE NAME FORMAT IS NOT Calib-... .fits.')
comim = outim
c = Combiner(comlist)
cdata = c.median_combine()
cdata.meta = meta0
cdata.wcs = wcs0
print('OUTPUT IMAGE :\t{}\n'.format(comim))
fits_ccddata_writer(cdata, path_save+'/'+comim)
#------------------------------------------------------------
phot.puthdr(comim, 'TOTEXP', totexp, hdrcomment='Total exposure time in seconds')
phot.puthdr(comim, 'JD', dateobs.jd, hdrcomment='Center Julian Date at start of exposure')
phot.puthdr(comim, 'MJD', dateobs.mjd, hdrcomment='Center Modified Julian Date at start of exposure')
phot.puthdr(comim, 'DATE-OBS', dateobs.isot, hdrcomment='YYYY-MM-DDThh:mm:ss observation start, UT')
phot.puthdr(comim, 'NCOMBINE', len(imlist), hdrcomment='THE NUMBER OF COMBINED IMAGES')
for i, inim in enumerate(imlist):
phot.puthdr(comim, 'COMBINE{}'.format(i+1), inim, hdrcomment='{} COMBINED IMAGE'.format(i+1))
print('DONE')
return comim
#------------------------------------------------------------
def combname(imlist):
import numpy as np
from astropy.time import Time
from astropy.io import fits
#------------------------------------------------------------
# EXTRACT INFO. FROM THE FIRST IMAGE
#------------------------------------------------------------
part = imlist[0].split('-')
#------------------------------------------------------------
# IMAGE COMBINE
#------------------------------------------------------------
comlist = []
dateobslist = []
explist = []
for inim in imlist:
# print(inim)
hdr = fits.getheader(inim)
dateobslist.append(Time(hdr['DATE-OBS'], format='isot').jd)
explist.append(hdr['EXPTIME'])
dateobs = Time(np.mean(dateobslist), format='jd')
totexp = np.sum(explist)
comim = '{}-{}-{}-{}-{}-{}-{}-com.fits'.format(part[0], part[1], part[2], dateobs.isot[0:10].replace('-', ''), dateobs.isot[11:19].replace(':', ''), part[5], int(totexp))
return comim, hdr, dateobs, totexp
#------------------------------------------------------------
def swarpcomb(imlist, listname='obj.list', path_save='.', path_obs = '/home/sonic/Research/table'):
import os, glob
import numpy as np
from imsng import tool, phot
'''
imlist = glob.glob('Calib*.fits')
path_save = '.'
path_obs = '/home/sonic/Research/table'
listname = 'obj.list'
'''
# imlist = glob.glob(imkey); imlist.sort()
f = open(listname, 'w')
for inim in imlist:
f.write(inim+'\n')
# print(inim)
f.close()
comim, hdr, dateobs, totexp = tool.combname(imlist)
part = comim.split('-')
gain, pixscale, fov = tool.getccdinfo(part[1], path_obs)
conf = 'default.swarp'
os.system('swarp -d > {}/{}'.format(path_save, conf))
com = 'swarp @{} -c {} -IMAGEOUT_NAME {} -COMBINE_TYPE MEDIAN -RESAMPLE N -PIXEL_SCALE {} -GAIN_DEFAULT {} -SUBTRACT_BACK Y'.format(listname, conf, comim, pixscale, gain)
print(com)
os.system(com)
phot.puthdr(comim, 'OBJECT', hdr['OBJECT'], hdrcomment='OBJECT')
phot.puthdr(comim, 'TOTEXP', totexp, hdrcomment='Total exposure time in seconds')
phot.puthdr(comim, 'JD', dateobs.jd, hdrcomment='Center Julian Date at start of exposure')
phot.puthdr(comim, 'MJD', dateobs.mjd, hdrcomment='Center Modified Julian Date at start of exposure')
phot.puthdr(comim, 'DATE-OBS', dateobs.isot, hdrcomment='YYYY-MM-DDThh:mm:ss observation start, UT')
phot.puthdr(comim, 'NCOMBINE', len(imlist), hdrcomment='THE NUMBER OF COMBINED IMAGES')
for i, inim in enumerate(imlist):
phot.puthdr(comim, 'COMBINE{}'.format(i+1), inim, hdrcomment='{} COMBINED IMAGE'.format(i+1))
os.system('rm coadd.weight.fits default.swarp obj.list swarp.xml')
return comim
#------------------------------------------------------------
def trim(inim, position, size, outim='trim.fits'):
# Load the image and the WCS
hdu = fits.open(inim)[0]
wcs = WCS(hdu.header)
# Make the cutout, including the WCS
cutout = Cutout2D(hdu.data, position=position, size=size, wcs=wcs)
# Put the cutout image in the FITS HDU
hdu.data = cutout.data
# Update the FITS header with the cutout WCS
hdu.header.update(cutout.wcs.to_header())
# Write the cutout to a new FITS file
hdu.writeto(outim, overwrite=True)
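# Editor's note: a hypothetical usage sketch for trim() (not part of the
# original module). Cutout2D can take either pixel or sky coordinates for
# 'position' and pixel or angular sizes for 'size'; the file name below is made up.
def _trim_example():
    from astropy.coordinates import SkyCoord
    position = SkyCoord(94.1636, -21.4998, unit='deg')   # hypothetical target position
    size = (200, 200)                                     # (ny, nx) cutout size in pixels
    trim('Calib-LOAO-NGC2207-20201216-072752-R-600-com.fits', position, size, outim='trim.fits')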
#------------------------------------------------------------
def calc_app(mag, magerr, gwdist0, gwdiststd0, gwdist1, gwdiststd1):
import numpy as np
app = mag+5*np.log10(gwdist1/gwdist0)
apperr = np.sqrt( (magerr)**2 + ((5*gwdiststd1)/(np.log(5)*gwdist1))**2 + ((5*gwdiststd0)/(np.log(5)*gwdist0))**2 )
return app, apperr
#------------------------------------------------------------
def abs2app(mag, magerr, gwdist, gwdiststd):
import numpy as np
app = 5*np.log10(gwdist)-5+mag
apperr = 5*gwdiststd/(gwdist*np.log(10))
return app, apperr
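# Editor's note: abs2app() above is the standard distance modulus,
# m = M + 5*log10(d) - 5, which implicitly assumes gwdist is given in parsec.
# A hypothetical worked example (the numbers are made up):
def _abs2app_example():
    # M = -16.0 at d = 40 Mpc = 4e7 pc: 5*log10(4e7) - 5 ~ 33.0, so m ~ 17.0
    return abs2app(-16.0, 0.1, 4e7, 4e6)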
#------------------------------------------------------------
def z2dist(z):
from astropy import units as u
from astropy import constants as const
import numpy as np
H0 = 70 * u.km / (u.second * u.Mpc)
c = const.c.to(u.km / u.second)
d = c*z/H0
return d
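# Editor's note (illustration only, not part of the original module): z2dist()
# above is the low-redshift Hubble law d = c*z/H0 with H0 = 70 km/s/Mpc;
# e.g. z = 0.01 gives roughly 299792.458*0.01/70 ~ 42.8 Mpc as an astropy Quantity.
def _z2dist_example():
    return z2dist(0.01)   # ~42.8 Mpc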
#------------------------------------------------------------
def limitmag(ul0, t0, t):
import numpy as np
ul = ul0 -(-2.5*np.log10(np.sqrt(t/t0)))
return ul
#------------------------------------------------------------
def exptime4limitmag(ul0, ul, t0):
import numpy as np
t = t0*(10.**(2*((ul-ul0)/2.5)))
return t
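# Editor's note (illustration only, values are made up): limitmag() scales an
# n-sigma depth with exposure time as ul = ul0 + 2.5*log10(sqrt(t/t0)), and
# exptime4limitmag() is its inverse. Quadrupling a 60 s exposure with a
# 19.5 mag depth gives 19.5 + 2.5*log10(2) ~ 20.25 mag.
def _depth_scaling_example():
    ul = limitmag(19.5, 60., 240.)          # ~20.25 mag
    t = exptime4limitmag(19.5, ul, 60.)     # ~240 s, recovering the input exposure
    return ul, t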
#------------------------------------------------------------
def ToO_request(ul0, m0, n, nsigma=5):
'''
ul0 : base n sigma depth
m0 : target magnitude
n : n*exposure time
nsigma : ? sigma depth (default 5)
return depth, target magnitude error
'''
import numpy as np
ul = ul0+2.5*np.log10(np.sqrt(n))
dul = ul-m0
mer = 1./(nsigma*(dul*(100**0.2)))
return round(ul, 3), round(mer, 3)
#------------------------------------------------------------
def sqsum(a, b):
'''
SQUARE SUM
USEFUL TO CALC. ERROR
'''
return np.sqrt(a**2.+b**2.)
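# Editor's note: sqsum() adds two independent errors in quadrature; for example,
# combining magnitude errors of 0.03 and 0.04 gives sqrt(0.03**2 + 0.04**2) = 0.05.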
#------------------------------------------------------------
def puthdr(inim, hdrkey, hdrval, hdrcomment=''):
from astropy.io import fits
hdr = fits.getheader(inim)
fits.setval(inim, hdrkey, value=hdrval, comment=hdrcomment)
comment = inim+'\t'+'('+hdrkey+'\t'+str(hdrval)+')'
#------------------------------------------------------------
def gregistering(images_to_align, ref_image):
import os
# import sys, glob
import alipy
# from multiprocessing import Process, Pool
# import multiprocessing as mp
import time
starttime = time.time()
if ref_image == '': ref_image = images_to_align[0]
identifications = alipy.ident.run(ref_image, images_to_align, visu=False)
for id in identifications: # list of the same length as images_to_align.
if id.ok == True: # i.e., if it worked
print("%20s : %20s, flux ratio %.2f" % (id.ukn.name, id.trans, id.medfluxratio))
else:
print("%20s : no transformation found !" % (id.ukn.name))
outputshape = alipy.align.shape(ref_image)
for id in identifications:
if id.ok == True:
params_align = dict( filepath = id.ukn.filepath,
uknstarlist = id.uknmatchstars,
refstarlist = id.refmatchstars,
shape = alipy.align.shape(ref_image),
outdir = os.path.dirname(ref_image),
makepng = False)
alipy.align.irafalign(**params_align)
deltime = time.time() - starttime
print('All PROCESS IS DONE.\t('+str(round(deltime, 1))+' sec)')
#-------------------------------------------------------------------------#
def wcsremap(inim, refim, outim, path_com='/data3/wcsremap/wcsremap-1.0.1/wcsremap'):
import os
com = f'{path_com} -template {refim} -source {inim} -outim {outim}'
print(com)
os.system(com)
return outim
#-------------------------------------------------------------------------#
def imcombine_routine(images_to_align, ref_image):
'''
path_data = '/data3/paek/factory/doao/20201209-1m-IMSNG'
images_to_align = sorted(glob.glob('/data3/paek/factory/doao/20201209-1m-IMSNG/Calib-DOAO*-R-60.fits'))
ref_image = '/data3/paek/factory/doao/20201209-1m-IMSNG/Calib-DOAO-NGC6946-20201209-094720-R-60.fits'
'''
from pyraf import iraf
from imsng import tool_tbd
import glob, os
from astropy.io import fits
from astropy.time import Time
import numpy as np
images_to_align.remove(ref_image)
print('Reference image\t: {}'.format(ref_image))
print('Input images\t:')
for inim in images_to_align: print(inim)
hdr = fits.getheader(ref_image)
obs = os.path.basename(ref_image).split('-')[1]
obj = os.path.basename(ref_image).split('-')[2]
# Image align
print('#\tIMAGE REGISTERING')
tool_tbd.gregistering(images_to_align, ref_image)
# for inim in images_to_align: tool_tbd.gregistering(inim, ref_image)
comlist = [ref_image]
for inim in images_to_align: comlist.append(outim_gregistering(inim))
jdlist = []
exptimes = []
print('Summon {}/imcombine.list'.format(os.path.dirname(ref_image)))
f = open('{}/imcombine.list'.format(os.path.dirname(ref_image)), 'w')
for i, comin in enumerate(comlist):
f.write('{}\n'.format(comin))
hdr_tmp = fits.getheader(comin)
jdlist.append(float(hdr_tmp['jd']))
exptimes.append(float(hdr_tmp['exptime']))
hdr['IMCOMB{}'.format(i)] = comin
f.close()
exptime = np.sum(exptimes)
jd = Time(np.mean(jdlist), format='jd')
dateobs = jd.isot
utdate = dateobs.split('T')[0].replace('-', '')
uttime = dateobs.split('T')[1].replace(':', '')[:6]
outim = '{}/Calib-{}-{}-{}-{}-{}-{}.com.fits'.format(os.path.dirname(ref_image), obs, obj, utdate, uttime, hdr['filter'], int(exptime))
param_imcomb = dict(
input="@{}/imcombine.list".format(os.path.dirname(ref_image)),
output=outim,
combine="median",
project="no",
reject="none",
scale="none",
zero="mode",
)
print('#\t{} IMAGE IMCOMBINE'.format(len(comlist)))
iraf.imcombine(**param_imcomb)
tool_tbd.puthdr(outim, 'DATE-OBS', dateobs, hdrcomment='YYYY-MM-DDThh:mm:ss observation start, UT')
tool_tbd.puthdr(outim, 'JD', jd.value, hdrcomment='Julian Date at start of exposure')
tool_tbd.puthdr(outim, 'EXPTIME', exptime, hdrcomment='Exposure time in seconds')
# for i, comin in enumerate(comlist): tool_tbd.puthdr(outim, 'IMCOMB{}'.format(i), os.path.basename(comin), hdrcomment='Combined image {}'.format(i))
return outim
#-------------------------------------------------------------------------#
def outim_gregistering(inim):
part = os.path.splitext(inim)
outim = '{}_gregister{}'.format(part[0], part[1])
return outim
#-------------------------------------------------------------------------#
def subtraction_routine(inim, refim):
'''
obs = 'LOAO'
path_refim = '/data3/paek/factory/ref_frames/{}'.format(obs)
inim = '/data3/paek/factory/test/Calib-LOAO-NGC6946-20201213-014607-R-180-com.fits'
obj = 'NGC6946'
filte = 'R'
'''
# inseeing = fits.getheader(inim)['seeing']
# refseeing = fits.getheader(refim)['seeing']
# if inseeing > refseeing:
# images_to_align = [inim]
# ref_image = refim
# else:
# images_to_align = [refim]
# ref_image = inim
gregistering([refim], inim)
# Registered reference image
grefim = '{}/{}'.format(os.path.dirname(inim), os.path.basename(outim_gregistering(refim)))
subim = hotpants(inim, grefim, iu=60000, tu=6000000000, tl=-100000)
ds9com = 'ds9 {} {} {}&'.format(inim, grefim, subim)
# os.system(ds9com)
return subim, ds9com
#-------------------------------------------------------------------------#
def subtraction_routine2(inim, refim):
'''
obs = 'LOAO'
path_refim = '/data3/paek/factory/ref_frames/{}'.format(obs)
inim = '/data3/paek/factory/test/Calib-LOAO-NGC6946-20201213-014607-R-180-com.fits'
obj = 'NGC6946'
filte = 'R'
'''
# inseeing = fits.getheader(inim)['seeing']
# refseeing = fits.getheader(refim)['seeing']
# if inseeing > refseeing:
# images_to_align = [inim]
# ref_image = refim
# else:
# images_to_align = [refim]
# ref_image = inim
outim = refim.replace('.fits', '.wcsremap.fits')
wcsremap(refim, inim, outim)
# Registered reference image
subim = hotpants(inim, outim, iu=60000, tu=6000000000, tl=-100000)
ds9com = 'ds9 {} {} {}&'.format(inim, outim, subim)
# os.system(ds9com)
return outim, ds9com
#-------------------------------------------------------------------------#
def stampimage(inim, x, y, name='IMSNG_transient', outname='./IMSNG_transient.png'):
import matplotlib.pyplot as plt
from astropy.io import fits
from matplotlib.colors import LogNorm
from matplotlib.patches import Circle
from astropy.visualization import ZScaleInterval, LinearStretch
from astropy.wcs import WCS
from astropy.visualization import (MinMaxInterval, SqrtStretch, ImageNormalize)
# from ligo.skymap.plot.marker import reticle
'''
	PLOT IMAGE AND SHOW DESIGNATED OBJECTS
rahms = '06:16:39.2560'
decdms = '-21:29:59.370'
name = 'J{}{}'.format(rahms[:11].replace(':','').replace('.',''), decdms[:11].replace(':','').replace('.',''))
imsngname = 'IMSNG {}'.format(name)
# input
# inim = '/data3/IMSNG/IMSNGgalaxies/NGC2207/LOAO/R/Calib-LOAO-NGC2207-20201216-072752-R-600-com.fits'
inim = '/data1/Test/Calib-LOAO-NGC2207-20201216-072752-R-600-com.fits'
outname = '{}/test.png'.format(os.path.dirname(inim))
txt = imsngname
# 6:16:39.2560, -21:29:59.370
# 94.16356667, -21.499825
# 1448.5771, 1631.7396
'''
size = 100
# x, y = round(1448.5771), round(1631.7396)
x1, x2 = x-size, x+size
y1, y2 = y-size, y+size
# image information
data0, hdr = fits.getdata(inim, header=True)
# data0[y1:y2, x1:x2]
data = data0[y1:y2, x1:x2]
# wcs = WCS(hdr)
# plot
plt.close('all')
plt.rc('font', family='serif')
fig = plt.figure()
x = 1080 / 4 / fig.dpi
y = 1080 / 4 / fig.dpi
fig.set_figwidth(x)
fig.set_figheight(y)
ax = fig.add_subplot(111)
norm_zscale = ImageNormalize(data, interval=ZScaleInterval(), stretch=LinearStretch())
im = ax.imshow(data, cmap='gray', origin='lower', norm=norm_zscale)
# Marker and text --> not use
# marker = reticle(
# # inner=50.0, outer=150.0,
# which='lt', angle=180
# )
# ax.plot(round(x), round(y), markersize=100, markeredgewidth=4, marker=marker, color='yellow')
# ax.text(x, y+size/10, str(txt), color='gold', fontsize=5)
ax.set_title(name, fontsize=10)
ax.grid('both', linestyle='--', color='white', alpha=0.5)
ax.set_xlabel('x [pix]', fontsize=12)
ax.set_ylabel('y [pix]', fontsize=12)
plt.tight_layout()
plt.minorticks_on()
	fig.savefig(outname, bbox_inches='tight')
#-------------------------------------------------------------------------#
def dict2table(dictionary, path_save):
print('Dictionary to table @{}'.format(path_save))
keys = list(dictionary.keys())
f = open(path_save, 'w')
f.write('{}\t{}\n'.format('key', 'value'))
for key in keys:
f.write('{}\t{}\n'.format(key, dictionary[key]))
f.close()
#-------------------------------------------------------------------------#
def imsng_name_correction(inim, alltbl, radius):
'''
path_alltarget = '/home/paek/table/alltarget.dat'
alltbl = ascii.read(path_alltarget)
inim = '/data3/IMSNG/IMSNGgalaxies/NGC0772/CBNUO/R/Calib-CBNUO-NGC0772-20201210-144734-R-180.fits'
'''
import os
from astropy.coordinates import SkyCoord
import astropy.io.ascii as ascii
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
from imsng import calib
# center x, y --> ra, dec
w = WCS(inim)
hdr = fits.getheader(inim)
xcent, ycent= hdr['NAXIS1']/2., hdr['NAXIS2']/2.
ra, dec = w.all_pix2world(xcent, ycent, 1)
# matching
c = SkyCoord(ra, dec, frame='icrs', unit='deg')
c_all = SkyCoord(alltbl['ra'], alltbl['dec'], unit=(u.hourangle, u.deg))
indx, sep, _ = c.match_to_catalog_sky(c_all)
# object (header, closest matched one)
obj = hdr['object']
robj = alltbl['obj'][indx]
if (obj != robj) & (sep < radius):
print('Image OBJECT header\t\t:{}'.format(hdr['object']))
print('Real OBJECT field\t\t:{}'.format(robj))
print('Separation {} and {}\t:{}'.format(obj, robj, sep.to(u.arcmin)))
puthdr(inim, 'OBJECT', robj)
# calib.changehdr(inim, 'OBJECT', robj)
# file name change
# newim = inim.replace(obj, robj)
# mvcom = 'mv {} {}'.format(inim, newim)
# print(mvcom)
# os.system(mvcom)
else:
print('Object header is correct.')
pass
return robj, sep
#-------------------------------------------------------------------------#
def SE_seeing(inim, obs, path_obs, path_config, seeing_assume, frac=0.68, clean=True):
# import os
# import numpy as np
# from imsng import tool_tbd
# from imsng import phot_tbd
# from astropy import units as u
# from astropy.io import fits
# from astropy.io import ascii
# import matplotlib.pyplot as plt
# print('Quick seeing measurement with SE')
'''
inim = '/data3/paek/factory/loao/2020_1215/afzobj.NGC2207.20201216.0211.fits'
path_config = '/home/paek/config'
obs = 'LOAO'
path_obs = '/home/paek/table/obs.dat'
seeing_assume = 3 * u.arcsecond
frac = 0.68
'''
#------------------------------------------------------------
# Input
#------------------------------------------------------------
hdr = fits.getheader(inim)
a = hdr['naxis1']/2.
b = hdr['naxis2']/2.
#------------------------------------------------------------
# CCD information
obsdict = getccdinfo(obs, path_obs)
gain = obsdict['gain']
pixscale = obsdict['pixelscale']
fov = obsdict['fov']
# rdnoise = obsdict['readoutnoise']
#------------------------------------------------------------
# OUTPUT NAMES
fmt0 = '.fits'
fmt1 = '.fit'
fmt2 = '.fts'
if fmt0 in inim:
cat = '{}/{}'.format(os.path.dirname(inim), os.path.basename(inim).replace(fmt0, '.cat'))
elif fmt1 in inim:
cat = '{}/{}'.format(os.path.dirname(inim), os.path.basename(inim).replace(fmt1, '.cat'))
elif fmt2 in inim:
cat = '{}/{}'.format(os.path.dirname(inim), os.path.basename(inim).replace(fmt2, '.cat'))
# cat = '{}/{}'.format(os.path.dirname(inim), os.path.basename(inim).replace('.fits', '.cat'))
# SE configurations
param = '{}/simple.param'.format(path_config)
conv = '{}/simple.conv'.format(path_config)
nnw = '{}/simple.nnw'.format(path_config)
conf = '{}/simple.sex'.format(path_config)
# SE parameters
param_insex = dict(
#------------------------------
# CATALOG
#------------------------------
CATALOG_NAME = cat,
#------------------------------
# CONFIG FILES
#------------------------------
CONF_NAME = conf,
PARAMETERS_NAME = param,
FILTER_NAME = conv,
STARNNW_NAME = nnw,
#------------------------------
# PHOTOMETRY
#------------------------------
GAIN = str(gain.value),
PIXEL_SCALE = str(pixscale.value),
#------------------------------
# STAR/GALAXY SEPARATION
#------------------------------
SEEING_FWHM = str(seeing_assume.value),
)
com = phot_tbd.sexcom(inim, param_insex)
os.system(com)
rawtbl = ascii.read(cat)
# Point source selection
indx_sel = np.where(
(rawtbl['FLAGS'] == 0) &
(sqsum((rawtbl['X_IMAGE']-a)/a, (rawtbl['Y_IMAGE']-b)/b) < frac) &
(rawtbl['CLASS_STAR']>0.9) &
(rawtbl['FWHM_WORLD']>0.0)
)
seltbl = rawtbl[indx_sel]
# Seeing in arcsecond/pixel as median value
seeing = np.median(seltbl['FWHM_WORLD'].to(u.arcsecond))
peeing = np.median(seltbl['FWHM_IMAGE']) * u.pix
# Header update
try:
puthdr(inim, hdrkey='SEEING', hdrval=seeing.value, hdrcomment='SEEING [arcsec]')
puthdr(inim, hdrkey='PEEING', hdrval=peeing.value, hdrcomment='PEEING [pix]')
except:
print('try/except: Too low stars to measure seeing. Use 3.0 arcsecond seeing.')
puthdr(inim, hdrkey='SEEING', hdrval=3.0, hdrcomment='SEEING [arcsec]')
		puthdr(inim, hdrkey='PEEING', hdrval=(3.0*u.arcsecond/pixscale).value, hdrcomment='PEEING [pix]')
# Clean output catalog
if clean == True:
rmcom = 'rm {}'.format(cat)
# print(rmcom)
os.system(rmcom)
else:
pass
return seeing, peeing
#------------------------------------------------------------
def cr_removal(inim, gain, rdnoise):
'''
inim
obs = 'LOAO'
gain = 2.68
rdnoise = 4.84
'''
import os
from astroscrappy import detect_cosmics
from astropy.io import fits
import time
data, hdr = fits.getdata(inim, header=True)
param_cr = dict(
indat=data,
sigclip=4.0,
sigfrac=0.3,
objlim=5.0,
gain=gain, readnoise=rdnoise,
pssl=0.0,
niter=4,
sepmed=True,
cleantype='meanmask',
fsmode='median',
psfmodel='gauss',
psffwhm=hdr['seeing'],
# Default
inmask=None,
satlevel=65536,
psfsize=7,
psfk=None, psfbeta=4.765,
verbose=False
)
time_st = time.time()
_, crdata = detect_cosmics(**param_cr)
fits.writeto('{}/cr{}'.format(os.path.dirname(inim), os.path.basename(inim)), crdata, hdr, overwrite=True)
	puthdr('{}/cr{}'.format(os.path.dirname(inim), os.path.basename(inim)), hdrkey='history', hdrval='Cosmic-rays were removed with the LACosmic {}'.format(time.strftime("%c")), hdrcomment='')
time_delta = time.time() - time_st
print('Remove cosmic-ray for {} [{} sec]'.format(inim, round(time_delta, 3)))
#------------------------------------------------------------
def npstr2str(arr):
outlist = []
for i in arr:
outlist.append(str(i))
outarr = np.array(outlist)
# return outarr
return outlist
#------------------------------------------------------------
def obs_summary(filte, ic_cal_phot, ic_com_phot, path_save):
'''
# Observation summary plots
# 2020.12.26 Created by <NAME>
filte = 'R'
ic_cal_phot, ic_com_phot
path_save = path_data
'''
#============================================================
# import os, glob
import numpy as np
import matplotlib.pyplot as plt
# from astropy.table import Table, vstack
# from astropy.io import ascii
# from astropy.io import fits
from astropy.time import Time
# from astropy.coordinates import SkyCoord
# from astropy import units as u
from astropy.wcs import WCS
# from astropy import constants as const
# from imsng import phot, tool
from matplotlib.gridspec import GridSpec
# import time
#============================================================
# Input
#============================================================
# filte = 'R'
# path_save = path_data
#============================================================
# USER SETTING
#============================================================
# PATH
#------------------------------------------------------------
ic_cl_pht = ic_cal_phot.filter(filter=filte)
ic_cl_pht.summary.sort('jd')
cltbl = ic_cl_pht.summary[
(ic_cl_pht.summary['jd'].mask == False) &
(ic_cl_pht.summary['ul5_1'].mask == False) &
(ic_cl_pht.summary['seeing'].mask == False) &
(ic_cl_pht.summary['skyval'].mask == False) &
(ic_cl_pht.summary['skysig'].mask == False)
]
ic_cm_pht = ic_com_phot.filter(filter=filte)
ic_cm_pht.summary.sort('jd')
cmtbl = ic_cm_pht.summary[
(ic_cm_pht.summary['jd'].mask == False) &
(ic_cm_pht.summary['ul5_1'].mask == False) &
(ic_cm_pht.summary['seeing'].mask == False) &
(ic_cm_pht.summary['skyval'].mask == False) &
(ic_cm_pht.summary['skysig'].mask == False)
]
#------------------------------------------------------------
# t0 = Time('2020-12-05T12:14:04', format='isot', scale='utc')
time_t0 = Time(np.min(cltbl['jd']), format='jd')
t0 = time_t0.jd
cmtbl['delt'] = cmtbl['jd'] - t0
cltbl['delt'] = cltbl['jd'] - t0
delt_com = np.copy(cmtbl['delt']).astype('float64')
delt_cal = np.copy(cltbl['delt']).astype('float64')
totalt_obs = 24*(np.max(delt_cal) - np.min(delt_cal))
#============================================================
# PLOT
#------------------------------------------------------------
plt.close('all')
plt.rc('font', family='serif')
fig = plt.figure()
x = 1920 / 2 / fig.dpi
y = 1080 / fig.dpi
fig.set_figwidth(x)
fig.set_figheight(y)
#------------------------------------------------------------
# GRID
#------------------------------------------------------------
ncols = 3
nrows = 5
grid = GridSpec(nrows, ncols,
left=0.1, bottom=0.15, right=0.94, top=0.94, wspace=3, hspace=0.1)
ax1 = fig.add_subplot(grid[0:1, 0:ncols])
ax2 = fig.add_subplot(grid[1:2, 0:ncols])
ax3 = fig.add_subplot(grid[2:3, 0:ncols])
ax4 = fig.add_subplot(grid[3:4, 0:ncols])
ax5 = fig.add_subplot(grid[4:nrows, 0:ncols])
#------------------------------------------------------------
param_plot = dict(
# fmt='o-',
ms=5,
marker='o',
mec='k',
mfc='None',
color='silver',
# alpha=0.75,
alpha=0.5,
)
param_plot_com = dict(
# fmt='o-',
ms=10,
marker='v',
mec='k',
mfc='None',
color='silver',
alpha=0.75,
)
#------------------------------------------------------------
# Depth
#------------------------------------------------------------
depth = np.copy(cltbl['ul5_1']).astype('float64')
depth_com = np.copy(cmtbl['ul5_1']).astype('float64')
ax1.plot(delt_cal, depth, **param_plot)#, label=r'5$\sigma$ depth')
ax1.axhline(y=np.median(depth), color='dodgerblue', alpha=0.5, linestyle='--', label='Single = {}'.format(round(np.median(depth), 1)))
ax1.plot(delt_com, depth_com, **param_plot_com)#, label=r'5$\sigma$ depth')
ax1.axhline(y=np.median(depth_com), color='tomato', linestyle='--', label='Combined = {}'.format(round(np.median(depth_com), 1)))
#------------------------------------------------------------
# Seeing
#------------------------------------------------------------
seeing = np.copy(cltbl['seeing']).astype('float64')
# ax2.plot(delt_cal, seeing, 'o-')#, label=r'5$\sigma$ depth')
ax2.plot(delt_cal, seeing, **param_plot)#, label=r'5$\sigma$ depth')
ax2.axhline(y=np.median(seeing), color='dodgerblue', alpha=0.5, linestyle='--', label='Median = {}'.format(round(
|
np.median(seeing)
|
numpy.median
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hdf5storage
import os
import numpy as np
# This instantiates local generators for decoupled randomness
from numpy.random import Generator, PCG64
# Perform a single-shot decode, while passing through the file system
def decode_matlab_file(eng, code_type, input_llr, ref_bits, num_snr, num_codewords, mode='soft'):
# Static path
# !!!: This is the full path of the decoder .m function
decoder_path = 'InsertPathToMatlabDecoder.m'
# Draw a random integer (independent of global seed)
rg = Generator(PCG64())
    random_idx = np.round(1e10 * rg.standard_normal()).astype(int)
# Filenames
filename_in = 'scratch/in%d.mat' % random_idx
filename_out = 'scratch/out%d.mat' % random_idx
# Copy
input_llr = np.copy(input_llr)
# Restore and reshape
if mode == 'soft':
input_llr = 2 * np.arctanh(input_llr)
input_llr = np.reshape(input_llr, (num_snr, num_codewords, -1))
# Write to input file
hdf5storage.savemat(filename_in, {'llr_input': input_llr,
'code_type': code_type})
# Create input dictionary
args = {'filename_in': filename_in,
'filename_out': filename_out}
# Call decoder
_ = eng.run_func(decoder_path, args)
# Read output file
contents = hdf5storage.loadmat(filename_out)
rec_bits = contents['bits_out']
# Convert to arrays
rec_bits = np.asarray(rec_bits)
# Compute error rates
bler = np.mean(
|
np.any(rec_bits != ref_bits, axis=-1)
|
numpy.any
|
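# A toy illustration (hypothetical arrays, not from the original pipeline) of the
# error-rate reduction used above: np.any over the last axis flags a block error
# per codeword, and np.mean then averages those flags into a BLER.
import numpy as np
ref_bits = np.zeros((2, 4, 8), dtype=int)   # (num_snr, num_codewords, code length)
rec_bits = ref_bits.copy()
rec_bits[0, 1, 3] = 1                       # one flipped bit -> one block error
bler = np.mean(np.any(rec_bits != ref_bits, axis=-1))   # 1/8 of codewords in error
ber = np.mean(rec_bits != ref_bits)                      # 1/64 of bits in error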
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import norm
import pandas as pd
from tqdm import tqdm
class BlackScholes__Pricing_Simulations:
def __init__(self, call, stock, strike, maturity, interest, volatility, dividend, Npath, mu):
self.call = call
self.stock = stock
self.strike = strike
self.maturity = maturity
self.interest = interest
self.volatility = volatility
self.dividend = dividend
self.dt = 1/360
self.Npath = Npath
self.mu = mu
self.d1 = (self.volatility * np.sqrt(self.maturity)) ** (-1) * (
np.log(self.stock / self.strike) + (
self.interest - self.dividend + self.volatility ** 2 / 2) * self.maturity)
self.d2 = self.d1 - self.volatility * np.sqrt(self.maturity)
def price(self):
if self.call:
return np.exp(-self.dividend * self.maturity) * norm.cdf(self.d1) * self.stock - norm.cdf(self.d2) * self.strike * np.exp(-self.interest * self.maturity)
else:
return norm.cdf(-self.d2) * self.strike * np.exp(-self.interest * self.maturity) - norm.cdf(-self.d1) * self.stock * np.exp(-self.dividend * self.maturity)
def delta(self):
if self.call:
return norm.cdf(self.d1) * np.exp(-self.dividend * self.maturity)
else:
return (norm.cdf(self.d1) - 1) * np.exp(-self.dividend * self.maturity)
def gamma(self):
return np.exp(-self.dividend * self.maturity) * norm.pdf(self.d1) / (
self.stock * self.volatility * np.sqrt(self.maturity))
def vega(self):
return self.stock * norm.pdf(self.d1) * np.sqrt(self.maturity) * np.exp(-self.dividend * self.maturity)
def theta(self):
if self.call:
return -np.exp(-self.dividend * self.maturity) * (self.stock * norm.pdf(self.d1) * self.volatility) / (
2 * np.sqrt(self.maturity)) - self.interest * self.strike * np.exp(
                -self.interest * self.maturity) * norm.cdf(self.d2) + self.dividend * self.stock * np.exp(-self.dividend * self.maturity) * norm.cdf(self.d1)
else:
return -np.exp(-self.dividend * self.maturity) * (self.stock * norm.pdf(self.d1) * self.volatility) / (
2 * np.sqrt(self.maturity)) + self.interest * self.strike * np.exp(
                -self.interest * self.maturity) * norm.cdf(-self.d2) - self.dividend * self.stock * np.exp(
-self.dividend * self.maturity) * norm.cdf(-self.d1)
def rho(self):
if self.call:
return self.strike * self.maturity * np.exp(-self.interest * self.maturity) * norm.cdf(self.d2)
else:
return -self.strike * self.maturity * np.exp(-self.interest * self.maturity) * norm.cdf(-self.d2)
def monte_carlo_bs(self):
St_P = [self.stock] * self.Npath
St_Q = [self.stock] * self.Npath
        for t in range(0, int(self.maturity/self.dt)):
rand = np.random.normal(0, 1, [1, self.Npath])
St_P *= ( np.exp ( self.dt * ( self.mu + self.interest - 0.5 * self.volatility ** 2) + self.volatility * np.sqrt(self.dt)* rand))
St_Q *= ( np.exp ( self.dt * ( self.mu - 0.5 * self.volatility ** 2) + self.volatility *
|
np.sqrt(self.dt)
|
numpy.sqrt
|
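# A minimal usage sketch for the class above. The parameter values are hypothetical
# and it assumes the truncated monte_carlo_bs method is completed as in the source file.
bs = BlackScholes__Pricing_Simulations(call=True, stock=100.0, strike=100.0,
                                       maturity=1.0, interest=0.05, volatility=0.2,
                                       dividend=0.0, Npath=10000, mu=0.0)
print(bs.price(), bs.delta(), bs.gamma(), bs.vega())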
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
from op_tester import op_tester
def test_cumsum_1d(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_1d_exclusive(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
expected = np.array([0., 1., 3., 6., 10.]).astype(np.float32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], exclusive=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected)
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_1d_reverse(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
tx = torch.flip(tx, [0])
out = torch.cumsum(tx, 0)
out = torch.flip(out, [0])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_1d_reverse_exclusive(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
expected = np.array([14., 12., 9., 5., 0.]).astype(np.float32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1, exclusive=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected)
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_2d_axis_0(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_2d_axis_1(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(1).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_2d_negative_axis(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(-1).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, axis.item(0))
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d(op_tester):
a0 = np.array([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[20, 22, 24, 26, 28], [30, 32, 34, 36, 38],
[40, 42, 44, 46, 48], [50, 52, 54, 56, 58]],
[[60, 63, 66, 69, 72], [75, 78, 81, 84, 87],
[90, 93, 96, 99, 102], [105, 108, 111, 114,
117]]]).astype(np.float32)
a1 = np.array([[[0, 1, 2, 3, 4], [5, 7, 9, 11, 13], [15, 18, 21, 24, 27],
[30, 34, 38, 42, 46]],
[[20, 21, 22, 23, 24], [45, 47, 49, 51, 53],
[75, 78, 81, 84, 87], [110, 114, 118, 122, 126]],
[[40, 41, 42, 43, 44], [85, 87, 89, 91, 93],
[135, 138, 141, 144, 147], [190, 194, 198, 202,
206]]]).astype(np.float32)
a2 = np.array([[[0, 1, 3, 6, 10], [5, 11, 18, 26, 35],
[10, 21, 33, 46, 60], [15, 31, 48, 66, 85]],
[[20, 41, 63, 86, 110], [25, 51, 78, 106, 135],
[30, 61, 93, 126, 160], [35, 71, 108, 146, 185]],
[[40, 81, 123, 166, 210], [45, 91, 138, 186, 235],
[50, 101, 153, 206, 260], [55, 111, 168, 226,
285]]]).astype(np.float32)
am1 = a2
am2 = a1
am3 = a0
expected = {-3: am3, -2: am2, -1: am1, 0: a0, 1: a1, 2: a2}
testAxis = np.array([-3, -2, -1, 0, 1, 2]).astype(np.int32)
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected[a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_v2(op_tester):
testAxis = [-3, -2, -1, 0, 1, 2]
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
out = torch.cumsum(tx, a)
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_reverse(op_tester):
a0 = np.array([[[60, 63, 66, 69, 72], [75, 78, 81, 84, 87],
[90, 93, 96, 99, 102], [105, 108, 111, 114, 117]],
[[60, 62, 64, 66, 68], [70, 72, 74, 76, 78],
[80, 82, 84, 86, 88], [90, 92, 94, 96, 98]],
[[40, 41, 42, 43, 44], [45, 46, 47, 48, 49],
[50, 51, 52, 53, 54], [55, 56, 57, 58,
59]]]).astype(np.float32)
a1 = np.array([[[30, 34, 38, 42, 46], [30, 33, 36, 39, 42],
[25, 27, 29, 31, 33], [15, 16, 17, 18, 19]],
[[110, 114, 118, 122, 126], [90, 93, 96, 99, 102],
[65, 67, 69, 71, 73], [35, 36, 37, 38, 39]],
[[190, 194, 198, 202, 206], [150, 153, 156, 159, 162],
[105, 107, 109, 111, 113], [55, 56, 57, 58,
59]]]).astype(np.float32)
a2 = np.array([[[10, 10, 9, 7, 4], [35, 30, 24, 17, 9],
[60, 50, 39, 27, 14], [85, 70, 54, 37, 19]],
[[110, 90, 69, 47, 24], [135, 110, 84, 57, 29],
[160, 130, 99, 67, 34], [185, 150, 114, 77, 39]],
[[210, 170, 129, 87, 44], [235, 190, 144, 97, 49],
[260, 210, 159, 107, 54], [285, 230, 174, 117,
59]]]).astype(np.float32)
am1 = a2
am2 = a1
am3 = a0
expected = {-3: am3, -2: am2, -1: am1, 0: a0, 1: a1, 2: a2}
testAxis = np.array([-3, -2, -1, 0, 1, 2]).astype(np.int32)
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected[a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_reverse_v2(op_tester):
testAxis = [-3, -2, -1, 0, 1, 2]
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
tx = torch.tensor(x)
tx = torch.flip(tx, [a])
out = torch.cumsum(tx, a)
out = torch.flip(out, [a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_3d_exclusive(op_tester):
# Expected from tf as pytorch does not support
# exclusive and reverse.
a0 = np.array([[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
[[20, 22, 24, 26, 28], [30, 32, 34, 36, 38],
[40, 42, 44, 46, 48], [50, 52, 54, 56,
58]]]).astype(np.float32)
a1 = np.array(
[[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4], [5, 7, 9, 11, 13],
[15, 18, 21, 24, 27]],
[[0, 0, 0, 0, 0], [20, 21, 22, 23, 24], [45, 47, 49, 51, 53],
[75, 78, 81, 84, 87]],
[[0, 0, 0, 0, 0], [40, 41, 42, 43, 44], [85, 87, 89, 91, 93],
[135, 138, 141, 144, 147]]]).astype(np.float32)
a2 = np.array([[[0, 0, 1, 3, 6], [0, 5, 11, 18, 26], [0, 10, 21, 33, 46],
[0, 15, 31, 48, 66]],
[[0, 20, 41, 63, 86], [0, 25, 51, 78, 106],
[0, 30, 61, 93, 126], [0, 35, 71, 108, 146]],
[[0, 40, 81, 123, 166], [0, 45, 91, 138, 186],
[0, 50, 101, 153, 206], [0, 55, 111, 168,
226]]]).astype(np.float32)
am1 = a2
am2 = a1
am3 = a0
expected = {-3: am3, -2: am2, -1: am1, 0: a0, 1: a1, 2: a2}
testAxis = np.array([-3, -2, -1, 0, 1, 2]).astype(np.int32)
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], exclusive=1)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
out = torch.tensor(expected[a])
return [out]
op_tester.run(init_builder, reference, 'infer')
def test_cumsum_grad_1d(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i0,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
tx = torch.tensor(x, requires_grad=True)
out = torch.cumsum(tx, axis.item(0))
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, tx.grad, None]
op_tester.run(init_builder, reference, 'train')
def test_cumsum_grad_1d_reverse(op_tester):
x = np.array([1., 2., 3., 4., 5.]).astype(np.float32)
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1], reverse=1)
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i0,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
tx = torch.tensor(x, requires_grad=True)
tx = torch.flip(tx, [0])
out = torch.cumsum(tx, 0)
out = torch.flip(out, [0])
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, tx.grad, None]
op_tester.run(init_builder, reference, 'train')
def test_cumsum_grad_2d_axis_0(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(0).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i0,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
tx = torch.tensor(x, requires_grad=True)
out = torch.cumsum(tx, axis.item(0))
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, tx.grad, None]
op_tester.run(init_builder, reference, 'train')
def test_cumsum_grad_2d_axis_1(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(1).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i0,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
tx = torch.tensor(x, requires_grad=True)
out = torch.cumsum(tx, axis.item(0))
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, tx.grad, None]
op_tester.run(init_builder, reference, 'train')
def test_cumsum_grad_2d_negative_axis(op_tester):
x = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float32).reshape((2, 3))
axis = np.array(-1).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i0,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
tx = torch.tensor(x, requires_grad=True)
out = torch.cumsum(tx, axis.item(0))
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, tx.grad, None]
op_tester.run(init_builder, reference, 'train')
def test_cumsum_grad_3d(op_tester):
testAxis = [-3, -2, -1, 0, 1, 2]
for a in testAxis:
x = np.arange(60).astype(np.float32).reshape((3, 4, 5))
axis = np.array(a).astype(np.int32)
def init_builder(builder):
i0 = builder.addInputTensor(x)
i1 = builder.aiOnnxOpset11.constant(axis)
o = builder.aiOnnxOpset11.cumsum([i0, i1])
builder.addOutputTensor(o)
return [
o,
popart.reservedGradientPrefix() + i0,
popart.reservedGradientPrefix() + o,
]
def reference(ref_data):
tx = torch.tensor(x, requires_grad=True)
out = torch.cumsum(tx, a)
d__o = ref_data.getOutputTensorGrad(0)
out.backward(torch.tensor(d__o))
return [out, tx.grad, None]
op_tester.run(init_builder, reference, 'train')
def test_cumsum_grad_3d_reverse(op_tester):
testAxis = [-3, -2, -1, 0, 1, 2]
for a in testAxis:
x =
|
np.arange(60)
|
numpy.arange
|
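# Hedged aside: the exclusive/reverse variants exercised by the tests above can be
# reproduced in plain numpy, which makes the expected arrays easy to verify.
import numpy as np
x = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
exclusive = np.concatenate(([0.], np.cumsum(x)[:-1]))                      # [0, 1, 3, 6, 10]
reverse_exclusive = np.concatenate((np.cumsum(x[::-1])[:-1][::-1], [0.]))  # [14, 12, 9, 5, 0]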
#encoding:utf-8
import pandas as pd
import numpy as np
import os
import shutil
from scipy import stats
import random
import sys
global_TF_num = 591
def Build_Result(cell,cluster_result,path):
try:
shutil.rmtree(path)
os.makedirs(path)
except:
print ("First Time")
os.makedirs(path)
#HiC_Value = np.load("../Temp/%s/HiC_Value.npy" %(cell))
Node2Bin = Load_dict("../Temp/%s/node2bin.npy" %(cell))
Bin2Gene = Load_dict("../Temp/%s/bin2gene.npy" %(cell))
connect = np.load("../Temp/%s/global_connect.npy" %(cell))
connect = np.triu(connect,k = 1)
print ("Number of clusters = %d" % len(cluster_result))
size_list = []
print (len(cluster_result))
for i in range(len(cluster_result)):
cluster = cluster_result[i]
size_list.append(len(cluster))
f = open(path+str(i)+".txt","w")
f.write("Source\tTarget\tWeight\tEdgeType\tSourceType\tTargetType\n")
cluster = np.asarray(cluster)
cluster = np.sort(cluster)
cluster = cluster.astype(int)
local_connect = connect[cluster,:]
local_connect = local_connect[:,cluster]
targetindex = np.nonzero(local_connect)
for k in range(len(targetindex[0])):
SourceType = Node2Bin[cluster[targetindex[0][k]]]
TargetType = Node2Bin[cluster[targetindex[1][k]]]
Target = Bin2Gene[TargetType]
if cluster[targetindex[0][k]] < global_TF_num:
Source = SourceType
EdgeType = 'D'
else:
Source = Bin2Gene[SourceType]
EdgeType = 'U'
#Weight = max(HiC_Value[cluster[targetindex[0][k]],cluster[targetindex[1][k]]],HiC_Value[cluster[targetindex[1][k]],cluster[targetindex[0][k]]])
Weight = 0
f.write("%s\t%s\t%f\t%s\t%s\t%s\n" %(Source,Target,Weight,EdgeType,SourceType,TargetType))
f.close()
def getlist(path):
file = open(path,"r")
lines = file.readlines()
list1 = []
for line in lines:
list1.append(line.strip())
print (list1)
return list1
def Load_dict(path):
return np.load(path,allow_pickle=True).item()
def Motifcount_npdd(connect,TF_num):
#print "Start Counting"
split_point = TF_num
#Only focus on pro - dna interaction
pro_connect =
|
np.copy(connect)
|
numpy.copy
|
import argparse
import math
import numpy as np
import xxhash
class OLH():
def __init__(self, repeat=1):
self.repeat = repeat
self.real_dist = []
self.estimate_dist = []
def calculate_real_dist(self, X, n_user):
for i in range(n_user):
self.real_dist[X[i]] += 1
def perturb(self, X, n_user, p, q, g):
Y = np.zeros(n_user)
for i in range(n_user):
v = X[i]
x = (xxhash.xxh32(str(v), seed=i).intdigest() % g)
y = x
p_sample = np.random.random_sample()
# the following two are equivalent
# if p_sample > p:
# while not y == x:
# y = np.random.randint(0, g)
if p_sample > p - q:
# perturb
y =
|
np.random.randint(0, g)
|
numpy.random.randint
|
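# Hedged aside (an assumption, not taken from this file): in the local-hashing LDP
# literature the p, q, g arguments passed to OLH.perturb above are typically derived
# from the privacy budget epsilon as follows.
import math
epsilon = 1.0                                        # hypothetical privacy budget
g = int(round(math.exp(epsilon))) + 1                # hash range
p = math.exp(epsilon) / (math.exp(epsilon) + g - 1)  # probability of keeping the hashed value
q = 1.0 / g                                          # probability of any particular other value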
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from oml.models.components import ProximalOracle
from . import optimizer
class Adam(optimizer.Optimizer):
"""
Kingma, Diederik, and <NAME>.
"Adam: A method for stochastic optimization."
arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(
self,
model,
step_size=0.001,
t=0,
delta=1e-8,
beta1=0.9,
beta2=0.999
):
optimizer.Optimizer.__init__(
self,
model,
t=t,
)
self.hyper_parameter['step_size'] = step_size
self.hyper_parameter['delta'] = delta
self.state['fst_moment'] = {}
self.state['snd_moment'] = {}
self.hyper_parameter['beta1'] = beta1
self.hyper_parameter['beta2'] = beta2
def rule(self, i, key, layer):
grad = layer.param[key].grad
if isinstance(layer.param[key], ProximalOracle):
grad += layer.param[key].reg.sub_differential(layer.param[key].param)
self.state['fst_moment'][str(i) + key] = self.state['fst_moment'].get(
str(i) + key, np.zeros_like(grad)
) * self.hyper_parameter['beta1'] + grad * (1 - self.hyper_parameter['beta1'])
self.state['snd_moment'][str(i) + key] = self.state['snd_moment'].get(
str(i) + key, np.zeros_like(grad)
) * self.hyper_parameter['beta2'] + np.multiply(grad, grad) * (1 - self.hyper_parameter['beta2'])
m = self.state['fst_moment'][str(i) + key] / (1 - self.hyper_parameter['beta1'] ** self.t)
v = self.state['snd_moment'][str(i) + key] / (1 - self.hyper_parameter['beta2'] ** self.t)
layer.param[key].param -= \
self.hyper_parameter['step_size'] * m / (
                np.sqrt(v) + self.hyper_parameter['delta']
)
class AdMax(optimizer.Optimizer):
"""
Kingma, Diederik, and <NAME>.
"Adam: A method for stochastic optimization."
arXiv preprint arXiv:1412.6980 (2014).
"""
def __init__(
self,
model,
step_size=0.001,
t=0,
delta=1e-8,
beta1=0.9,
beta2=0.999
):
optimizer.Optimizer.__init__(
self,
model,
t=t,
)
self.hyper_parameter['step_size'] = step_size
self.hyper_parameter['delta'] = delta
self.state['fst_moment'] = {}
self.state['snd_moment'] = {}
self.hyper_parameter['beta1'] = beta1
self.hyper_parameter['beta2'] = beta2
def rule(self, i, key, layer):
grad = layer.param[key].grad
if isinstance(layer.param[key], ProximalOracle):
grad += layer.param[key].reg.sub_differential(layer.param[key].param)
self.state['fst_moment'][str(i) + key] = self.state['fst_moment'].get(
str(i) + key,
|
np.zeros_like(grad)
|
numpy.zeros_like
|
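# A self-contained numerical sketch (hypothetical gradient values) of the
# bias-corrected update that Adam.rule above applies to each parameter tensor.
import numpy as np
beta1, beta2, step_size, delta = 0.9, 0.999, 0.001, 1e-8
grad = np.array([0.2, -0.1])
m = np.zeros_like(grad)
v = np.zeros_like(grad)
param = np.zeros_like(grad)
for t in range(1, 4):
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad**2
    m_hat = m / (1 - beta1**t)          # bias-corrected first moment
    v_hat = v / (1 - beta2**t)          # bias-corrected second moment
    param -= step_size * m_hat / (np.sqrt(v_hat) + delta)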
"""Integrals over the Bose-Einstein distribution."""
import numpy as np
import scipy.special as sp
from darkhistory.utilities import log_1_plus_x
from darkhistory.utilities import check_err
from darkhistory.utilities import bernoulli as bern
from darkhistory.utilities import log_series_diff
from darkhistory.utilities import spence_series_diff
from darkhistory.utilities import exp_expn
from darkhistory.utilities import hyp2f1_func_real
from scipy.integrate import quad
def F2(a,b,tol=1e-10):
"""Definite integral of x^2/[(exp(x) - 1)]
Parameters
----------
a : ndarray
Lower limit of integration. Can be either 1D or 2D.
b : ndarray
Upper limit of integration. Can be either 1D or 2D.
tol : float
The relative tolerance to be reached. Default is 1e-10.
Returns
-------
float
The resulting integral.
"""
# bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
bound = 2.
# Two different series to approximate this: below and above bound.
def low_summand(x, k):
if k == 1:
return x**2/2 - x**3/6
else:
return(
bern(k)*x**(k+2)/(sp.factorial(k)*(k+2))
)
# B_n for n odd, n > 1 is zero.
def high_summand(x, k):
inf = (x == np.inf)
expr = np.zeros_like(x)
# gammaincc(n,x) = 1/gamma(n) * int_x^\infty t^{n-1}exp(-t) dt
expr[~inf] = 2*sp.gammaincc(
3, k*np.array(x[~inf], dtype='float64')
)/k**3
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0], dtype='float128'), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b')
b = np.outer(np.ones(a.shape[0], dtype='float128'), b)
# if both are 1D, the rest of the code still works.
integral = np.zeros(a.shape, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
# Initialize first term of each sum for either limit, and set integral to that value.
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
# Index of summand.
k_low = 2
# Initialize error.
err_max = 10*tol
while err_max > tol:
# Get next term.
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
# Estimate the error
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
# Add the next term in the series to the integral.
integral [both_low] += next_term[both_low]
# Increment k_low. Increment by 2 since B_n is zero for odd n > 1.
k_low += 2
# Set the errors. Only propagate parts where the errors are large to the next step.
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = np.float128(1.417948518338124870521)
# First term in integral from a to bound
int_a_bound = low_sum_bound - low_sum_a
# First term in integral from bound to infinity
int_bound_b = int_bound_inf - high_sum_b
# Initialize the integral
integral[low_high] = int_a_bound + int_bound_b
# Counters, error estimate
k_low = 2
k_high = 2
err_max = 10*tol
# Arrays for next term
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
# Only need to compute the next term to correct high_sum_b, since int_bound_inf is exact.
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
# Error estimate
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
# Add the next terms to the current integral.
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
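# Illustrative aside (a hypothetical helper, not part of the original module): the
# split-series evaluation above can be cross-checked against direct quadrature.
def _check_F2_against_quad():
    a = np.array([0.5])
    b = np.array([4.0])
    series_val, _ = F2(a, b)
    # quad is already imported at the top of this module
    quad_val = quad(lambda x: x**2/(np.exp(x) - 1), 0.5, 4.0)[0]
    return float(series_val[0]), quad_val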
def F1(a,b,epsrel=0):
"""Definite integral of x/[(exp(x) - 1)].
This is computed from the indefinite integral
.. math::
\\int dx \\frac{x}{e^x - 1} = x \\log\\left(1 - e^{-x} \\right)
- \\text{Li}_2\\left(e^{-x}\\right) =
x \\log\\left(1 - e^{-x} \\right) -
\\text{Sp}\\left( 1 - e^{-x} \\right) + \\frac{\\pi^2}{6} \\,,
where Sp is Spence's function, as implemented in ``scipy.special.spence``.
Parameters
----------
a : ndarray
Lower limit of integration. Can be either 1D or 2D.
b : ndarray
Upper limit of integration. Can be either 1D or 2D.
epsrel : float
Target relative error associated with series expansion. If zero, then the error is not computed. Default is 0. If the error is larger than ``epsrel``, then the Taylor expansions used here are insufficient. Higher order terms can be added very easily, however.
Returns
-------
float
The resulting integral.
Notes
-----
    For a or b > 0.1, the exact analytic expression is used, whereas below that we use a series expansion. This avoids numerical errors due to computation of log(1 - exp(-x)) and likewise in the `spence` function. Note that `scipy.special.spence` can only take `float64` numbers, so downcasting is necessary for 0.1 < x < 3.
See Also
---------
:func:`.log_1_plus_x`, :func:`.spence_series_diff`
"""
lowlim = 0.1
upplim = 3
def indef_int(x):
inf = (x == np.inf)
low = (x < lowlim)
high = (x > upplim) & (~inf)
gen = ~(low | high) & (~inf)
expr = np.zeros(x.size)
# Two different series for small and large x limit.
# Excludes pi^2/6 to avoid catastrophic cancellation.
if np.any(inf):
expr[inf] = 0
if np.any(low):
expr[low] = (
x[low] - x[low]**2/4 + x[low]**3/36
- x[low]**5/3600 + x[low]**7/211680 - x[low]**9/10886400
)
if np.any(high):
n = np.arange(11) + 1
expr[high] = (
x[high]*log_1_plus_x(-np.exp(-x[high]))
- np.exp(-x[high]) - np.exp(-2*x[high])/4
- np.exp(-3*x[high])/9 - np.exp(-4*x[high])/16
- np.exp(-5*x[high])/25 - np.exp(-6*x[high])/36
- np.exp(-7*x[high])/49 - np.exp(-8*x[high])/64
- np.exp(-9*x[high])/81
- np.exp(-10*x[high])/100
- np.exp(-11*x[high])/121
)
if np.any(gen):
expr[gen] = (x[gen]*log_1_plus_x(-np.exp(-x[gen]))
- sp.spence(
np.array(1. - np.exp(-x[gen]), dtype='float64')
)
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0]), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0]), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(a.shape, dtype='float128')
both_low = (a < lowlim) & (b < lowlim)
both_high = (a > upplim) & (b > upplim)
if np.any(both_low):
integral[both_low] = (
b[both_low]-a[both_low]
- (b[both_low]-a[both_low])*(b[both_low]+a[both_low])/4
+ (b[both_low]**3 - a[both_low]**3)/36
- (b[both_low]**5 - a[both_low]**5)/3600
+ (b[both_low]**7 - a[both_low]**7)/211680
- (b[both_low]**9 - a[both_low]**9)/10886400
)
if epsrel > 0:
err = (b[both_low]**11 - a[both_low]**11)/526901760
check_err(integral[both_low], err, epsrel)
if np.any(both_high):
# Use a series for the spence function.
spence_term = np.zeros_like(integral)
spence_term[both_high] = spence_series_diff(
np.exp(-b[both_high]),
np.exp(-a[both_high])
)
b_inf = both_high & (b == np.inf)
b_not_inf = both_high & (b != np.inf)
integral[b_inf] = (
- a[b_inf]*log_1_plus_x(-np.exp(-a[b_inf]))
- spence_term[b_inf]
)
integral[b_not_inf] = (
b[b_not_inf]*log_1_plus_x(-np.exp(-b[b_not_inf]))
- a[b_not_inf]*log_1_plus_x(-np.exp(-a[b_not_inf]))
- spence_term[b_not_inf]
)
if epsrel > 0:
err = (
np.exp(-b[both_high])**11
- np.exp(-a[both_high])**11
)/11**2
check_err(integral[both_high], err, epsrel)
gen_case = ~(both_low | both_high)
if np.any(gen_case):
integral[gen_case] = indef_int(b[gen_case]) - indef_int(a[gen_case])
# Correct for missing pi^2/6 where necessary.
a_low_b_notlow = (a < lowlim) & (b >= lowlim)
integral[a_low_b_notlow] += np.pi**2/6
return integral
def F0(a,b,epsrel=0):
"""Definite integral of 1/[(exp(x) - 1)].
Parameters
----------
a : ndarray
Lower limit of integration. Can be either 1D or 2D.
b : ndarray
Upper limit of integration. Can be either 1D or 2D.
err : float
Error associated with series expansion. If zero, then the error is not computed.
Returns
-------
float
The resulting integral.
"""
lowlim = 0.1
upplim = 3
def indef_int(x):
inf = (x == np.inf)
low = (x <= 1e-10)
high = (x > 1e-10) & (~inf)
expr = np.zeros_like(x)
if np.any(inf):
expr[inf] = 0
if np.any(high):
expr[high] = log_1_plus_x(-np.exp(-x[high]))
if np.any(low):
expr[low] = (
np.log(x[low]) - x[low]/2 + x[low]**2/24
- x[low]**4/2880 + x[low]**6/181440
- x[low]**8/9676800 + x[low]**10/479001600
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0]), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0]), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(a.shape, dtype='float128')
both_low = (a < lowlim) & (b < lowlim)
both_high = (a > upplim) & (b > upplim)
if np.any(both_low):
integral[both_low] = (
np.log(b[both_low]/a[both_low])
- (b[both_low]-a[both_low])/2
+ (b[both_low]-a[both_low])*(b[both_low]+a[both_low])/24
- (b[both_low]**4 - a[both_low]**4)/2880
+ (b[both_low]**6 - a[both_low]**6)/181440
- (b[both_low]**8 - a[both_low]**8)/9676800
+ (b[both_low]**10 - a[both_low]**10)/479001600
)
if epsrel > 0:
err = -(b[both_low]**12 - a[both_low]**12)*691/15692092416000
check_err(integral[both_low], err, epsrel)
if np.any(both_high):
integral[both_high] = log_series_diff(
np.exp(-b[both_high]),
np.exp(-a[both_high])
)
if epsrel > 0:
err = -(
np.exp(-b[both_high])**12 -
np.exp(-a[both_high])**12
)/12
check_err(integral[both_high], err, epsrel)
gen_case = ~(both_low | both_high)
if np.any(gen_case):
integral[gen_case] = indef_int(b[gen_case]) - indef_int(a[gen_case])
return integral
def F_inv(a,b,tol=1e-10):
"""Definite integral of (1/x)/(exp(x) - 1).
Parameters
----------
a : ndarray
Lower limit of integration.
b : ndarray
Upper limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
float
The resulting integral.
"""
# bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
bound = 2.
# Two different series to approximate this: below and above bound.
def low_summand(x, k):
if k == 1:
return -1/x - np.log(x)/2
else:
return (
bern(k)*(x**(k-1))/
(sp.factorial(k)*(k-1))
)
# B_n for n odd, n > 1 is zero.
def high_summand(x, k):
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[~inf] = sp.expn(1, k*np.array(x[~inf], dtype='float64'))
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0],dtype='float128'), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(a.shape, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
k_low = 2
err_max = 10*tol
while err_max > tol:
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
integral[both_low] += next_term[both_low]
k_low += 2
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = np.float128(0.053082306482669888568)
int_a_bound = low_sum_bound - low_sum_a
int_bound_b = int_bound_inf - high_sum_b
integral[low_high] = int_a_bound + int_bound_b
k_low = 2
k_high = 2
err_max = 10*tol
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
# Only need to compute the next term for the b to inf integral.
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
def F_inv_a(lowlim, a, tol=1e-10):
"""Integral of 1/((x+a)(exp(x) - 1)) from lowlim to infinity.
Parameters
----------
a : ndarray
Parameter in (x+a).
lowlim : ndarray
Lower limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
ndarray
The resulting integral.
"""
bound = np.ones_like(lowlim, dtype='float128')*2.
# Two different series to approximate this: below and above bound.
def low_summand(x, a, k):
x_flt64 = np.array(x, dtype='float64')
a_flt64 = np.array(a, dtype='float64')
if k == 1:
expr = np.log(x)/a - np.log(x+a)/a - 0.5*x*(
1/a - x/(2*a**2)
*hyp2f1_func_real(1, -x/a)
)
# expr = np.log(x)/a - np.log(x+a)/a - 0.5*x*(
# 1/a - x/(2*a**2)
# *np.real(sp.hyp2f1(1, 2, 3, -x_flt64/a_flt64 + 0j))
# )
return expr
else:
return bern(k)*x**k/(sp.factorial(k)*k)*(
1/a - k*x/((k+1)*a**2)*hyp2f1_func_real(k, -x/a)
)
# return bern(k)*x**k/(sp.factorial(k)*k)*(
# 1/a - k*x/((k+1)*a**2)*np.real(
# sp.hyp2f1(1, k+1, k+2, -x_flt64/a_flt64 + 0j)
# )
# )
def high_summand(x, a, k):
x_flt64 = np.array(x, dtype='float64')
a_flt64 = np.array(a, dtype='float64')
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[inf] = 0
expr[~inf] = np.exp(-k*x[~inf])*exp_expn(
1, k*(x[~inf] + a[~inf])
)
return expr
if a.ndim == 1 and lowlim.ndim == 2:
if lowlim.shape[1] != a.size:
raise TypeError('The second dimension of lowlim must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(lowlim.shape[0]), a)
elif a.ndim == 2 and lowlim.ndim == 1:
if a.shape[1] != lowlim.size:
raise TypeError('The second dimension of a must have the same length as lowlim.')
lowlim = np.outer(np.ones(a.shape[0]), lowlim)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(lowlim.shape, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
a_is_zero = (a == 0)
low = (lowlim < 2) & ~a_is_zero
high = ~low & ~a_is_zero
if np.any(a_is_zero):
integral[a_is_zero] = F_inv(
lowlim[a_is_zero],
np.ones_like(lowlim[a_is_zero])*np.inf,
tol = tol
)
if np.any(low):
integral[low] = (
low_summand(bound[low], a[low], 1)
- low_summand(lowlim[low], a[low], 1)
+ high_summand(bound[low], a[low], 1)
)
k_low = 2
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[low] = (
low_summand(bound[low], a[low], k_low)
- low_summand(lowlim[low], a[low], k_low)
+ high_summand(bound[low], a[low], k_high)
)
err[low] = np.abs(
np.divide(
next_term[low],
integral[low],
out = np.zeros_like(next_term[low]),
where = integral[low] != 0
)
)
integral[low] += next_term[low]
k_low += 2
k_high += 1
err_max = np.max(err[low])
low &= (err > tol)
if np.any(high):
integral[high] = high_summand(lowlim[high], a[high], 1)
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[high] = high_summand(lowlim[high], a[high], k_high)
err[high] = np.abs(
np.divide(
next_term[high],
integral[high],
out = np.zeros_like(next_term[high]),
where = integral[high] != 0
)
)
integral[high] += next_term[high]
k_high += 1
err_max = np.max(err[high])
high &= (err > tol)
return integral, err
def F_inv_n(a,b,n,tol=1e-10):
"""Definite integral of (1/x**n)/(exp(x) - 1)
Parameters
----------
a : ndarray
Lower limit of integration.
b : ndarray
Upper limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
float
The resulting integral.
"""
bound = np.float128(2.)
# Two different series to approximate this: below and above bound.
def low_summand(x, k):
if k == 1:
init_sum = 0
for j in np.arange(n):
init_sum += bern(j)/sp.factorial(j)*x**(j-n)/(j-n)
init_sum += bern(n)/sp.factorial(n)*np.log(x)
return init_sum
else:
# B_n for n odd, n > 1 is zero.
if np.mod(k+n-1, 2) == 0:
return(
bern(k+n-1)/sp.factorial(k+n-1)*x**(k-1)/(k-1)
)
else:
return(
bern(k+n)/sp.factorial(k+n)*x**k/k
)
def high_summand(x, k):
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[~inf] = (
sp.expn(n, k*np.array(x[~inf], dtype='float64'))/x[~inf]**(n-1)
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0],dtype='float128'), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros_like(a, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
k_low = 2
err_max = 10*tol
while err_max > tol:
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
integral[both_low] += next_term[both_low]
k_low += 2
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = quad(
lambda x: 1/(x**n*(np.exp(x) - 1)),
bound, np.inf, epsabs = 1e-16, epsrel=1e-16
)[0]
int_a_bound = low_sum_bound - low_sum_a
int_bound_b = int_bound_inf - high_sum_b
integral[low_high] = int_a_bound + int_bound_b
k_low = 2
k_high = 2
err_max = 10*tol
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
# Only need to compute the next term for the b to inf integral.
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
def F_inv_3(a,b,tol=1e-10):
"""Definite integral of (1/x**3)/(exp(x) - 1).
Parameters
----------
a : ndarray
Lower limit of integration.
b : ndarray
Upper limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
float
The resulting integral.
"""
# bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
bound = 2.
# Two different series to approximate this: below and above bound.
def low_summand(x, k):
if k == 1:
return -1/(3*x**3) + 1/(4*x**2) - 1/(12*x)
else:
return (
bern(k+2)*(x**(k-1))/(sp.factorial(k+2)*(k-1))
)
# B_n for n odd, n > 1 is zero.
def high_summand(x, k):
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[~inf] = (
sp.expn(3, k*np.array(x[~inf], dtype='float64'))/x[~inf]**2
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0],dtype='float128'), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros_like(a, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
k_low = 2
err_max = 10*tol
while err_max > tol:
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
integral[both_low] += next_term[both_low]
k_low += 2
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = np.float128(0.0083036361900336)
int_a_bound = low_sum_bound - low_sum_a
int_bound_b = int_bound_inf - high_sum_b
integral[low_high] = int_a_bound + int_bound_b
k_low = 2
k_high = 2
err_max = 10*tol
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
# Only need to compute the next term for the b to inf integral.
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
def F_inv_5(a,b,tol=1e-10):
"""Definite integral of (1/x**5)/(exp(x) - 1).
Parameters
----------
a : ndarray
Lower limit of integration.
b : ndarray
Upper limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
float
The resulting integral.
"""
# bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
bound = 2.
# Two different series to approximate this: below and above bound.
def low_summand(x, k):
if k == 1:
return -1/(5*x**5) + 1/(8*x**4) - 1/(36*x**3) + 1/(720*x)
else:
return (
bern(k+4)*(x**(k-1))/(sp.factorial(k+4)*(k-1))
)
# B_n for n odd, n > 1 is zero.
def high_summand(x, k):
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[~inf] = (
sp.expn(5, k*np.array(x[~inf], dtype='float64'))/x[~inf]**4
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0],dtype='float128'), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0],dtype='float128'), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros_like(a, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
k_low = 2
err_max = 10*tol
while err_max > tol:
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
integral[both_low] += next_term[both_low]
k_low += 2
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = np.float128(0.001483878955697788)
int_a_bound = low_sum_bound - low_sum_a
int_bound_b = int_bound_inf - high_sum_b
integral[low_high] = int_a_bound + int_bound_b
k_low = 2
k_high = 2
err_max = 10*tol
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
# Only need to compute the next term for the b to inf integral.
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
def F_log(a,b,tol=1e-10):
"""Definite integral of log(x)/(exp(x) - 1).
Parameters
----------
a : ndarray
Lower limit of integration.
b : ndarray
Upper limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
float
The resulting integral.
"""
# bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
bound = 2.
# Two different series to approximate this: below and above bound.
def low_summand(x, k):
if k == 1:
return 1/2*(np.log(x)**2) - (x/2)*(np.log(x) - 1)
else:
return (
bern(k)*(x**k)/
(sp.factorial(k)*k**2)*(k*np.log(x) - 1)
)
# B_n for n odd, n > 1 is zero.
def high_summand(x, k):
# sp.expn does not support float128.
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[inf] = 0
expr[~inf] = (
1/k*(np.exp(-k*x[~inf])*np.log(x[~inf])
+ sp.expn(
1, k*np.array(x[~inf], dtype='float64')
)
)
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0]), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0]), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(a.shape, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
k_low = 2
err_max = 10*tol
while err_max > tol:
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
integral[both_low] += next_term[both_low]
k_low += 2
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = np.float128(0.15171347859984083704)
int_a_bound = low_sum_bound - low_sum_a
int_bound_b = int_bound_inf - high_sum_b
integral[low_high] = int_a_bound + int_bound_b
k_low = 2
k_high = 2
err_max = 10*tol
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
# Only need to compute the next term for the b to inf integral.
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
def F_x_log(a,b,tol=1e-10):
"""Definite integral of x log(x)/(exp(x) - 1).
Parameters
----------
a : ndarray
Lower limit of integration.
b : ndarray
Upper limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
float
The resulting integral.
"""
# bound is fixed. If changed to another number, the exact integral from bound to infinity later in the code needs to be changed to the appropriate value.
bound = 2.
def low_summand(x,k):
if k==1:
return x*np.log(x) - x - (x**2/2)*(2*np.log(x) - 1)/4
else:
return (
bern(k)*(x**(k+1))/
(sp.factorial(k)*(k+1)**2)*((k+1)*np.log(x) - 1)
)
def high_summand(x, k):
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[inf] = 0
expr[~inf] = (
1/k**2*(
(1+k*x[~inf])*np.exp(-k*x[~inf])*np.log(x[~inf])
+ (1+k*x[~inf])*sp.expn(
1, k*np.array(x[~inf], dtype='float64')
)
+ sp.expn(2, k*np.array(x[~inf], dtype='float64'))
)
)
return expr
if a.ndim == 1 and b.ndim == 2:
if b.shape[1] != a.size:
raise TypeError('The second dimension of b must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(b.shape[0]), a)
elif a.ndim == 2 and b.ndim == 1:
if a.shape[1] != b.size:
raise TypeError('The second dimension of a must have the same length as b.')
b = np.outer(np.ones(a.shape[0]), b)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(a.shape, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
both_low = (a < bound) & (b < bound)
low_high = (a < bound) & (b >= bound)
both_high = (a > bound) & (b > bound)
# Both low
if np.any(both_low):
low_sum_a = low_summand(a[both_low], 1)
low_sum_b = low_summand(b[both_low], 1)
integral[both_low] = low_sum_b - low_sum_a
k_low = 2
err_max = 10*tol
while err_max > tol:
next_term[both_low] = (
low_summand(b[both_low], k_low)
- low_summand(a[both_low], k_low)
)
err[both_low] = np.abs(
np.divide(
next_term[both_low],
integral[both_low],
out = np.zeros_like(next_term[both_low]),
where = integral[both_low] != 0
)
)
integral[both_low] += next_term[both_low]
k_low += 2
err_max = np.max(err[both_low])
both_low &= (err > tol)
# a low b high
if np.any(low_high):
# Evaluate the definite integral from a to 2, and then 2 to b.
low_sum_a = low_summand(a[low_high], 1)
high_sum_b = high_summand(b[low_high], 1)
low_sum_bound = low_summand(bound, 1)
# Exact integral from 2 to infinity.
int_bound_inf = np.float128(0.4888742871822041)
int_a_bound = low_sum_bound - low_sum_a
int_bound_b = int_bound_inf - high_sum_b
integral[low_high] = int_a_bound + int_bound_b
k_low = 2
k_high = 2
err_max = 10*tol
next_term_a_bound = np.zeros_like(integral)
next_term_bound_b = np.zeros_like(integral)
while err_max > tol:
next_term_a_bound[low_high] = (
low_summand(bound, k_low)
- low_summand(a[low_high], k_low)
)
next_term_bound_b[low_high] = (
-high_summand(b[low_high], k_high)
)
next_term[low_high] = (
next_term_a_bound[low_high]
+ next_term_bound_b[low_high]
)
err[low_high] = np.abs(
np.divide(
next_term[low_high],
integral[low_high],
out = np.zeros_like(next_term[low_high]),
where = integral[low_high] != 0
)
)
integral[low_high] += next_term[low_high]
k_low += 2
k_high += 1
err_max = np.max(err[low_high])
low_high &= (err > tol)
# Both high
if np.any(both_high):
high_sum_a = high_summand(a[both_high], 1)
high_sum_b = high_summand(b[both_high], 1)
integral[both_high] = high_sum_a - high_sum_b
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[both_high] = (
high_summand(a[both_high], k_high)
- high_summand(b[both_high], k_high)
)
err[both_high] = np.abs(
np.divide(
next_term[both_high],
integral[both_high],
out = np.zeros_like(next_term[both_high]),
where = integral[both_high] != 0
)
)
integral[both_high] += next_term[both_high]
k_high += 1
err_max = np.max(err[both_high])
both_high &= (err > tol)
return integral, err
def F_log_a(lowlim, a, tol=1e-10):
"""Integral of log(x+a)/(exp(x) - 1) from lowlim to infinity.
Parameters
----------
a : ndarray
Parameter in log(x+a).
lowlim : ndarray
Lower limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
ndarray
The resulting integral.
"""
bound = np.ones_like(lowlim,dtype='float128')*2.
# Two different series to approximate this: below and above bound.
def low_summand(x, a, k):
x_flt64 = np.array(x, dtype='float64')
a_flt64 = np.array(a, dtype='float64')
if k == 1:
expr = np.zeros_like(x)
a_pos = a > 0
a_neg = a < 0
if np.any(a_pos):
expr[a_pos] = (
np.log(x[a_pos])*np.log(a[a_pos])
- sp.spence(1+x_flt64[a_pos]/a_flt64[a_pos])
- (
(x[a_pos]+a[a_pos])
*np.log(x[a_pos]+a[a_pos])
- x[a_pos]
)/2
)
if np.any(a_neg):
expr[a_neg] = (
np.log(-x[a_neg]/a[a_neg])*np.log(x[a_neg]+a[a_neg])
+ sp.spence(-x_flt64[a_neg]/a_flt64[a_neg])
- (
(x[a_neg]+a[a_neg])*np.log(x[a_neg]+a[a_neg])
- x[a_neg]
)/2
)
return expr
else:
return (
bern(k)*x**k/(sp.factorial(k)*k)*(
np.log(x + a)
- x/(a*(k+1))*hyp2f1_func_real(k, -x/a)
)
)
# return (
# bern(k)*x**k/(sp.factorial(k)*k)*(
# np.log(x + a) - x/(a*(k+1))*np.real(
# sp.hyp2f1(
# 1, k+1, k+2, -x_flt64/a_flt64 + 0j
# )
# )
# )
# )
def high_summand(x, a, k):
x_flt64 = np.array(x, dtype='float64')
a_flt64 = np.array(a, dtype='float64')
inf = (x == np.inf)
expr = np.zeros_like(x)
expr[inf] = 0
expr[~inf] = (
np.exp(-k*x[~inf])/k*(
np.log(x[~inf] + a[~inf])
+ exp_expn(1, k*(x[~inf] + a[~inf]))
)
)
return expr
if a.ndim == 1 and lowlim.ndim == 2:
if lowlim.shape[1] != a.size:
raise TypeError('The second dimension of lowlim must have the same length as a.')
# Extend a to a 2D array.
a = np.outer(np.ones(lowlim.shape[0]), a)
elif a.ndim == 2 and lowlim.ndim == 1:
if a.shape[1] != lowlim.size:
raise TypeError('The second dimension of a must have the same length as lowlim.')
lowlim = np.outer(np.ones(a.shape[0]), lowlim)
# if both are 1D, then the rest of the code still works.
integral = np.zeros(lowlim.shape, dtype='float128')
err = np.zeros_like(integral)
next_term = np.zeros_like(integral)
a_is_zero = (a == 0)
low = (lowlim < 2) & ~a_is_zero
high = ~low & ~a_is_zero
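    # When a == 0 the integrand reduces to log(x)/(exp(x) - 1), so those entries
    # are handed off to F_log directly.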
if np.any(a_is_zero):
integral[a_is_zero] = F_log(lowlim[a_is_zero],
np.ones_like(lowlim[a_is_zero])*np.inf,
tol=tol
        )[0]
if np.any(low):
integral[low] = (
low_summand(bound[low], a[low], 1)
- low_summand(lowlim[low], a[low], 1)
+ high_summand(bound[low], a[low], 1)
)
k_low = 2
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[low] = (
low_summand(bound[low], a[low], k_low)
- low_summand(lowlim[low], a[low], k_low)
+ high_summand(bound[low], a[low], k_high)
)
err[low] = np.abs(
np.divide(
next_term[low],
integral[low],
out = np.zeros_like(next_term[low]),
where = integral[low] != 0
)
)
integral[low] += next_term[low]
k_low += 2
k_high += 1
err_max = np.max(err[low])
low &= (err > tol)
if np.any(high):
integral[high] = high_summand(lowlim[high], a[high], 1)
k_high = 2
err_max = 10*tol
while err_max > tol:
next_term[high] = high_summand(lowlim[high], a[high], k_high)
err[high] = np.abs(
np.divide(
next_term[high],
integral[high],
out = np.zeros_like(next_term[high]),
where = integral[high] != 0
)
)
integral[high] += next_term[high]
k_high += 1
err_max = np.max(err[high])
high &= (err > tol)
return integral, err
def F_x_log_a(lowlim, a, tol=1e-10):
"""Integral of x log (x+a)/(exp(x) - 1) from lowlim to infinity.
Parameters
----------
a : ndarray
Parameter in x log(x+a).
lowlim : ndarray
Lower limit of integration.
tol : float
The relative tolerance to be reached.
Returns
-------
ndarray
The resulting integral.
"""
bound = np.ones_like(lowlim, dtype='float128')*2
# Two different series to approximate this: below and above bound.
def low_summand(x, a, k):
# x_flt64 = np.array(x, dtype='float64')
# a_flt64 = np.array(a, dtype='float64')
if k == 1:
return (
x*(
|
np.log(a+x)
|
numpy.log
|
#!/usr/bin/env python3
""" performance.py A program to weigh up the virtues of the
different algorithms,
by testing them for energy and angular momentum conservation."""
import os
import copy
import numpy as np
import pickle
import src.simulation.helpers as hlp
from scipy.optimize import curve_fit
import src.data_loading.io_pickles as ip
import src.simulation.sclass
import src.plotting.my_plotting_style as mps
import src.plotting.animator as ani
import matplotlib.pyplot as plt
import src.simulation.sclass as scl
import src.time_wrapper as twr
import src.simulation.halos as hal
@twr.timeit
def _circular_orbit(seperation=2, algo="vv", tstep=0.05):
"""
Take the desired seperation between the two equally sized masses,
and turn that into a list of two particles at the right speed
and distance.
"""
co = scl.Controls(
MAXTIMER=20,
TSTEP=tstep,
algorithm=algo,
OUT="MUT_CIRC",
name=algo + "_" + str(tstep),
)
speed = np.sqrt(co.GM / 2 / seperation)
    particles = []
    particles.append(
        hal.Particle(
            np.array([-seperation / 2, 0, 0]),
            np.array([0, speed, 0]),
            1.0,
            False,
            1,
        )
    )
    # The second particle mirrors the first so the pair orbits their common centre of mass.
    particles.append(
        hal.Particle(
            np.array([seperation / 2, 0, 0]),
            np.array([0, -speed, 0]),
            1.0,
            False,
            2,
        )
    )
    tmp_log_data = {}
    sy = scl.System(co, particles=particles)
    co, sy, particles = hal.spinner(co, sy, particles, log_time=tmp_log_data)
time_taken = tmp_log_data["SPINNER"]
hlp.write_out(co, sy)
return time, energy, time_taken
@twr.timeit
def circle_orbit_tester():
"""
This function calls _circular_orbit a number of times
for different settings, and then sends the output to a
_graph_circular_orbit.
"""
energy_remaining = {} # ar
simu_time = {}
time_taken = {}
for algo in ["rk4o", "vv", "herm"]:
time_taken[algo] = []
simu_time[algo] = {}
energy_remaining[algo] = {}
tsteps = [0.05, 0.1, 0.2, 0.3, 0.5, 0.8, 1]
for tstep in tsteps:
            simu_time[algo][tstep], energy_remaining[algo][tstep], tt = _circular_orbit(
                algo=algo, tstep=tstep
            )
            time_taken[algo].append(tt)
_graph_circular_orbit(energy_remaining, simu_time, time_taken)
@twr.timeit
def three_body_test():
""" Three body test with a variety of algorithms"""
print("Let us try three body")
    tbc_time = {}
for algo in ["rk4o", "vv", "herm"]:
tbc_time[algo] = []
for dt in [0.001, 0.002, 0.01, 0.1, 0.2, 0.5]:
vb = int(100 * (0.001 / dt))
if vb == 0:
# Prevent div0 errors
vb = 1
tmp_log_data = {}
three_body_collide(
MAX_TIMER=5000,
VB=vb,
TSTEP=dt,
EP=0.01,
recalculate=True,
algorithm=algo,
log_time=tmp_log_data,
)
tbc_time[algo].append(tmp_log_data["THREE_BODY_COLLIDE"])
@twr.timeit
def particle_build_up(OM=5):
"""A function which gradually fills up a galaxy with a variable number
particles, trying each algorithm used in the simulation, allowing
computational excess to be compared"""
# Fills up the shells at these radii to these maximums
radii = [
0,
1.0,
1.5,
2,
2.5,
3,
3.5,
4,
4.5,
5,
5.5,
6,
6.5,
7.0,
7.5,
8.0,
8.5,
9.0,
9.5,
]
num_particles = [
1,
12,
18,
24,
31,
36,
42,
50,
58,
70,
80,
100,
130,
160,
200,
250,
300,
340,
400,
]
total_particles = 0
for i in num_particles:
total_particles += i
print("You can go up to " + str(total_particles) + " if you want to.")
N_vec = np.logspace(1, OM, num=OM, base=2)
# variables to store the raw data
tot_particles_d = {}
fill_up_time = []
ani_time = []
spin_round_time = {}
spinner_time = {}
spin_round_time["vv"] = []
spin_round_time["rk4o"] = []
spin_round_time["herm"] = []
spinner_time["vv"] = []
spinner_time["rk4o"] = []
spinner_time["herm"] = []
tot_particles_d["vv"] = []
tot_particles_d["FU"] = []
tot_particles_d["herm"] = []
tot_particles_d["rk4o"] = []
tot_particles_d["ANI"] = []
tot_particles_d["vv_spin"] = []
tot_particles_d["rk4o_spin"] = []
tot_particles_d["herm_spin"] = []
for required_total in N_vec:
tmp_log_data = {}
# particles = []
particles = _fill_up(
radii, num_particles, int(required_total), log_time=tmp_log_data
)
if not required_total == N_vec[0]:
# The first data point is always terrible and messes up the fit
fill_up_time.append(tmp_log_data["_FILL_UP"])
tot_particles_d["FU"].append(len(particles))
print(
"There are "
+ str(len(particles))
+ " particles and I expected "
+ str(required_total)
)
# What happens if you just spin the particles?
for key in spin_round_time:
tot_particles_d[key].append(
len(particles)
) # how many particles in this key
co = scl.Controls(TSTEP=0.05, algorithm=key)
tmp_log_data = {}
part = hal.spin_forward(
400, co, particles=copy.deepcopy(particles), log_time=tmp_log_data
) # chuck it into part to stop interference.
assert part != particles
spin_round_time[key].append(tmp_log_data["SPIN_FORWARD"])
# What happens if you spin the particles, calculate energies, and plot things?
for key in spinner_time:
tot_particles_d[key + "_spin"].append(len(particles))
sub = key + "_running_time_test_" + str(len(particles))
co = scl.Controls(
OUT="./Run_Time_TEST/" + sub,
MAXTIMER=400,
TSTEP=0.05,
algorithm=key,
name=sub,
)
sy = scl.System(co, particles=particles)
tmp_log_data = {}
co, sy, part = hal.spinner(
co, sy, copy.deepcopy(particles), log_time=tmp_log_data
)
# chuck it into part to stop interference.
assert part != particles
spinner_time[key].append(tmp_log_data["SPINNER"]) # extract spinner time
# What about the animation of those particles?
hlp.write_out(co, sy)
aniclass_member = ani.AniMP4(co.out, name=co.name, move_with=True)
# initiate animator
animate = True
if key == "vv" and animate == True:
tmp_log_data = {}
aniclass_member.animate_starter(log_time=tmp_log_data)
# run animation / plots
                ani_time.append(tmp_log_data["ANIMATE_STARTER"])
tot_particles_d["ANI"].append(len(particles))
sy = [] # delete sy so that it doesn't slow the other steps down.
save_data = {
"tot_particles_d": tot_particles_d,
"fill_up_time": fill_up_time,
"spin_round_time": spin_round_time,
"spinner_time": spinner_time,
"ani_time": ani_time,
}
name = "Fill_Up_Dict_No_Fits_OM_" + str(OM)
ip.print_dict_to_pickle(name=name, dictionary=save_data)
print("about to go to _fit_fill_up with name " + name)
_fit_fill_up(name=name, animate=animate)
@twr.timeit
def _fit_fill_up(name="no_name", animate=True):
"""
:param name: where to look for saved output
:param animate: whether the animation is being run
:return: void
"""
read_data = ip.read_pickle_to_dict(name=name)
tot_particles_d = read_data["tot_particles_d"]
fill_up_time = read_data["fill_up_time"]
spin_round_time = read_data["spin_round_time"]
spinner_time = read_data["spinner_time"]
ani_time = read_data["ani_time"]
# variables to store the log log fits
popt_d = {}
perr_d = {}
x_values_d = {}
y_values_d = {}
tmp_log_data = {}
x_values_d["FU"], y_values_d["FU"], popt_d["FU"], perr_d["FU"] = linear_fitter(
tot_particles_d["FU"],
fill_up_time,
"Gen_Output/Verbose/Fill_Up_Only",
log_time=tmp_log_data,
)
print("Graphing time was %5.3f Seconds." % tmp_log_data["LINEAR_FITTER"])
tmp_log_data = {}
x_values_d["ANI"], y_values_d["ANI"], popt_d["ANI"], perr_d["ANI"] = linear_fitter(
tot_particles_d["ANI"],
ani_time,
"Gen_Output/Verbose/Animate_Only",
log_time=tmp_log_data,
)
print("Graphing time was %5.3f Seconds." % tmp_log_data["LINEAR_FITTER"])
for key in spin_round_time:
tmp_log_data = {}
x_values_d[key], y_values_d[key], popt_d[key], perr_d[key] = linear_fitter(
tot_particles_d[key],
spin_round_time[key],
"Gen_Output/Verbose/Spin_Round_Time_" + key,
log_time=tmp_log_data,
)
print("Graphing time was %5.3f Seconds." % tmp_log_data["LINEAR_FITTER"])
for old_key in spinner_time:
key = old_key + "_spin"
tmp_log_data = {}
x_values_d[key], y_values_d[key], popt_d[key], perr_d[key] = linear_fitter(
tot_particles_d[key],
            spinner_time[old_key],
"Gen_Output/Verbose/Spin_Round_Time_" + key,
log_time=tmp_log_data,
)
print("Graphing time was %5.3f Seconds." % tmp_log_data["LINEAR_FITTER"])
data_dictionary = {
"x_values_d": x_values_d,
"y_values_d": y_values_d,
"popt_d": popt_d,
"perr_d": perr_d,
}
name = "Fill_Up_Dict_With_Fits_OM_" + str(OM)
ip.print_dict_to_pickle(name=name, dictionary=data_dictionary)
_replot_fill_up(name)
@twr.timeit
def _replot_fill_up(name):
data = ip.read_pickle_to_dict(name=name)
linear_replotter(
data["x_values_d"],
data["y_values_d"],
data["popt_d"],
data["perr_d"],
name_graph="Gen_Output/Compared_Ov2",
)
@twr.timeit
def _fill_up(radii, num_particles, required_total, **kwargs):
"""
    This algorithm fills up the rings up to roughly the fermi
    energy.
"""
temp_total = 0
radii_2 = []
num_particles_2 = []
for j in range(0, len(num_particles)):
temp_totalA = temp_total # lower bound of ring
temp_total += num_particles[j] # upper bound of ring
if temp_totalA < required_total:
if temp_total >= required_total:
# The change over point
valence_planets = required_total - temp_totalA
ring_reached = j # ok.
if temp_total < required_total:
print("Somehow you have overshot what is possible")
ring_reached = len(num_particles) - 1
valence_planets = num_particles[ring_reached]
for i in range(0, ring_reached):
radii_2.append(radii[i])
num_particles_2.append(num_particles[i])
radii_2.append(radii[ring_reached])
num_particles_2.append(valence_planets)
return _calculate_vs_and_ps(radii_2, num_particles_2)
@twr.timeit
def _calculate_vs_and_ps(radiiA, num_particlesB, **kwargs):
""" Calculate vs and ps"""
particles = []
co = scl.Controls() # the controls really don't matter in this instance.
countA = 0
for i in range(len(radiiA)):
countB = 0 # CountB is number in a Particular Ring
for j in range(num_particlesB[i]):
countA += 1
countB += 1
if radiiA[i] == 0:
particles.append(
hal.Particle(
np.array([0.0, 0.0, 0.0]),
np.array([0.0, 0.0, 0.0]),
1.0,
True,
countA,
)
)
else:
pos = hlp.circular_position(radiiA[i], num_particlesB[i], j)
vel = hlp.circular_velocity(radiiA[i], num_particlesB[i], j, co)
particles.append(hal.Particle(pos, vel, 1.0, False, countA))
return particles
# three_body_test()
# three_body_collide(algorithm='herm')
# three_body_test(algorithm='herm')
def two_body_test():
""" Fin"""
print("lets test two bodies for a long time")
@twr.timeit
def _perf_test_plotter():
"""A function designed to test whether the timeit
wrapper worked"""
test_times = []
test_inputs = []
test_log_times = []
test_log_inputs = []
for i in range(5, 200):
tmp_log_data = {}
_timeit_tester(N=i ** 2, log_time=tmp_log_data)
test_times.append(tmp_log_data["_TIMEIT_TESTER"])
test_inputs.append(i ** 2)
# timeit_tester(N=i)
tmp_log_data = {}
linear_fitter(test_inputs, test_times, "ONSquared", log_time=tmp_log_data)
print("Graphing time was %5.3f Seconds." % tmp_log_data["LINEAR_FITTER"])
# Fitting and graphing help
def _funct(x, a, b):
""" fit linear function to log log data"""
return (a * x) + b
@twr.timeit
def linear_fitter(
x_unlogged_values, y_unlogged_values, name_graph, x_unit="N", y_unit="t", **kwargs
):
"""
requires scipy curvefit
:param x_values:
:param y_values:
:param name_graph:
:param kwargs:
:return:
"""
print("fitting for " + name_graph)
x_values = []
y_values = []
for x_unlogged_value in x_unlogged_values:
assert x_unlogged_value != 0.0
# code will break if there is a zero
x_values.append(np.log2(x_unlogged_value))
for y_unlogged_value in y_unlogged_values:
assert y_unlogged_value != 0.0
y_values.append(
|
np.log2(y_unlogged_value)
|
numpy.log2
|
import collections
import math
import os
import sys
import warnings
try:
long
except NameError:
long = int # Python 3
import numpy
mw_python_dir = os.environ.get(
'MW_PYTHON_DIR',
'/Library/Application Support/MWorks/Scripting/Python',
)
sys.path.insert(0, mw_python_dir)
import mworks
assert (os.path.dirname(mworks.__file__) ==
os.path.join(mw_python_dir, 'mworks')), 'Wrong mworks package!'
class TypeConversionTestMixin(object):
def send(self, data):
raise NotImplementedError
def receive(self):
raise NotImplementedError
def assertReceivedIsSent(self, sent):
self.send(sent)
self.assertIs(sent, self.receive())
def assertReceivedEqualsSent(self, sent, expected=None):
if expected is None:
expected_type = type(sent)
expected = sent
elif isinstance(expected, type):
expected_type = expected
expected = sent
else:
expected_type = type(expected)
self.send(sent)
received = self.receive()
self.assertIsInstance(received, expected_type)
self.assertEqual(expected, received)
def assertReceivedIsInf(self, sent):
self.send(sent)
received = self.receive()
self.assertIsInstance(received, float)
self.assertTrue(math.isinf(received))
def test_none(self):
self.assertReceivedIsSent(None)
def test_bool(self):
self.assertReceivedIsSent(True)
self.assertReceivedIsSent(False)
def test_int(self):
self.assertReceivedEqualsSent(0)
self.assertReceivedEqualsSent(1)
self.assertReceivedEqualsSent(-2)
int_info = numpy.iinfo(int)
self.assertReceivedEqualsSent(int_info.max)
self.assertReceivedEqualsSent(int_info.min)
def test_long(self):
self.assertReceivedEqualsSent(long(0), int)
self.assertReceivedEqualsSent(long(1), int)
self.assertReceivedEqualsSent(long(-2), int)
# mw::Datum stores integers as long long's, so we should be
# able to send the full range of long long values, but not
# values outside that range
longlong_info = numpy.iinfo(numpy.longlong)
self.assertReceivedEqualsSent(longlong_info.max)
self.assertReceivedEqualsSent(longlong_info.min)
self.assertRaises(OverflowError, self.send, longlong_info.max+1)
self.assertRaises(OverflowError, self.send, longlong_info.min-1)
def test_float(self):
self.assertReceivedEqualsSent(0.0)
self.assertReceivedEqualsSent(1.0)
self.assertReceivedEqualsSent(-2.2)
def test_float_inf(self):
self.assertReceivedIsInf(numpy.inf)
def test_float_nan(self):
self.send(numpy.nan)
received = self.receive()
self.assertIsInstance(received, float)
self.assertTrue(math.isnan(received))
def test_bytes(self):
self.assertReceivedEqualsSent(b'', '')
self.assertReceivedEqualsSent(b'foo', 'foo')
self.assertReceivedEqualsSent(b' Foo \n Bar ', ' Foo \n Bar ')
self.assertReceivedEqualsSent(b'foo\0bar') # Embedded NUL
def test_bytes_with_trailing_nul(self):
self.assertReceivedEqualsSent(b'foo\0')
def test_unicode(self):
self.assertReceivedEqualsSent(u'', str)
self.assertReceivedEqualsSent(u'foo', str)
self.assertReceivedEqualsSent(u' Foo \n Bar ', str)
self.assertReceivedEqualsSent(u'foo\0bar', b'foo\0bar') # Embedded NUL
# Try some real Unicode
sent = u'a\U0001d11eb' # U+1D11E is the G clef character
self.send(sent)
received = self.receive()
self.assertIsInstance(received, str)
if sys.version_info >= (3,):
self.assertEqual(sent, received)
else:
sent_encoded = sent.encode('utf-8')
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
self.assertNotEqual(sent, sent_encoded)
self.assertEqual(sent_encoded, received)
self.assertEqual(sent, received.decode('utf-8'))
def test_unicode_with_trailing_nul(self):
self.assertReceivedEqualsSent(u'foo\0', b'foo\0')
def _test_sequence(self, seq_type):
def test(list_val):
self.assertReceivedEqualsSent(seq_type(list_val), list_val)
test([])
test([1])
test([1, 2.0, 'three', {'four': 4}])
# Unconvertible item
self.assertRaises(TypeError, self.send, seq_type([1, 2, 3, 4j, 5]))
def test_list(self):
self._test_sequence(list)
self.assertReceivedEqualsSent([1, [2, [3, [4, 5]]]])
def test_tuple(self):
self._test_sequence(tuple)
self.assertReceivedEqualsSent((1, (2, (3, (4, 5)))),
[1, [2, [3, [4, 5]]]])
def _test_mapping(self, map_type):
def test(dict_val):
self.assertReceivedEqualsSent(map_type(dict_val), dict_val)
test({})
test({'a': 1})
test({'a': 1, 'b': 2.0, 'c': 'three'})
test({1: 'a', 2: 'b', 3: ['c', 'd', 'e']})
# Unconvertible key
self.assertRaises(TypeError, self.send, {1: 'a', 2: 'b', 3j: 'c'})
# Unconvertible value
self.assertRaises(TypeError, self.send, {'a': 1, 'b': 2, 'c': 3j})
def test_dict(self):
self._test_mapping(dict)
self.assertReceivedEqualsSent({'a': {'b': {'c': {'d': 'e'}}}})
def test_custom_mapping(self):
class MyMapping(collections.Mapping):
def __init__(self, d):
self._d = d
def __getitem__(self, key):
return self._d[key]
def __iter__(self):
return iter(self._d)
def __len__(self):
return len(self._d)
self.assertFalse(issubclass(MyMapping, dict))
self._test_mapping(MyMapping)
def test_unconvertible_object(self):
self.assertRaises(TypeError, self.send, object())
def test_bad_dict_key(self):
# The key (1, 2) is converted on the other end into [1, 2],
# which is unhashable and can't be used as a key
self.send({(1, 2): 3})
self.assertIsInstance(self.receive(), TypeError)
def test_infinite_recursion(self):
l = []
l.append(l)
self.assertRaises(RuntimeError, self.send, l)
def test_numpy_bool_(self):
self.assertReceivedEqualsSent(numpy.bool_(True), True)
self.assertReceivedEqualsSent(numpy.bool_(False), False)
def _test_numpy_integer(self, itype):
type_info = numpy.iinfo(itype)
longlong_info = numpy.iinfo(numpy.longlong)
self.assertReceivedEqualsSent(itype(0), 0)
self.assertReceivedEqualsSent(itype(1), 1)
if type_info.min < 0:
self.assertReceivedEqualsSent(itype(-2), -2)
if type_info.max <= longlong_info.max:
self.assertReceivedEqualsSent(itype(type_info.max),
int(type_info.max))
else:
self.assertReceivedEqualsSent(itype(longlong_info.max),
int(longlong_info.max))
self.assertRaises(OverflowError,
self.send,
itype(longlong_info.max+1))
if type_info.min >= longlong_info.min:
self.assertReceivedEqualsSent(itype(type_info.min),
int(type_info.min))
else:
self.assertReceivedEqualsSent(itype(longlong_info.min),
int(longlong_info.min))
self.assertRaises(OverflowError,
self.send,
                          itype(longlong_info.min-1))
def test_numpy_byte(self):
self._test_numpy_integer(numpy.byte)
def test_numpy_short(self):
self._test_numpy_integer(numpy.short)
def test_numpy_intc(self):
self._test_numpy_integer(numpy.intc)
def test_numpy_int_(self):
self._test_numpy_integer(numpy.int_)
def test_numpy_longlong(self):
self._test_numpy_integer(numpy.longlong)
def test_numpy_ubyte(self):
self._test_numpy_integer(numpy.ubyte)
def test_numpy_ushort(self):
self._test_numpy_integer(numpy.ushort)
def test_numpy_uintc(self):
self._test_numpy_integer(numpy.uintc)
def test_numpy_uint(self):
self._test_numpy_integer(numpy.uint)
def test_numpy_ulonglong(self):
self._test_numpy_integer(numpy.ulonglong)
def _test_numpy_floating(self, ftype):
self.assertReceivedEqualsSent(ftype(0.0), 0.0)
self.assertReceivedEqualsSent(ftype(1.0), 1.0)
self.assertReceivedEqualsSent(ftype(-2.2), float(ftype(-2.2)))
type_info = numpy.finfo(ftype)
float_info = numpy.finfo(float)
if type_info.max <= float_info.max:
self.assertReceivedEqualsSent(ftype(type_info.max),
float(type_info.max))
self.assertReceivedEqualsSent(ftype(type_info.min),
float(type_info.min))
else:
self.assertReceivedEqualsSent(ftype(float_info.max),
float(float_info.max))
self.assertReceivedIsInf(ftype(float_info.max) * 2.0)
self.assertReceivedEqualsSent(ftype(float_info.min),
float(float_info.min))
self.assertReceivedIsInf(ftype(float_info.min) * 2.0)
def test_numpy_half(self):
self._test_numpy_floating(numpy.half)
def test_numpy_single(self):
self._test_numpy_floating(numpy.single)
def test_numpy_float_(self):
self._test_numpy_floating(numpy.float_)
def test_numpy_longfloat(self):
self._test_numpy_floating(numpy.longfloat)
def test_numpy_bytes_(self):
self.assertReceivedEqualsSent(numpy.bytes_(b'foo'), 'foo')
def test_numpy_unicode_(self):
self.assertReceivedEqualsSent(numpy.unicode_(u'foo'), str)
def test_numpy_array(self):
self.assertReceivedEqualsSent(numpy.array([]), [])
self.assertReceivedEqualsSent(numpy.array([], dtype=numpy.single), [])
self.assertReceivedEqualsSent(numpy.arange(10, dtype=numpy.ushort),
list(range(10)))
self.assertReceivedEqualsSent(
|
numpy.arange(10, dtype=numpy.single)
|
numpy.arange
|
"""
.. module:: trainer
:synopsis: Train the flow and perform sampling in latent space
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import print_function
from __future__ import division
import os, sys
import time
import torch
from torch.utils.data import DataLoader
import numpy as np
from tensorboardX import SummaryWriter
from sklearn.model_selection import train_test_split
import scipy.spatial
from tqdm import tqdm
import emcee
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nnest.networks import SingleSpeed, FastSlow, BatchNormFlow
from nnest.utils.logger import create_logger
from nnest.trainer import Trainer
class MultiTrainer(Trainer):
def __init__(self,
xdim,
ndim,
nslow=0,
batch_size=100,
flow='nvp',
num_blocks=5,
num_layers=2,
oversample_rate=-1,
train=True,
load_model='',
log_dir='logs',
use_gpu=False,
log=True
):
self.device = torch.device(
'cuda' if use_gpu and torch.cuda.is_available() else 'cpu')
self.x_dim = xdim
self.z_dim = xdim
self.batch_size = batch_size
self.total_iters = 0
assert xdim > nslow
nfast = xdim - nslow
self.nslow = nslow
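        # Fraction of MCMC proposals that move only the fast block of latent
        # coordinates; the slow block (the first `nslow` dimensions) is frozen
        # for those steps in `subsample`.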
if oversample_rate > 0:
self.oversample_rate = oversample_rate
else:
self.oversample_rate = nfast / xdim
def init_network():
if flow.lower() == 'nvp':
if nslow > 0:
self.netG = FastSlow(nfast, nslow, ndim, num_blocks, num_layers)
else:
self.netG = SingleSpeed(xdim, ndim, num_blocks, num_layers)
else:
raise NotImplementedError
self.nparams = sum(p.numel() for p in self.netG.parameters())
if train and not load_model:
if log_dir is not None:
self.path = log_dir
if not os.path.exists(os.path.join(self.path, 'models')):
os.makedirs(os.path.join(self.path, 'models'))
if not os.path.exists(os.path.join(self.path, 'data')):
os.makedirs(os.path.join(self.path, 'data'))
if not os.path.exists(os.path.join(self.path, 'chains')):
os.makedirs(os.path.join(self.path, 'chains'))
if not os.path.exists(os.path.join(self.path, 'plots')):
os.makedirs(os.path.join(self.path, 'plots'))
else:
self.path = None
else:
self.path = os.path.join(log_dir, load_model)
self.netG.load_state_dict(torch.load(
os.path.join(self.path, 'models', 'netG.pt')
))
self.optimizer = torch.optim.Adam(
self.netG.parameters(), lr=0.0001, weight_decay=1e-6)
if self.path is not None:
self.writer = SummaryWriter(self.path)
self.init_network = init_network
self.init_network()
print(self.netG)
self.logger = create_logger(__name__)
self.log = log
def train(
self,
samples,
max_iters=5000,
log_interval=50,
save_interval=50,
noise=0.0,
validation_fraction=0.05):
assert samples.shape[1] == self.x_dim, samples.shape
start_time = time.time()
if self.path:
fig, ax = plt.subplots()
ax.scatter(samples[:, 0], samples[:, 1])
self.writer.add_figure('originals', fig, self.total_iters)
np.save(
os.path.join(self.path, 'data', 'originals.npy'),
samples)
if noise < 0:
# compute distance to nearest neighbor
kdt = scipy.spatial.cKDTree(samples)
dists, neighs = kdt.query(samples, 2)
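            # Heuristic jitter: half the mean nearest-neighbour distance, scaled
            # down by the dimensionality, regularises the flow when noise < 0.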
training_noise = 0.5 * np.mean(dists) / self.x_dim
else:
training_noise = noise
if self.log:
self.logger.info('Number of training samples [%d] for [%d] variables' % (samples.shape[0], self.nparams))
self.logger.info('Training noise [%5.4f]' % training_noise)
X_train, X_valid = train_test_split(
samples, test_size=validation_fraction)
train_tensor = torch.from_numpy(X_train.astype(np.float32))
train_dataset = torch.utils.data.TensorDataset(train_tensor)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=self.batch_size, shuffle=True)
valid_tensor = torch.from_numpy(X_valid.astype(np.float32))
valid_dataset = torch.utils.data.TensorDataset(valid_tensor)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=X_valid.shape[0], shuffle=False, drop_last=True)
best_validation_loss = float('inf')
best_validation_epoch = 0
best_model = self.netG.state_dict()
self.netGs = []
self.netGs_accepts = []
self.netGs_samples = []
self.netGs_init_x = []
stored = ' '
for epoch in range(1, max_iters + 1):
self.total_iters += 1
train_loss = self._train(
epoch, self.netG, train_loader, noise=training_noise)
validation_loss = self._validate(epoch, self.netG, valid_loader)
# 1) remember only if much better
# 2) when performance is improving drastically at each epoch, avoid storing every epoch.
if validation_loss < best_validation_loss - 1.0 and epoch >= best_validation_epoch + save_interval:
best_validation_epoch = epoch
best_validation_loss = validation_loss
best_model = self.netG.state_dict()
self.netGs.append(self.netG.state_dict())
self.netGs_accepts.append(0)
self.netGs_samples.append(0)
self.netGs_init_x.append(None)
stored = '**'
if epoch == 1 or epoch % log_interval == 0:
print(
'Epoch: {} validation loss: {:6.4f} {}'.format(
epoch, validation_loss, stored))
stored = ' '
#if epoch % save_interval == 0:
if self.path:
self.writer.add_scalar('loss', validation_loss, self.total_iters)
if epoch % save_interval == 0:
torch.save(
self.netG.state_dict(),
os.path.join(self.path, 'models', 'netG.pt%d' % epoch)
)
if self.x_dim == 2:
self._train_plot(self.netG, samples)
self.netG.load_state_dict(best_model)
def choose_netG(self, verbose=False):
# compute accept probabilities
# give initially equal acceptance probabilities
if verbose:
print("Sampling from networks:", ' '.join(['%4d/%4d' % (A, S) for A, S in zip(self.netGs_accepts, self.netGs_samples)]), end=" \r")
sys.stdout.flush()
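        # (accepts + 1)/(samples + 1) is a Laplace-smoothed acceptance fraction,
        # so networks that have not been tried yet still get picked occasionally.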
probs = np.array([(A + 1.) / (S + 1) for A, S in zip(self.netGs_accepts, self.netGs_samples)])
probs /= probs.sum()
i = np.random.choice(np.arange(len(probs)), p=probs)
self.netG.load_state_dict(self.netGs[i])
#print("Network %d: %d/%d" % (i, self.netGs_accepts[i], self.netGs_samples[i]))
return i
def sample(
self,
mcmc_steps=20,
alpha=1.0,
dynamic=True,
batch_size=1,
loglike=None,
init_x=None,
logl=None,
loglstar=None,
transform=None,
show_progress=False,
plot=False,
out_chain=None,
max_prior=None,
nwalkers=40):
def transformed_logpost(z):
assert z.shape == (self.x_dim,), z.shape
x, log_det_J = self.netG(torch.from_numpy(z.reshape((1,-1))).float().to(self.device), mode='inverse')
x = x.detach().cpu().numpy()
lnlike = float(loglike(transform(x)))
log_det_J = float(log_det_J.detach())
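            # Change of variables: the log-posterior in latent space is the
            # log-likelihood of the flowed point plus log|det J| of the inverse flow.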
logprob = log_det_J + lnlike
#print('Like=%.1f' % logprob, x)
return logprob
allsamples = []
alllatent = []
alllikes = []
allncall = 0
populations = [None for net in self.netGs]
nsteps = 4
#nsamples = 200 // len(self.netGs_init_x)
while allncall < mcmc_steps:
neti = self.choose_netG(verbose = True)
sampler = populations[neti]
if sampler is None:
z0 = np.random.normal(size=(nwalkers, self.x_dim))
if init_x is not None:
batch_size = init_x.shape[0]
z, _ = self.netG(torch.from_numpy(init_x).float().to(self.device))
z = z.detach()
z0[:batch_size,:] = z
sampler = emcee.EnsembleSampler(nwalkers=nwalkers, dim=self.x_dim, lnpostfn=transformed_logpost)
pos, lnprob, rstate = sampler.run_mcmc(z0, nsteps)
populations[neti] = sampler
else:
# advance population
pos, lnprob, rstate = sampler.run_mcmc(sampler.chain[:,-1,:], lnprob0=sampler.lnprobability[:,-1], N=nsteps)
alllatent.append(pos)
alllikes.append(lnprob)
allncall += nsteps * nwalkers
x, log_det_J = self.netG(torch.from_numpy(pos).float().to(self.device), mode='inverse')
x = x.detach().cpu().numpy()
allsamples.append(x)
ar = sampler.acceptance_fraction
Nsamples = len(sampler.flatchain)
#self.logger.info('Network %d sampler accepted: %.3f (%.d/%d)' % (neti, ar.mean(), ar.mean() * Nsamples, Nsamples))
self.netGs_accepts[neti] = int(ar.mean() * Nsamples)
self.netGs_samples[neti] = Nsamples
#print(np.shape(allsamples), np.shape(alllikes), np.shape(alllatent))
# Transpose so shape is (chain_num, iteration, dim)
#samples = np.transpose(np.array(allsamples), axes=[1, 0, 2])
#latent = np.transpose(np.array(alllatent), axes=[1, 0, 2])
#likes = np.transpose(np.array(alllikes), axes=[1, 0])
samples = np.array(allsamples).reshape((1, -1, self.x_dim))
latent = np.array(alllatent).reshape((1, -1, self.x_dim))
likes = np.array(alllikes).reshape((1, -1))
print()
ncall = allncall
if self.path and plot:
cmap = plt.cm.jet
cmap.set_under('w', 1)
fig, ax = plt.subplots()
ax.hist2d(samples[0, :, 0], samples[0, :, 1],
bins=200, cmap=cmap, vmin=1, alpha=0.2)
#if self.writer is not None:
# self.writer.add_figure('chain', fig, self.total_iters)
plt.tight_layout()
plt.savefig(os.path.join(self.path, 'plots', 'chain_%s.png' % self.total_iters))
plt.close()
fig, ax = plt.subplots()
ax.plot(likes[0, len(likes[0])//3:])
self.logger.info('lnLike: %d+-%d -> %d+-%d' % (
alllikes[len(alllikes)//3].mean(), alllikes[len(alllikes)//3].std(),
alllikes[-1].mean(), alllikes[-1].std()
))
#if self.writer is not None:
# self.writer.add_figure('likeevol', fig, self.total_iters)
plt.tight_layout()
plt.savefig(os.path.join(self.path, 'plots', 'likeevol_%s.png' % self.total_iters))
plt.close()
return samples, likes, latent, alpha, ncall
def subsample(
self,
mcmc_steps=20,
alpha=1.0,
dynamic=True,
batch_size=1,
loglike=None,
init_x=None,
logl=None,
loglstar=None,
transform=None,
show_progress=False,
plot=False,
out_chain=None,
max_prior=None,
max_start_tries=100):
self.netG.eval()
samples = []
latent = []
likes = []
if transform is None:
def transform(x): return x
if init_x is not None:
batch_size = init_x.shape[0]
z, _ = self.netG(torch.from_numpy(init_x).float().to(self.device))
z = z.detach()
# Add the backward version of x rather than init_x due to numerical precision
x, _ = self.netG(z, mode='inverse')
x = x.detach().cpu().numpy()
if logl is None:
logl = loglike(transform(x))
else:
if logl is None:
for i in range(max_start_tries):
z = torch.randn(batch_size, self.z_dim, device=self.device)
x, _ = self.netG(z, mode='inverse')
x = x.detach().cpu().numpy()
logl = loglike(transform(x))
if np.all(logl > -1e30):
break
if i == max_start_tries - 1:
raise Exception('Could not find starting value')
else:
z = torch.randn(batch_size, self.z_dim, device=self.device)
x, _ = self.netG(z, mode='inverse')
x = x.detach().cpu().numpy()
logl = loglike(transform(x))
samples.append(x)
likes.append(logl)
iters = range(mcmc_steps)
if show_progress:
iters = tqdm(iters)
scale = alpha
accept = 0
reject = 0
ncall = 0
if out_chain is not None:
if batch_size == 1:
files = [open(out_chain + '.txt', 'w')]
else:
files = [open(out_chain + '_%s.txt' % (ib + 1), 'w') for ib in range(batch_size)]
for i in iters:
dz = torch.randn_like(z) * scale
if self.nslow > 0 and np.random.uniform() < self.oversample_rate:
fast = True
dz[:, 0:self.nslow] = 0.0
else:
fast = False
z_prime = z + dz
# Jacobian is det d f^{-1} (z)/dz
x, log_det_J = self.netG(z, mode='inverse')
x_prime, log_det_J_prime = self.netG(z_prime, mode='inverse')
x = x.detach().cpu().numpy()
x_prime = x_prime.detach().cpu().numpy()
delta_log_det_J = (log_det_J_prime - log_det_J).detach()
log_ratio_1 = delta_log_det_J.squeeze(dim=1)
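            # log_ratio_1 is the volume (Jacobian) part of the Metropolis ratio;
            # the likelihood is only evaluated below for points that pass this
            # pre-acceptance step.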
# Check not out of prior range
if max_prior is not None:
prior = np.logical_or(
np.abs(x) > max_prior,
np.abs(x_prime) > max_prior)
idx = np.where([np.any(p) for p in prior])
log_ratio_1[idx] = -np.inf
rnd_u = torch.rand(log_ratio_1.shape, device=self.device)
ratio = log_ratio_1.exp().clamp(max=1)
mask = (rnd_u < ratio).int()
logl_prime = np.full(batch_size, logl)
# Only evaluate likelihood if prior and volume is accepted
if loglike is not None and transform is not None:
for idx, im in enumerate(mask):
if im:
if not fast:
ncall += 1
lp = loglike(transform(x_prime[idx]))
if loglstar is not None:
if
|
np.isfinite(lp)
|
numpy.isfinite
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 <NAME> <<EMAIL>>
# License: MIT
"""
jagged.core
~~~~~~~~~~~
Implementation for `JaggedArray`, the core data structure of jagged array.
"""
from __future__ import annotations
from collections.abc import Iterable
from functools import partial
from typing import Any
from typing import Optional
import numpy as np
from .typing import ArrayLike
from .typing import AxisLike
from .typing import DtypeLike
from .typing import Number
from .typing import ShapeLike
from .typing import SliceLike
from .utils import infer_nan
from .utils import is_float
class JaggedArray(object):
""" Object supporting arrays with jagged shapes off the zero'th axis. """
def __init__(self, data: ArrayLike, shape: ArrayLike):
""" Initialize a jagged array.
Args:
data:
The data as a one dimensional array.
shape:
The shape of the data along the zero'th axis.
"""
self._data = self.__cumsum = self._shape = None
self._data = np.array(data)
self._shape = np.array(shape)
self._verify_integrity()
@property
def data(self) -> np.ndarray:
""" 1D array storing all entries of the array. """
return self._data
@data.setter
def data(self, val: ArrayLike):
old_data = self.data
self._data = val
try:
self._verify_integrity()
except ValueError:
self._data = old_data
raise
@property
def shape(self) -> np.ndarray:
""" the shapes of subarrays along the zero'th axis.
dims: (D, m)
where D is the number of dimensions and m is the length along the
zero'th axis. """
return self._shape
@shape.setter
def shape(self, val: ShapeLike):
val = np.asarray(val)
old_shape = self.shape
self._shape = val
try:
self._verify_integrity()
except ValueError:
self._shape = old_shape
raise
@property
def sizes(self) -> np.ndarray:
""" the sizes of the subarrays along the zero'th axis.
dims: (m)
        where m is the length along the zero'th axis.
"""
return self.shape.prod(axis=0)
@property
def size(self) -> int:
""" the number of elements in the jagged array. """
return self.sizes.sum()
@property
def nbytes(self) -> int:
""" the number of bytes taken up by the jagged array. """
return self.data.nbytes + self.shape.nbytes
@property
def ndim(self) -> int:
""" the number of dims. """
return 1 + len(self.shape)
@property
def dtype(self) -> DtypeLike:
""" the dtype of the contained data. """
return self.data.dtype
@property
def limits(self) -> np.ndarray:
""" the shape of the largest array for each dimension. """
return np.insert(self.shape.max(axis=1), 0, self.shape.shape[1])
@property
def _cumsum(self) -> np.ndarray:
""" indices into the data along the zero'th axis. """
if not hasattr(self, "__cumsum"):
self.__cumsum = np.insert(np.cumsum(self.sizes), 0, 0)
return self.__cumsum
@classmethod
def from_aoa(cls, arr: np.ndarray) -> JaggedArray:
""" Create a jagged array from a numpy array of arrays.
Args:
arr:
Numpy array of arrays to convert.
Examples:
>>> arr = np.array([np.array([0, 1, 2]),
... np.array([3, 4]),
... np.array([5, 6, 7])])
>>> JaggedArray.from_aoa(arr)
JaggedArray(data=[0 1 2 3 4 5 6 7],
shape=[[3 2 3]],
dtype=int64)
"""
return cls(
np.concatenate([sub.flatten() for sub in arr]),
np.array([sub.shape for sub in arr]).T,
)
@classmethod
    def from_masked(cls, arr: np.ma.MaskedArray) -> JaggedArray:
""" Create a jagged array from a masked numpy array.
Args:
arr:
Masked numpy array to convert.
Examples:
>>> arr = np.ma.masked_array(np.array([[0, 1, 2],
... [3, 4, 0],
... [5, 0, 0],
... [6, 7, 8]]),
... np.array([[False, False, False],
... [False, False, True],
... [False, True, True],
... [False, False, False]]))
>>> JaggedArray.from_masked(arr)
JaggedArray(data=[0 1 2 3 4 5 6 7 8],
shape=[[3 2 1 3]],
dtype=int64)
Notes:
The first masked value in a given direction is assumed to be the
end of the array.
"""
return cls._from_arr_and_mask(arr.compressed(), arr.mask)
# TODO: add check that mask is jagged (i.e. no holes)
@classmethod
def _from_arr_and_mask(cls, arr: np.ndarray, mask: np.ndarray) -> JaggedArray:
def get_shape(mask, axis=1):
res = (~mask).argmin(axis=axis)
res = res.max(axis=-1) if res.ndim > 2 else res
res[res == 0] = mask.shape[axis]
return res
shapes = np.vstack([get_shape(mask, axis=i) for i in range(1, len(mask.shape))])
return cls(arr, shapes)
@classmethod
def from_array(
cls, arr: np.ndarray, masked_value: Optional[Any] = None
) -> JaggedArray:
""" Create a jagged array from a (full) array with a masked value.
Args:
arr:
array to convert.
masked_value:
The masked value. If no value is passed and the array is
compatible with float, this will be `nan`, otherwise `None`.
Examples:
>>> arr = np.array([[ 0., 1., 2.],
... [ 3., 4., np.nan],
... [ 5., np.nan, np.nan],
... [ 6., 7., 8.]])
>>> JaggedArray.from_array(arr).astype(np.int64)
JaggedArray(data=[0 1 2 3 4 5 6 7 8],
shape=[[3 2 1 3]],
dtype=int64)
"""
if masked_value is None:
masked_value = infer_nan(arr.dtype)
        if is_float(masked_value) and np.isnan(masked_value):
mask = np.isnan(arr)
else:
mask = np.equal(arr, masked_value)
return cls._from_arr_and_mask(arr[~mask], mask)
@classmethod
def from_format(cls, arr, format, **kwargs):
""" Instantiate a JaggedArray from a jagged format.
Args:
arr:
array to convert.
Keyword Args:
are passed onto the initializer.
Returns:
JaggedArray
"""
try:
return getattr(cls, "from_" + format)(arr, **kwargs)
except AttributeError:
raise ValueError("{} is not a valid jagged format.".format(format))
def copy(self) -> JaggedArray:
""" copy the jagged array. """
return self.__class__(self.data.copy(), self.shape.copy())
def astype(self, dtype: DtypeLike, copy=True) -> JaggedArray:
""" the array with the data as a given data type.
Args:
dtype:
the numpy dtype to use to represent data.
copy:
whether to copy the data, or make the change in place.
"""
res = self.copy() if copy else self
res.data = self.data.astype(dtype)
return res
def _verify_integrity(self):
""" Verify that the jagged array is acceptable.
This checks that:
- the data is 1D
- the shape is 2D
- the number of entries match the sizes of the array.
Returns:
bool
"""
if len(self.data.shape) != 1:
raise ValueError(
"Data array must be one dimensional "
"(is {})".format(len(self.data.shape))
)
if len(self.shape.shape) != 2:
raise ValueError(
"Shape array must be two dimensional "
"(is {})".format(len(self.shape.shape))
)
shape_size, data_size = self._cumsum[-1], self.data.size
if not shape_size == data_size:
raise ValueError(
"Size of data ({data_size}) does not match that "
"of the given shapes ({shape_size}).".format(
data_size=data_size, shape_size=shape_size
)
)
def _mask(self) -> np.ndarray:
""" the mask for a dense array for the given shapes. """
mask = np.ones(self.limits, dtype=bool)
for ax, shape, limit in zip(
range(1, len(self.limits)), self.shape, self.limits[1:]
):
ax_mask = np.arange(limit) < np.expand_dims(shape, 1)
new_shape = np.ones(len(self.limits), dtype=int)
new_shape[0], new_shape[ax] = self.limits[0], limit
mask = mask & ax_mask.reshape(*new_shape)
return mask
def to_masked(self) -> np.ma.masked_array:
""" convert the array to a dense masked array.
Examples:
>>> JaggedArray(np.arange(8), [[3, 2, 3]]).to_masked()
masked_array(data =
[[0 1 2]
[3 4 --]
[5 6 7]],
mask =
[[False False False]
[False False True]
[False False False]],
fill_value = 999999)
>>> JaggedArray(np.arange(33), np.array([[3, 2, 3],
... [3, 6, 4]])).to_masked()
masked_array(data =
[[[0 1 2 -- -- --]
[3 4 5 -- -- --]
[6 7 8 -- -- --]]
[[9 10 11 12 13 14]
[15 16 17 18 19 20]
[-- -- -- -- -- --]]
[[21 22 23 24 -- --]
[25 26 27 28 -- --]
[29 30 31 32 -- --]]],
mask =
[[[False False False True True True]
[False False False True True True]
[False False False True True True]]
[[False False False False False False]
[False False False False False False]
[ True True True True True True]]
[[False False False False True True]
[False False False False True True]
[False False False False True True]]],
fill_value = 999999)
"""
mask = self._mask()
res = np.ma.masked_all(self.limits, dtype=self.dtype)
res[mask] = self.data
return res
def to_aoa(self) -> np.ndarray:
""" Return a numpy array of arrays.
Examples:
>>> JaggedArray(np.arange(8), np.array([[3, 2, 3]])).to_aoa()
array([array([0, 1, 2]),
array([3, 4]),
array([5, 6, 7])], dtype=object)
>>> JaggedArray(np.arange(33), np.array([[3, 2, 3],
... [3, 6, 4]])).to_aoa()
array([array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]]),
array([[ 9, 10, 11, 12, 13, 14],
[15, 16, 17, 18, 19, 20]]),
array([[21, 22, 23, 24],
[25, 26, 27, 28],
[29, 30, 31, 32]])], dtype=object)
"""
arr = np.array_split(self.data, self._cumsum[1:])
return np.array([res.reshape(*shape) for res, shape in zip(arr, self.shape.T)])
def to_array(self, fill_value: Optional[Any] = None) -> np.ndarray:
""" Convert to a dense array.
Args:
fill_value:
The value to fill in the array. If `None` (as default) and the
array can be converted to floats, we will use `np.nan`.
Examples:
>>> JaggedArray(np.arange(8), np.array([[3, 2, 3]])).to_array()
array([[ 0.,  1.,  2.],
       [ 3.,  4., nan],
       [ 5.,  6.,  7.]])
"""
if fill_value is None:
fill_value = infer_nan(self.dtype)
tmp = self.astype(float) if is_float(fill_value) else self
return tmp.to_masked().filled(fill_value=fill_value)
def to_format(self, format: str, **kwargs):
""" Convert the jagged array to a different format.
This is a convenience function around `to_masked`, `to_aoa`, etc.
Args:
format:
The type of array.
Returns:
JaggedArray | np.array
"""
try:
return getattr(self, "to_" + format)(**kwargs)
except AttributeError:
raise ValueError("{} is not a valid jagged format.".format(format))
asformat = to_format # consistency with scipy.sparse
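# Round-trip sketch for the format helpers above ("aoa" and "masked" simply
# dispatch to the from_aoa/to_aoa and from_masked/to_masked methods of this
# class):
#
#   >>> ja = JaggedArray(np.arange(8), np.array([[3, 2, 3]]))
#   >>> JaggedArray.from_format(ja.to_format("aoa"), "aoa") == ja
#   True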
def __eq__(self, other: JaggedArray) -> bool:
""" Whether one JaggedArray equals another. """
return np.array_equal(self.data, other.data) and np.array_equal(
self.shape, other.shape
)
@classmethod
def from_broadcast(cls, arr: np.ndarray, shape: ShapeLike) -> JaggedArray:
return cls(np.repeat(arr, shape.prod(axis=0)), shape)
def __array__(self, *args, **kwargs):
""" Numpy array interface for ufuncs.
This just gives ufuncs the data to operate on. """
return self.data
def __array_wrap__(self, result, **kwargs):
""" Numpy array interface for ufuncs.
This takes the result of a ufunc and rejaggedizes it. """
return self.__class__(result, self.shape)
def _unitary_op(self, op):
return self.__class__(op(self.data), self.shape)
def __neg__(self):
return self._unitary_op(np.negative)
def _binary_elementwise_op(self, other, op):
if isinstance(other, JaggedArray):
if not np.array_equal(other.shape, self.shape):
raise ValueError(
"operands cound not be broadcast "
"together with shapes {} and "
"{}.".format(self.shape, other.shape)
)
return self.__class__(op(self.data, other.data), self.shape)
else:
other = np.asanyarray(other)
if other.ndim == 2:
# try to broadcast
if other.shape[0] != len(self):
raise ValueError(
"operands could not be broadcast "
"together with zero-axis lengths {} and "
"{}.".format(len(self), other.shape[0])
)
return self._binary_elementwise_op(
self.from_broadcast(other, self.shape), op
)
elif other.ndim > 2:
raise ValueError(
"Could not broadcast dense array of shape {} "
"to jagged array.".format(other.shape)
)
# otherwise we have single
return self.__class__(op(self.data, other), self.shape)
def __add__(self, other):
""" Add Jagged by a value. """
return self._binary_elementwise_op(other, np.add)
def __mul__(self, other):
""" Multiply JaggedArray by a value. """
return self._binary_elementwise_op(other, np.multiply)
def __truediv__(self, other):
""" True divide a JaggedArray by a value. """
return self._binary_elementwise_op(other, np.true_divide)
def __floordiv__(self, other):
return self._binary_elementwise_op(other, np.floor_divide)
def __sub__(self, other):
return self._binary_elementwise_op(other, np.subtract)
def __pow__(self, power, modulo=None):
if modulo:
raise NotImplementedError("modulo argument not implemented.")
return self._binary_elementwise_op(power, np.power)
def __mod__(self, other):
return self._binary_elementwise_op(other, np.mod)
def clip(self, a_min: Optional[Number] = None, a_max: Optional[Number] = None):
""" Clip the values of the array.
Args:
a_min:
Lower bound of clipping interval. Values below this will be
set to this value.
a_max:
Upper bound of clipping interval. Values above this will be
set to this value.
Examples:
>>> JaggedArray(np.arange(7), [2, 3, 2]).clip(2, 5)
JaggedArray(data=[2 2 2 3 4 5 5],
shape=[[2 3 2]],
dtype=int64)
"""
return self._unitary_op(partial(np.clip, a_min=a_min, a_max=a_max))
def conjugate(self) -> JaggedArray:
""" Return the element-wise complex conjugate.
The complex conjugate of a number is obtained by changing the sign of
its imaginary part.
Returns:
JaggedArray
Examples:
>>> JaggedArray([complex(0, 1), complex(1, 1), complex(1, -1)], [[2, 1]]).conjugate()
JaggedArray(data=[0.-1.j 1.-1.j 1.+1.j],
shape=[[2 1]],
dtype=complex128)
"""
return self._unitary_op(np.conjugate)
conj = conjugate
def fill(self, value: Any) -> None:
""" Fill the array with a scalar value.
Args:
value (any):
All elements of `a` will be assigned this value.
Examples:
>>> ja = JaggedArray(np.arange(7), [[3, 2, 3]])
>>> ja.fill(0)
>>> ja
JaggedArray(data=[0 0 0 0 0 0 0],
shape=[[3 2 3]],
dtype=int64)
"""
self.data[...] = value
@property
def flat(self) -> np.ndarray:
return self.data.flat
def flatten(self) -> np.ndarray:
""" Flatten the array.
This creates a copy of the data.
Examples:
>>> ja = JaggedArray(np.arange(7), [[3, 2, 3]])
>>> flattened = ja.flatten()
>>> flattened
array([0, 1, 2, 3, 4, 5, 6])
>>> flattened[...] = 0
>>> ja
JaggedArray(data=[0 1 2 3 4 5 6],
shape=[[3 2 3]],
dtype=int64)
"""
return self.data.copy()
def ravel(self) -> np.ndarray:
""" Ravel the array.
Creates a view of the data.
Examples:
>>> ja = JaggedArray(np.arange(7), [[3, 2, 3]])
>>> ravelled = ja.ravel()
>>> ravelled
array([0, 1, 2, 3, 4, 5, 6])
>>> ravelled[...] = 0
>>> ja
JaggedArray(data=[0 0 0 0 0 0 0],
shape=[[3 2 3]],
dtype=int64)
"""
return self.data
@property
def imag(self) -> JaggedArray:
""" Get the imaginary part of the array """
return self._unitary_op(np.imag)
@imag.setter
def imag(self, values):
# TODO: broadcasting
# TODO: take a jagged array
self.data.imag = values
@property
def real(self):
return self._unitary_op(np.real)
@real.setter
def real(self, values):
# TODO: broadcasting
# TODO: take a jagged array
self.data.real = values
def _reduce_op(self, op, axis=None, **kwargs):
# easy if axis is none
if axis is None:
return op(self.data)
aoa = self.to_aoa()
axis = np.sort(np.atleast_1d(axis))
# then it is applying to all. Will get a dense array.
if np.array_equal(axis, np.arange(1, self.ndim)):
return np.asarray([op(arr) for arr in aoa])
if axis[0] == 0:
raise ValueError(
"reduce operations through the zero'th axis are " "not defined."
)
return JaggedArray.from_aoa(
np.array([op(arr, axis=tuple(axis - 1)) for arr in aoa])
)
def all(self, **kwargs):
return self._reduce_op(np.all, **kwargs)
def any(self, **kwargs):
return self._reduce_op(np.any, **kwargs)
def argmax(self, **kwargs):
return self._reduce_op(np.argmax, **kwargs)
def argmin(self, **kwargs):
return self._reduce_op(np.argmin, **kwargs)
def cumprod(self, **kwargs):
return self._reduce_op(np.cumprod, **kwargs)
def cumsum(self, **kwargs):
return self._reduce_op(np.cumsum, **kwargs)
def max(self, **kwargs):
return self._reduce_op(np.max, **kwargs)
def mean(self, **kwargs):
return self._reduce_op(np.mean, **kwargs)
def min(self, **kwargs):
return self._reduce_op(np.min, **kwargs)
def sum(self, **kwargs):
return self._reduce_op(np.sum, **kwargs)
def prod(self, **kwargs):
return self._reduce_op(np.prod, **kwargs)
def ptp(self, **kwargs):
return self._reduce_op(np.ptp, **kwargs)
def put(self, indices, values):
raise NotImplementedError("put for a jagged array is not supported.")
def repeat(self, n):
raise NotImplementedError("repeat for a jagged array is not supported.")
def reshape(self, shape: ShapeLike) -> JaggedArray:
""" reshape the array
Args:
shape: the new shape to add
Examples:
>>> ja = JaggedArray(np.arange(8), [[3, 2, 3]])
>>> ja.reshape([[2, 3, 3]])
JaggedArray(data=[0 1 2 3 4 5 6 7],
shape=[[2 3 3]],
dtype=int64)
>>> ja.reshape([[3, 3, 3]])
Traceback (most recent call last):
    ...
ValueError: total size of new array must be unchanged.
"""
shape = np.asarray(shape)
if not np.array_equal(np.prod(shape, axis=0), np.prod(self.shape, axis=0)):
raise ValueError("total size of new array must be unchanged.")
else:
data = np.concatenate(
[sub.reshape(shp).flatten() for sub, shp in zip(self.to_aoa(), shape.T)]
)
new = self[...]
new.data = data
new.shape = shape
return new
def resize(self, shape: ShapeLike) -> JaggedArray:
""" resize the arrays """
new = self[...]
size =
|
np.prod(shape, axis=1)
|
numpy.prod
|
from __future__ import division
import os
import sys
import array
import struct
import io
import math
import numpy as np
import colorsys
import pydicom
from pydicom.dataset import Dataset, FileDataset
import pydicom.uid
import skimage as ski
import skimage.io
import skimage.transform
import skimage.draw
import skimage.morphology
import warnings
from Util import *
from File import *
NORMALIZE_NO = 0
NORMALIZE_SIMPLE = 1
NORMALIZE_CONTRAST_STRETCHING = 2
# NORMALIZE_ADAPTATIVE_EQUALIZATION = 3
# _image type should be np.float32
def NormalizeFrame(_image, _normalize = NORMALIZE_NO):
if _normalize == NORMALIZE_SIMPLE:
min = np.min(_image)
max = np.max(_image)
return (_image - min)/(max - min)
elif _normalize == NORMALIZE_CONTRAST_STRETCHING:
p2, p98 = np.percentile(_image, (2, 98))
return ski.exposure.rescale_intensity(_image, in_range=(p2, p98), out_range=(0,1))
# elif _normalize == NORMALIZE_ADAPTATIVE_EQUALIZATION
# return ski.exposure.equalize_adapthist(_image, clip_limit=0.03):
return _image
def GetMaxValue(_pixelSize):
if _pixelSize == 8:
return 255
elif _pixelSize == 16:
return 65535
elif _pixelSize == 10:
return 1023
else:
print("GetMaxValue():: WARNING NOT IMPLEMENTED _pixelSize = " + str(_pixelSize))
return 65535
def GetFloat32NormalizedFrame(_image, _pixelSize, _normalize = NORMALIZE_NO):
_image = _image.astype(np.float32)
if _normalize != NORMALIZE_NO:
return NormalizeFrame(_image, _normalize)
return _image/GetMaxValue(_pixelSize)
def ReadOnlyDicomInfo(_filename):
dcmInfo = pydicom.read_file(_filename, stop_before_pixels = True, defer_size = 16)
# print(dcmInfo.Columns)
# print(dcmInfo.Rows)
# print(dcmInfo.NumberOfFrames)
# print(dcmInfo.BitsStored)
return dcmInfo
def ReadDicomFrame(_filename, _frameId):
# print(_filename + " " + str(_frameId))
file = open(_filename, "rb") # TODO use OpenFile here?
dcmInfo = pydicom.read_file(file, stop_before_pixels = True, defer_size = 16)
if _frameId < 0 or _frameId >= dcmInfo.NumberOfFrames:
print("ReadDicomFrame():: ERROR _frameId should be inferior dcmInfo.NumberOfFrames")
# print(dcmInfo.BitsStored)
if dcmInfo.BitsStored == 16 or dcmInfo.BitsStored == 10:
pixelType = "H"
pixelSize = 2 # dcmInfo.BitsStored//8
elif dcmInfo.BitsStored == 8:
pixelType = "B"
pixelSize = 1
else:
print("ReadDicomFrame():: WARNING NOT IMPLEMENTED dcmInfo.BitsStored = " + str(dcmInfo.BitsStored))
sizeImageInByte = dcmInfo.Columns*dcmInfo.Rows*pixelSize
# print(sizeImageInByte)
# print(file.tell())
# skip the dicom tag (0x7fe0, 0x0010) 4 bytes,
# then the VR info if we have "explicit VR" (if not, nothing is there in "implicit VR") 4 bytes (if not 0 byte): (VR_OW = 0x574f for example)
# finally the length of the sequence 4 bytes
# u16 = struct.unpack('H', file.read(2))[0]
# print(hex(u16) + " " + str(u16))
# u16 = struct.unpack('H', file.read(2))[0]
# print(hex(u16) + " " + str(u16))
# if dcmInfo.is_implicit_VR == False:
# s32 = struct.unpack('i', file.read(4))[0]
# print(hex(s32) + " " + str(s32))
# s32 = struct.unpack('i', file.read(4))[0]
# print(hex(s32) + " " + str(s32))
if dcmInfo.is_implicit_VR == True:
file.seek(8, io.SEEK_CUR)
else:
file.seek(12, io.SEEK_CUR)
file.seek(_frameId*sizeImageInByte, io.SEEK_CUR)
package = file.read(sizeImageInByte)
# print(len(package))
# seems faster than...
image = array.array(pixelType)
if sys.version_info < (3,0):
image.fromstring(package) # DEPRECATED
else:
image.frombytes(package)
# ...this
# n = dcmInfo.Columns*dcmInfo.Rows
# image = struct.unpack(str(n)+pixelType, package)
# image = np.array(image).reshape(dcmInfo.Columns, dcmInfo.Rows)
# print(sizeImageInByte)
# print(np.array(image).shape)
# print(np.array(image).dtype)
image = np.array(image).reshape(dcmInfo.Rows, dcmInfo.Columns)
file.close() # TODO use CloseFile here?
return image, dcmInfo
def GetFloat32DicomFrame(_filename, _frameId, _normalize = NORMALIZE_NO):
image, dcmInfo = ReadDicomFrame(_filename, _frameId)
return GetFloat32NormalizedFrame(image, dcmInfo.BitsStored, _normalize)
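# Illustrative usage sketch for the readers above; "sequence.dcm" is a
# hypothetical file name, not something shipped with this module.
def _example_read_first_frame(_filename="sequence.dcm"):
    # read frame 0 and stretch its contrast into [0, 1]
    frame = GetFloat32DicomFrame(_filename, 0, _normalize=NORMALIZE_CONTRAST_STRETCHING)
    print(frame.dtype, frame.shape, frame.min(), frame.max())
    return frame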
# save a X-ray sequence into dicom format, _sequence is numpy array with the following shape (NumberOfFrames, Rows, Columns)
def SaveDicomSequence(_filename, _sequence):
file_meta = Dataset()
# file_meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.2' # CT Image Storage
file_meta.MediaStorageSOPClassUID = '1.2.3.4.5.1.4.1.1.2' # need valid UID here for real work
file_meta.MediaStorageSOPInstanceUID = "1.2.3" # need valid UID here for real work
file_meta.ImplementationClassUID = "1.2.3.4" # need valid UIDs here
# Create the FileDataset instance (initially no data elements, but file_meta supplied)
ds = FileDataset(_filename, {}, file_meta=file_meta, preamble=b"\0" * 128)
# Add the data elements -- not trying to set all required here. Check DICOM standard
# ds.PatientName = "Test^Firstname"
# ds.PatientID = "123456"
# Set the transfer syntax
ds.is_little_endian = True
ds.is_implicit_VR = True # implicit VR (0002,0010) TransferSyntaxUID: 1.2.840.10008.1.2
# ds.is_implicit_VR = False # explicit VR (0002,0010) TransferSyntaxUID: 1.2.840.10008.1.2.1
# Set creation date/time
# dt = datetime.datetime.now()
# ds.ContentDate = dt.strftime('%Y%m%d')
# timeStr = dt.strftime('%H%M%S.%f') # long format with micro seconds
# ds.ContentTime = timeStr
ds.SamplesPerPixel = 1
ds.PhotometricInterpretation = "MONOCHROME2"
ds.PixelRepresentation = 0
ds.HighBit = 15
ds.BitsStored = 16
ds.BitsAllocated = 16
if sys.version_info < (3,0):
ds.SmallestImagePixelValue = '\\x00\\x00'
ds.LargestImagePixelValue = '\\xff\\xff'
else:
ds.SmallestImagePixelValue = (0).to_bytes(2, byteorder='little')
ds.LargestImagePixelValue = (65535).to_bytes(2, byteorder='little')
ds.Columns = _sequence.shape[2]
ds.Rows = _sequence.shape[1]
ds.NumberOfFrames = _sequence.shape[0]
if _sequence.dtype != np.uint16:
print("warning _sequence.dtype != np.uint16")
_sequence = _sequence.astype(np.uint16)
ds.PixelData = _sequence.tobytes()
ds.save_as(_filename)
# Write as a different transfer syntax
# ds.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRBigEndian # XXX shouldn't need this but pydicom 0.9.5 bug not recognizing transfer syntax
# ds.is_little_endian = False
# ds.is_implicit_VR = False
# print("Writing test file as Big Endian Explicit VR", filename2)
# ds.save_as(filename2)
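# Illustrative sketch only: write a random uint16 sequence shaped
# (NumberOfFrames, Rows, Columns). The UIDs set by SaveDicomSequence above are
# dummies, so the resulting file is only useful for testing this code path.
def _example_save_random_sequence(_filename="test_sequence.dcm"):
    sequence = (np.random.rand(4, 128, 128) * 65535).astype(np.uint16)
    SaveDicomSequence(_filename, sequence)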
def LoadImage(_path):
return ski.io.imread(_path)
def SaveImage(_path, _buffer):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ski.io.imsave(_path, _buffer)
# ski.io.imsave(_path, _buffer)
def StackImagesMultiChan(_imgs, _columns, _rows):
Xid = 2
Yid = 3
bigImage = np.zeros((_imgs.shape[1], _rows*_imgs.shape[Yid], _columns*_imgs.shape[Xid]), dtype=_imgs.dtype)
# for index, img in enumerate(_imgs):
for index in range(_columns*_rows):
if index >= len(_imgs):
break
i = int(index/_columns)
j = index%_columns
for chan in range(_imgs.shape[1]):
bigImage[chan, i*_imgs.shape[Yid]:(i+1)*_imgs.shape[Yid], j*_imgs.shape[Xid]:(j+1)*_imgs.shape[Xid]] = _imgs[index][chan][...]
return bigImage
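# Minimal sketch of the tiling convention above: _imgs is a 4D array
# (N, channels, dim2, dim3) and the result has shape
# (channels, _rows*_imgs.shape[3], _columns*_imgs.shape[2]), with image i landing
# at grid row i//_columns, grid column i%_columns.
def _example_stack_images():
    imgs = np.random.rand(6, 3, 32, 32).astype(np.float32)
    big = StackImagesMultiChan(imgs, 3, 2)  # 3 columns, 2 rows
    print(big.shape)  # -> (3, 64, 96)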
def SaveSetImagesMultiChan(_path, _imgs, _columns, _rows):
image = StackImagesMultiChan(_imgs, _columns, _rows)
image = np.moveaxis(image, 0, -1)
image = image*255
image = image.astype(np.uint8)
SaveImage(_path, image)
def ConcatImagesAndSave(_imageNameList, _concatImageName, _sizeX, _sizeY, _columns, _rows):
imageList = np.zeros((len(_imageNameList), _sizeY, _sizeX, 3), dtype = np.uint8)
for i in range(len(_imageNameList)):
if IsFileExist(_imageNameList[i]) == True:
imageList[i][...] = LoadImage(_imageNameList[i])
imageList = np.rollaxis(imageList, 3, 1)
concatImage = StackImagesMultiChan(imageList, _columns, _rows)
imageList = None
concatImage = np.moveaxis(concatImage, 0, -1)
SaveImage(_concatImageName, concatImage)
def GrayToRGB(_image):
image = np.empty((3, _image.shape[0], _image.shape[1]), dtype = _image.dtype)
image[0][...] = _image[...]
image[1][...] = _image[...]
image[2][...] = _image[...]
return np.moveaxis(image, 0, -1)
def GrayToRGBSet(_imageSet):
imageSet = np.empty((3, _imageSet.shape[0], _imageSet.shape[1], _imageSet.shape[2]), dtype = _imageSet.dtype)
for i in range(len(_imageSet)):
imageSet[0][i] = _imageSet[i][...]
imageSet[1][i] = _imageSet[i][...]
imageSet[2][i] = _imageSet[i][...]
return
|
np.moveaxis(imageSet, 0, -1)
|
numpy.moveaxis
|
__author__ = "<NAME>"
__copyright__ = "Sprace.org.br"
__version__ = "1.0.0"
import os
import math
import numpy as np
import pandas as pd  # used below in predict_full_sequences
import datetime as dt
from enum import Enum
from scipy.spatial import distance
import tensorflow as tf
import keras.backend as K
#from keras.backend.tensorflow_backend import set_session
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils import plot_model
from core.utils.utils import *
from core.models.gaussian_loss import gaussian_loss, gaussian_nll
class BagOfHits(Enum):
All=1,
Track=2,
Layer=3
class BaseModel():
def __init__(self, configs):
self.model = Sequential()
self.name = configs['model']['name']
self.normalise = configs['data']['normalise']
self.cylindrical = configs['data']['cylindrical']
self.epochs = configs['training']['epochs']
self.batch_size = configs['training']['batch_size']
self.validation = configs['training']['validation']
self.earlystopping = configs['training']['earlystopping']
self.stopped_epoch = 0
path_to, filename = os.path.split(configs['data']['filename'])
#print(get_unique_name(filename))
#self.orig_ds_name = configs['data']['filename']
self.orig_ds_name = filename
self.encryp_ds_name = get_unique_name(self.orig_ds_name)
self.decryp_ds_name = get_decryp_name(self.encryp_ds_name)
#print(self.encryp_ds_name)
if self.cylindrical:
coord = 'cylin'
else:
coord = 'xyz'
# set unique Id identification
self.save_fnameh5 = os.path.join(configs['paths']['bin_dir'],
'model-%s-%s-coord-%s-normalise-%s-epochs-%s-batch-%s.h5' % (
self.name, self.encryp_ds_name, coord,
str(self.normalise).lower(), self.epochs, self.batch_size))
print(self.save_fnameh5)
self.save_fname = os.path.join(configs['paths']['save_dir'], 'architecture-%s.png' % self.name)
self.save = configs['training']['save_model']
if configs['training']['use_gpu'] == True:
#if tf.test.is_gpu_available():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
print('[Model] Set memory growth for %s to True' % gpu)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print("[Model] ", len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
else:
print('No GPU configured.')
pass
# if configs['training']['use_gpu'] == True:
# #config = tf.ConfigProto( device_count = {'GPU': 0 , 'CPU': 0} )
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# sess = tf.Session(config=config)
# set_session(sess)
# tf.device('/gpu:0')
# else:
# config=tf.ConfigProto(log_device_placement=True)
# sess = tf.Session(config=config)
# set_session(sess)
#set_random_seed(42)
tf.compat.v1.set_random_seed(0)
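# Illustrative configs sketch assembled only from the keys read above; every
# value is a placeholder, not a recommended setting:
#
#   configs = {
#       'model':    {'name': 'lstm'},
#       'data':     {'filename': 'data/events.csv', 'normalise': True, 'cylindrical': False},
#       'training': {'epochs': 10, 'batch_size': 32, 'validation': 0.2,
#                    'earlystopping': True, 'save_model': True, 'use_gpu': False},
#       'paths':    {'bin_dir': 'bin', 'save_dir': 'results'},
#   }
#   model = BaseModel(configs)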
def load_model(self):
if self.exist_model(self.save_fnameh5):
print('[Model] Loading model from file %s' % self.save_fnameh5)
self.model = load_model(self.save_fnameh5, custom_objects={'gaussian_loss': gaussian_loss, 'gaussian_nll': gaussian_nll})
return True
else:
print('[Model] Can not load the model from file %s' % self.save_fnameh5)
return False
def exist_model(self, filepath):
if os.path.exists(filepath):
return True
return False
def save_architecture(self, filepath):
plot_model(self.model, to_file=filepath, show_shapes=True)
print('[Model] Model Architecture saved at %s' % filepath)
def save_model(self, filepath):
self.model.save(filepath)
print('[Model] Model for inference saved at %s' % filepath)
def train(self, x, y, epochs, batch_size, validation, shuffle=False, verbose=False, callbacks=None):
timer = Timer()
timer.start()
print('[Model] Training Started')
print('[Model] %s epochs, %s batch size' % (epochs, batch_size))
#print('[Model] Shape of data train: ', x.shape)
#save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
if callbacks is None:
print('DEBUG')
if self.earlystopping:
callbacks = [
EarlyStopping(monitor='loss', mode='min', verbose=1),
ModelCheckpoint(filepath=self.save_fnameh5, monitor='val_loss', mode='min', save_best_only=True)
]
else:
callbacks = [
ModelCheckpoint(filepath=self.save_fnameh5, monitor='val_loss', mode='min', save_best_only=True)
]
else:
pass
history = self.model.fit(
x,
y,
verbose=verbose,
validation_split=validation,
epochs=epochs,
batch_size=batch_size,
shuffle=shuffle,
callbacks=callbacks
)
if self.save == True:
self.save_model(self.save_fnameh5)
# record at which epoch the algorithm stopped
if self.earlystopping:
self.stopped_epoch = callbacks[0].stopped_epoch
print('[Model] Model training stopped at %s epoch' % self.stopped_epoch)
print('[Model] Training Completed. Model h5 saved as %s' % self.save_fnameh5)
print('[Model] Model train with structure:', self.model.inputs)
timer.stop()
return history
def train_generator(self, data_gen, epochs, batch_size, steps_per_epoch, save_dir):
timer = Timer()
timer.start()
print('[Model] Training Started')
print('[Model] %s epochs, %s batch size, %s batches per epoch' % (epochs, batch_size, steps_per_epoch))
save_fname = os.path.join(save_dir, '%s-e%s.h5' % (dt.datetime.now().strftime('%d%m%Y-%H%M%S'), str(epochs)))
callbacks = [
ModelCheckpoint(filepath=save_fname, monitor='loss', save_best_only=True)
]
self.model.fit_generator(
data_gen,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=callbacks,
workers=1
)
print('[Model] Training Completed. Model saved as %s' % save_fname)
timer.stop()
def evaluate(self, x, y, batch_size=10):
results = self.model.evaluate(x, y, batch_size=batch_size, verbose=2)
print('[Model] Test loss %s accuracy %s :' %(results[0], results[1]))
def predict_one_hit(self, data):
#Predict each timestep given the last sequence of true data, in effect only predicting 1 step ahead each time
print('[Model] Predicting Hit-by-Hit...')
predicted = self.model.predict(data)
print('[Model] Predicted shape %s size %s' % (predicted.shape, predicted.size))
#predicted = np.reshape(predicted, (predicted.size, 1))
return predicted
def predict_sequences_multiple(self, data, window_size, prediction_len):
#Predict sequence of 50 steps before shifting prediction run forward by 50 steps
print('[Model] Predicting Sequences Multiple...')
prediction_seqs = []
for i in range(int(len(data)/prediction_len)):
curr_frame = data[i*prediction_len]
predicted = []
for j in range(prediction_len):
predicted.append(self.model.predict(curr_frame[np.newaxis,:,:])[0,0])
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size-2], predicted[-1], axis=0)
prediction_seqs.append(predicted)
return prediction_seqs
'''
def predict_full_sequences(self, x_test, y_true, hits_len):
timer = Timer()
timer.start()
print('[Model] Predicting Sequences Started')
total = len(x_test)
correct = 0
incorrect = 0
pred_sequences = []
for j in range(total):
curr_frame = x_test[j]
predicted = []
for i in range(hits_len):
pred = self.model.predict(curr_frame[np.newaxis,:,:])
predicted.append(pred)
curr_frame = curr_frame[1:]
# insert a value: np.insert(array, index, value, axis)
curr_frame = np.insert(curr_frame, [3], predicted[-1], axis=0)
#print(curr_frame, predicted[-1])
pred_sequences.append(predicted)
print('[Model] Prediction Finished.')
timer.stop()
return pred_sequences
'''
def predict_full_sequences(self, x_test, data, num_hits=6, normalise=False, tol=0.1):
'''
x_test: input data
normalise: say input data must be scaled
'''
timer = Timer()
timer.start()
print('[Model] Predicting Sequences Started')
total = len(x_test)
pred_sequences = []
#count_correct = np.zeros(num_hits)
for j in range(total):
curr_frame = x_test[j]
predicted = []
for i in range(num_hits):
if normalise:
curr_frame = data.x_scaler.transform(np.reshape(curr_frame,(1,12)))
curr_frame_orig = data.inverse_transform_x(pd.DataFrame(curr_frame).values.flatten())
curr_frame_orig = np.reshape(curr_frame_orig, (4,3))
curr_frame = np.reshape(curr_frame, (4,3))
else:
curr_frame = curr_frame
curr_frame_orig = curr_frame
pred = self.model.predict(curr_frame[np.newaxis,:,:])
pred = np.reshape(pred, (1, 3))
if normalise:
pred = data.inverse_transform_y(pred)
else:
pred = pred
pred = np.reshape(pred, (1, 3))
#if np.isclose(curr_hit, near_pred, atol=0.01).all():
# count_correct[i]=+1
predicted.append(pred)
curr_frame = curr_frame_orig[1:]
# insert a value: np.insert(array, index, value, axis)
curr_frame =
|
np.insert(curr_frame, [3], predicted[-1], axis=0)
|
numpy.insert
|
#!/usr/bin/env python3
import os
import sys
import copy
import json
import time
import queue
import ephem
import numpy
import ctypes
import shutil
import signal
import logging
from logging.handlers import TimedRotatingFileHandler
import argparse
import threading
import subprocess
from datetime import datetime
from collections import deque
from scipy.special import pro_ang1, iv
from scipy.stats import scoreatpercentile as percentile
from astropy.constants import c as speedOfLight
speedOfLight = speedOfLight.to('m/s').value
from lsl.common.stations import lwasv, parse_ssmif
from lsl.correlator import uvutils
from lsl.imaging import utils
from lsl.common.adp import fS, fC
from lsl.astro import MJD_OFFSET, DJD_OFFSET
from bifrost.address import Address
from bifrost.udp_socket import UDPSocket
from bifrost.packet_capture import PacketCaptureCallback, DiskReader, UDPCapture, UDPSniffer
from bifrost.ring import Ring
from bifrost.libbifrost import bf
import bifrost.affinity as cpu_affinity
import bifrost.ndarray as BFArray
from bifrost.fft import Fft
from bifrost.quantize import quantize as Quantize
from bifrost.linalg import LinAlg as Correlator
from bifrost.orville import Orville as Gridder
from bifrost.proclog import ProcLog
from bifrost import map as BFMap, asarray as BFAsArray
from bifrost.DataType import DataType as BFDataType
from bifrost.transpose import transpose as BFTranspose
from bifrost.ndarray import memset_array, copy_array
from bifrost.device import set_device as BFSetGPU, get_device as BFGetGPU, stream_synchronize as BFSync, set_devices_no_spin_cpu as BFNoSpinZone
from bifrost import device
BFNoSpinZone()
import PIL.Image, PIL.ImageDraw, PIL.ImageFont
from OrvilleImageDB import OrvilleImageDB
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CAL_PATH = os.path.join(BASE_PATH, 'calibration')
if not os.path.exists(CAL_PATH):
os.mkdir(CAL_PATH)
STATION = lwasv
ANTENNAS = STATION.antennas
W_STEP = 0.1
SUPPORT_SIZE = 7
SUPPORT_OVERSAMPLE = 64
def round_up_to_even(n, maxprimes=3):
"""
Round up the given value to minimize the number of prime factors. Factors
other than 2, 3, and 5 are not allowed, and the number of factors of 3 and
5 cannot exceed maxprimes.
"""
if n % 2 != 0:
n += 1
while True:
r = n
nPrimes = 0
while r > 1 and r % 2 == 0:
r //= 2
while r > 1 and r % 3 == 0:
r //= 3
nPrimes += 1
while r > 1 and r % 5 == 0:
r //= 5
nPrimes += 1
if r == 1 and nPrimes <= maxprimes:
return n
n += 2
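# Worked example of the search above: 98 = 2 * 7**2 carries a factor of 7 and is
# rejected, while the next even candidate 100 = 2**2 * 5**2 has two factors of 5
# (nPrimes = 2 <= maxprimes) and is returned.
#   round_up_to_even(98)   -> 100
#   round_up_to_even(6144) -> 6144   (2**11 * 3, already acceptable)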
def timetag_to_mjdatetime(time_tag):
"""
Convert a DP/ADP timestamp into a MJD and UTC hour, minute, and second.
"""
## Get the date
unix_time_tag_i = time_tag // int(fS)
unix_time_tag_f = (time_tag % int(fS)) / float(fS)
mjd = int(40587 + unix_time_tag_i // 86400)
unix_day_frac_s = unix_time_tag_i - (unix_time_tag_i // 86400) * 86400
h = unix_day_frac_s // 3600
m = unix_day_frac_s % 3600 // 60
s = unix_day_frac_s % 60 + unix_time_tag_f
return mjd, h, m, s
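# Illustrative sketch: time tags count ticks of 1/fS seconds since the UNIX
# epoch, whose MJD is 40587 (the constant used above), so one whole day of
# ticks lands on midnight UTC of MJD 40588.
def _example_timetag():
    one_day_tag = 86400 * int(fS)
    mjd, h, m, s = timetag_to_mjdatetime(one_day_tag)
    print(mjd, h, m, s)  # -> 40588 0 0 0.0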
class MultiQueue(object):
def __init__(self, slots, maxsize=0):
self._lock = threading.RLock()
self._slots = [queue.Queue(maxsize=maxsize) for i in range(slots)]
def empty(self):
with self._lock:
is_empty = all([slot.empty() for slot in self._slots])
return is_empty
def full(self):
with self._lock:
is_full = any([slot.full() for slot in self._slots])
return is_full
def qsize(self):
with self._lock:
size = max([slot.qsize() for slot in self._slots])
return size
def put(self, item, block=True, timeout=None):
with self._lock:
for slot in self._slots:
slot.put(item, block=block, timeout=timeout)
def put_nowait(self, item):
with self._lock:
for slot in self._slots:
slot.put_nowait(item)
def get(self, slot, block=True, timeout=None):
with self._lock:
item = self._slots[slot].get(block=block, timeout=timeout)
return item
def get_nowait(self, slot):
with self._lock:
item = self._slots[slot].get_nowait()
return item
def task_done(self, slot):
self._slots[slot].task_done()
def join(self):
for slot in self._slots:
slot.join()
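# Minimal usage sketch: MultiQueue fans a single put() out to every slot, so
# each consumer drains its own independent copy of the same item stream.
def _example_multiqueue():
    mq = MultiQueue(2, maxsize=4)
    mq.put('item')
    a = mq.get(0)   # consumer 0 receives the item...
    b = mq.get(1)   # ...and so does consumer 1
    mq.task_done(0)
    mq.task_done(1)
    return a, b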
FILL_QUEUE = queue.Queue(maxsize=4)
def get_good_and_missing_rx():
pid = os.getpid()
statsname = os.path.join('/dev/shm/bifrost', str(pid), 'udp_capture', 'stats')
good = 'ngood_bytes : 0'
missing = 'nmissing_bytes : 0'
if os.path.exists(statsname):
with open(os.path.join('/dev/shm/bifrost', str(pid), 'udp_capture', 'stats'), 'r') as fh:
good = fh.readline()
missing = fh.readline()
good = int(good.split(':', 1)[1], 10)
missing = int(missing.split(':', 1)[1], 10)
return good, missing
class CaptureOp(object):
def __init__(self, log, oring, sock, *args, **kwargs):
self.log = log
self.oring = oring
self.sock = sock
self.args = args
self.kwargs = kwargs
self.shutdown_event = threading.Event()
## HACK TESTING
#self.seq_callback = None
def shutdown(self):
self.shutdown_event.set()
def cor_callback(self, seq0, time_tag, chan0, nchan, navg, nsrc, hdr_ptr, hdr_size_ptr):
print("++++++++++++++++ seq0 =", seq0)
print(" time_tag =", time_tag)
hdr = {'time_tag': time_tag,
'seq0': seq0,
'chan0': chan0,
'cfreq': chan0*25e3,
'nchan': nchan,
'bw': nchan*4*25e3,
'navg': navg,
'nstand': int(numpy.sqrt(8*nsrc+1)-1)//2,
'npol': 2,
'nbl': nsrc,
'complex': True,
'nbit': 32}
print("******** CFREQ:", hdr['cfreq'])
hdr_str = json.dumps(hdr).encode()
# TODO: Can't pad with NULL because returned as C-string
#hdr_str = json.dumps(hdr).ljust(4096, '\0')
#hdr_str = json.dumps(hdr).ljust(4096, ' ')
header_buf = ctypes.create_string_buffer(hdr_str)
hdr_ptr[0] = ctypes.cast(header_buf, ctypes.c_void_p)
hdr_size_ptr[0] = len(hdr_str)
return 0
def main(self):
global FILL_QUEUE
seq_callback = PacketCaptureCallback()
seq_callback.set_cor(self.cor_callback)
with UDPCapture("cor", self.sock, self.oring, *self.args, sequence_callback=seq_callback, **self.kwargs) as capture:
good, missing = get_good_and_missing_rx()
while not self.shutdown_event.is_set():
status = capture.recv()
print('III', status)
# Determine the fill level of the last gulp
new_good, new_missing = get_good_and_missing_rx()
try:
fill_level = float(new_good-good) / (new_good-good + new_missing-missing)
except ZeroDivisionError:
fill_level = 0.0
good, missing = new_good, new_missing
try:
FILL_QUEUE.put_nowait(fill_level)
except queue.Full:
pass
del capture
class SpectraOp(object):
def __init__(self, log, iring, base_dir=os.getcwd(), core=-1, gpu=-1):
self.log = log
self.iring = iring
self.output_dir = os.path.join(base_dir, 'spectra')
self.core = core
self.gpu = gpu
if not os.path.exists(base_dir):
os.mkdir(base_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update({'nring':1, 'ring0':self.iring.name})
def _plot_spectra(self, time_tag, freq, specs, status):
# Plotting setup
nchan = freq.size
nstand = specs.shape[0]
try:
minval = numpy.min(specs[numpy.where(numpy.isfinite(specs))])
maxval = numpy.max(specs[numpy.where(numpy.isfinite(specs))])
except ValueError:
minval = 0.0
maxval = 1.0
# Image setup
width = height = 16
im = PIL.Image.new('RGB', (width * 65 + 1, height * 65 + 21), '#FFFFFF')
draw = PIL.ImageDraw.Draw(im)
font = PIL.ImageFont.load(os.path.join(BASE_PATH, 'fonts', 'helvB10.pil'))
# Axes boxes
for i in range(width + 1):
draw.line([i * 65, 0, i * 65, height * 65], fill = '#000000')
for i in range(height + 1):
draw.line([(0, i * 65), (im.size[0], i * 65)], fill = '#000000')
# Power as a function of frequency for all antennas
x = numpy.arange(nchan) * 64 // nchan
for s in range(nstand):
if s >= height * width:
break
x0, y0 = (s % width) * 65 + 1, (s // width + 1) * 65
draw.text((x0 + 5, y0 - 60), str(s+1), font=font, fill='#000000')
## XX
c = '#1F77B4'
if status[2*s+0] != 33:
c = '#799CB4'
y = ((54.0 / (maxval - minval)) * (specs[s,:,0] - minval)).clip(0, 54)
draw.line(list(zip(x0 + x, y0 - y)), fill=c)
## YY
c = '#FF7F0E'
if status[2*s+1] != 33:
c = '#FFC28C'
y = ((54.0 / (maxval - minval)) * (specs[s,:,1] - minval)).clip(0, 54)
draw.line(list(zip(x0 + x, y0 - y)), fill=c)
# Summary
ySummary = height * 65 + 2
timeStr = datetime.utcfromtimestamp(time_tag / fS)
timeStr = timeStr.strftime("%Y/%m/%d %H:%M:%S UTC")
draw.text((5, ySummary), timeStr, font = font, fill = '#000000')
rangeStr = 'range shown: %.3f to %.3f dB' % (minval, maxval)
draw.text((210, ySummary), rangeStr, font = font, fill = '#000000')
x = im.size[0] + 15
for label, c in reversed(list(zip(('good XX','good YY','flagged XX','flagged YY'),
('#1F77B4','#FF7F0E','#799CB4', '#FFC28C')))):
x -= draw.textsize(label, font = font)[0] + 20
draw.text((x, ySummary), label, font = font, fill = c)
return im
def main(self):
cpu_affinity.set_core(self.core)
if self.gpu != -1:
BFSetGPU(self.gpu)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),
'ngpu': 1,
'gpu0': BFGetGPU(),})
status = [ant.combined_status for ant in ANTENNAS]
for iseq in self.iring.read(guarantee=True):
ihdr = json.loads(iseq.header.tostring())
self.sequence_proclog.update(ihdr)
self.log.info('SpectraOp: Config - %s', ihdr)
# Setup the ring metadata and gulp sizes
chan0 = ihdr['chan0']
nchan = ihdr['nchan']
nbl = ihdr['nbl']
nstand = int(numpy.sqrt(8*nbl+1)-1)//2
npol = ihdr['npol']
navg = ihdr['navg']
time_tag0 = iseq.time_tag
time_tag = time_tag0
igulp_size = nstand*(nstand+1)//2*nchan*npol*npol*8
ishape = (nstand*(nstand+1)//2,nchan,npol,npol)
self.iring.resize(igulp_size, igulp_size*10)
# Setup the arrays for the frequencies and auto-correlations
freq = chan0*fC + numpy.arange(nchan)*4*fC
autos = [i*(2*(nstand-1)+1-i)//2 + i for i in range(nstand)]
intCount = 0
prev_time = time.time()
for ispan in iseq.read(igulp_size):
if ispan.size < igulp_size:
continue # Ignore final gulp
curr_time = time.time()
acquire_time = curr_time - prev_time
prev_time = curr_time
## Setup and load
t0 = time.time()
idata = ispan.data_view(numpy.complex64).reshape(ishape)
## Pull out the auto-correlations
adata = idata[autos,:,:,:].real
adata = adata[:,:,[0,1],[0,1]]
## Plot
im = self._plot_spectra(time_tag, freq, 10*numpy.log10(adata), status)
## Save
### Timetag stuff
mjd, h, m, s = timetag_to_mjdatetime(time_tag)
### The actual save
outname = os.path.join(self.output_dir, str(mjd))
if not os.path.exists(outname):
os.mkdir(outname)
filename = '%i_%02i%02i%02i_%.3fMHz_%.3fMHz.png' % (mjd, h, m, s, freq.min()/1e6, freq.max()/1e6)
outname = os.path.join(outname, filename)
im.save(outname, 'PNG')
self.log.debug("Wrote spectra %i to disk as '%s'", intCount, os.path.basename(outname))
time_tag += navg * (fS / 100.0)
intCount += 1
curr_time = time.time()
process_time = curr_time - prev_time
self.log.debug('Spectra plotter processing time was %.3f s', process_time)
prev_time = curr_time
self.perf_proclog.update({'acquire_time': acquire_time,
'reserve_time': 0.0,
'process_time': process_time,})
self.log.info("SpectraOp - Done")
class BaselineOp(object):
def __init__(self, log, iring, base_dir=os.getcwd(), core=-1, gpu=-1):
self.log = log
self.iring = iring
self.output_dir = os.path.join(base_dir, 'baselines')
self.core = core
self.gpu = gpu
if not os.path.exists(base_dir):
os.mkdir(base_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update({'nring':1, 'ring0':self.iring.name})
self.station = STATION
def _plot_baselines(self, time_tag, freq, dist, baselines, valid):
# Plotting setup
nchan = freq.size
nbl = baselines.shape[0]
freq = freq[range(nchan//6//2,nchan,nchan//6)]
baselines = baselines[:,range(nchan//6//2,nchan,nchan//6),:,:]
baselines = numpy.abs(baselines[:,:,[0,1,1],[0,0,1]])
minval = numpy.min(baselines[valid,:,:])
maxval = numpy.max(baselines[valid,:,:])
if minval == maxval:
maxval = minval + 1.0
mindst = 0.0
maxdst = numpy.max(dist)
# Image setup
width, height = 2, 3
im = PIL.Image.new('RGB', (width*500 + 1, height*300 + 21), '#FFFFFF')
draw = PIL.ImageDraw.Draw(im)
font = PIL.ImageFont.load(os.path.join(BASE_PATH, 'fonts', 'helvB10.pil'))
# Axes boxes
for i in range(width + 1):
draw.line([i * 500, 0, i * 500, height * 300], fill = '#000000')
for i in range(height + 1):
draw.line([(0, i * 300), (im.size[0], i * 300)], fill = '#000000')
# Visibility amplitudes as a function of (u,v) distance
for c in range(baselines.shape[1]):
if c >= height * width:
break
x0, y0 = (c % width) * 500 + 1, (c // width + 1) * 300
draw.text((x0 + 5, y0 - 295), '%.3f MHz' % (freq[c]/1e6,), font=font, fill='#000000')
## (u,v) distance as adjusted for the frequency
x = ((499.0 / (maxdst - mindst)) * (dist[valid]*freq[c]/freq[0] - mindst)).clip(0, 499)
## XX
y = ((299.0 / (maxval - minval)) * (baselines[valid,c,0] - minval)).clip(0, 299)
draw.point(list(zip(x0 + x, y0 - y)), fill='#1F77B4')
## YY
y = ((299.0 / (maxval - minval)) * (baselines[valid,c,2] - minval)).clip(0, 299)
draw.point(list(zip(x0 + x, y0 - y)), fill='#FF7F0E')
### XY
#y = ((299.0 / (maxval - minval)) * (baselines[valid,c,1] - minval)).clip(0, 299)
#draw.point(zip(x0 + x, y0 - y), fill='#A00000')
# Details and labels
ySummary = height * 300 + 2
timeStr = datetime.utcfromtimestamp(time_tag / fS)
timeStr = timeStr.strftime("%Y/%m/%d %H:%M:%S UTC")
draw.text((5, ySummary), timeStr, font = font, fill = '#000000')
rangeStr = 'range shown: %.6f - %.6f' % (minval, maxval)
draw.text((210, ySummary), rangeStr, font = font, fill = '#000000')
x = im.size[0] + 15
#for label, c in reversed(list(zip(('XX','XY','YY'), ('#1F77B4','#A00000','#FF7F0E')))):
for label, c in reversed(list(zip(('XX','YY'), ('#1F77B4','#FF7F0E')))):
x -= draw.textsize(label, font = font)[0] + 20
draw.text((x, ySummary), label, font = font, fill = c)
return im
def main(self):
cpu_affinity.set_core(self.core)
if self.gpu != -1:
BFSetGPU(self.gpu)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),
'ngpu': 1,
'gpu0': BFGetGPU(),})
for iseq in self.iring.read(guarantee=True):
ihdr = json.loads(iseq.header.tostring())
self.sequence_proclog.update(ihdr)
self.log.info('BaselineOp: Config - %s', ihdr)
# Setup the ring metadata and gulp sizes
chan0 = ihdr['chan0']
nchan = ihdr['nchan']
nbl = ihdr['nbl']
nstand = int(numpy.sqrt(8*nbl+1)-1)//2
npol = ihdr['npol']
navg = ihdr['navg']
time_tag0 = iseq.time_tag
time_tag = time_tag0
igulp_size = nstand*(nstand+1)//2*nchan*npol*npol*8
ishape = (nstand*(nstand+1)//2,nchan,npol,npol)
self.iring.resize(igulp_size, igulp_size*10)
# Setup the arrays for the frequencies and baseline lengths
freq = chan0*fC + numpy.arange(nchan)*4*fC
t0 = time.time()
distname = os.path.join(CAL_PATH, 'dist_%i_%i_%i.npy' % (nbl, chan0, nchan))
try:
if os.path.exists(distname) and os.path.getmtime(distname) < os.path.getmtime(__file__):
raise IOError
dist = numpy.load(distname)
except IOError:
print('dist cache failed')
uvw = uvutils.compute_uvw(ANTENNAS[0::2], HA=0, dec=self.station.lat*180/numpy.pi,
freq=freq[0], site=self.station.get_observer(), include_auto=True)
print('uvw.shape', uvw.shape)
dist = numpy.sqrt(uvw[:,0,0]**2 + uvw[:,1,0]**2)
numpy.save(distname, dist)
valid = numpy.where( dist > 0.1 )[0]
print('@dist', time.time() - t0, '@', dist.shape, dist.size*4/1024.**2)
intCount = 0
prev_time = time.time()
for ispan in iseq.read(igulp_size):
if ispan.size < igulp_size:
continue # Ignore final gulp
curr_time = time.time()
acquire_time = curr_time - prev_time
prev_time = curr_time
## Setup and load
idata = ispan.data_view(numpy.complex64).reshape(ishape)
## Plot
im = self._plot_baselines(time_tag, freq, dist, idata, valid)
## Save
### Timetag stuff
mjd, h, m, s = timetag_to_mjdatetime(time_tag)
### The actual save
outname = os.path.join(self.output_dir, str(mjd))
if not os.path.exists(outname):
os.mkdir(outname)
filename = '%i_%02i%02i%02i_%.3fMHz_%.3fMHz.png' % (mjd, h, m, s, freq.min()/1e6, freq.max()/1e6)
outname = os.path.join(outname, filename)
im.save(outname, 'PNG')
self.log.debug("Wrote baselines %i to disk as '%s'", intCount, os.path.basename(outname))
time_tag += navg * (fS / 100.0)
intCount += 1
curr_time = time.time()
process_time = curr_time - prev_time
self.log.debug('Baseline plotter processing time was %.3f s', process_time)
prev_time = curr_time
self.perf_proclog.update({'acquire_time': acquire_time,
'reserve_time': 0.0,
'process_time': process_time,})
self.log.info("BaselineOp - Done")
class ImagingOp(object):
def __init__(self, log, iring, oring, decimation=1, core=-1, gpu=-1):
self.log = log
self.iring = iring
self.oring = oring
self.decimation = decimation
self.core = core
self.gpu = gpu
self.bind_proclog = ProcLog(type(self).__name__+"/bind")
self.in_proclog = ProcLog(type(self).__name__+"/in")
self.out_proclog = ProcLog(type(self).__name__+"/out")
self.size_proclog = ProcLog(type(self).__name__+"/size")
self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
self.perf_proclog = ProcLog(type(self).__name__+"/perf")
self.in_proclog.update({'nring':1, 'ring0':self.iring.name})
self.out_proclog.update({'nring':1, 'ring0':self.oring.name})
self.station = copy.deepcopy(STATION)
self.phase_center_ha = 0.0 # radians
self.phase_center_dec = self.station.lat # radians
if self.station.id == 'SV':
# Alternate phase center for Sevilleta that minimized the w RMS
self.phase_center_ha = 1.0*ephem.hours("-0:07:59.82")
self.phase_center_dec = 1.0*ephem.degrees("33:21:27.5")
def main(self):
cpu_affinity.set_core(self.core)
if self.gpu != -1:
BFSetGPU(self.gpu)
self.bind_proclog.update({'ncore': 1,
'core0': cpu_affinity.get_core(),
'ngpu': 1,
'gpu0': BFGetGPU(),})
pce = numpy.sin(self.phase_center_dec)*numpy.sin(self.station.lat) \
+ numpy.cos(self.phase_center_dec)*numpy.cos(self.station.lat)*numpy.cos(self.phase_center_ha)
pce = numpy.arcsin(pce)
pca = numpy.sin(self.phase_center_dec) - numpy.sin(pce)*numpy.sin(self.station.lat)
pca = pca / numpy.cos(pce) / numpy.cos(self.station.lat)  # cos(az) = (sin(dec) - sin(alt)*sin(lat)) / (cos(alt)*cos(lat))
pca = numpy.arccos(pca)
if numpy.sin(self.phase_center_ha) > 0:
pca = 2*numpy.pi - pca
phase_center = numpy.array([numpy.cos(pce)*numpy.sin(pca),
numpy.cos(pce)*numpy.cos(pca),
|
numpy.sin(pce)
|
numpy.sin
|
#---------------------------------------------------------------
import os, sys
sys.path.insert(0, os.getcwd()) # enables $ python examples/[EXAMPLE].py
#---------------------------------------------------------------
"""
ummon3 Examples
SINE
Run command:
python sine.py
"""
#
# IMPORTS
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ummon import *
#
# SET initial seed for reproducibility
|
np.random.seed(17)
|
numpy.random.seed
|
import re
import numpy as np
import pandas as pd
def update_namespace(filepath, namespace):
file = open(filepath, 'r').read()
file = re.sub(r'\S*_model_namespace', namespace + '_model_namespace', file)
with open(filepath, 'w') as f:
f.write(file)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return (x, y)
def jdays2years(epochs):
if (epochs.shape[0] > 0 and epochs[0] > 30000.0000):
return 1900.0 + (epochs - 15020.31352) / 365.242198781
else:
return epochs
def kepler(M, e, eps=1e-4):
En = M
Ens = En - (En - e * np.sin(En)- M) / (1 - e * np.cos(En))
k = 1
while (abs(Ens - En) > eps):
En = Ens
Ens = En - (En - e * np.sin(En) - M) / (1 - e * np.cos(En))
k += 1
if (k > 400000):
print('Kepler Eq (%f, %f) not solved in less than 400000 iterations\n' % (M, e))
E = np.nan
return E
E = Ens
return E
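# Worked check of the Newton-Raphson iteration above: for a circular orbit
# (e = 0) the first update already gives E = M, and for small eccentricities
# the loop converges in a handful of steps.
#   kepler(1.0, 0.0)        -> 1.0
#   kepler(np.pi/2, 0.1)    -> approx. 1.6703  (solves E - 0.1*sin(E) = pi/2)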
def getOrbit(t, T, P, e, a, w, Omega, i):
N = len(t)
E = np.empty(N)
# Iterate over epochs
for j in range(N):
# Mean anomaly
M = 2 * np.pi * (t[j] - T) / P
# Eccentric anomaly
E[j] = kepler(M, e)
# Thiele-Innes Constants
A = a * ( np.cos(w) * np.cos(Omega) -
|
np.sin(w)
|
numpy.sin
|
# import time
# import asyncio
#
# async def main():
# print(f'hi {time.time()}')
# await asyncio.sleep(1)
# print(f'world {time.time()}')
#
# def blocking():
# time.sleep(.5)
# print(f'b {time.time()}')
#
#
# loop = asyncio.get_event_loop()
# t = loop.create_task(main())
# loop.run_in_executor(None, blocking)
# loop.run_until_complete(t)
# loop.close()
#
# async def f():
# try:
# while True: await asyncio.sleep(0)
# except asyncio.CancelledError:
# print('task cancelled.. no')
# while True: await asyncio.sleep(0)
# else:
# return 123
#
# # a = f()
# # a.send(None)
# # a.send(None)
# # a.throw(asyncio.CancelledError)
# # a.send(None)
import numpy as np
a = np.zeros((2,2))
# print(np.pad(a, 1, 'constant', constant_values=10))
# print(np.diag(np.arange(4)+1, k=-1))
# print(np.unravel_index(99, (6,7,8)))
# print(np.tile(np.array([[0,1],[1,0]]), (4,4)))
color = np.dtype([
('r', np.ubyte, (1,)),
('g', np.ubyte, (1,)),
('b', np.ubyte, (1,))
])
# print(color)
# print(sum(range(5), -1))
# print(np.sum(range(5), -1))
a = np.ones(10)
# print(a**a)
# print(np.array(0) / np.array(0))
# print(np.array(0) // np.array(0))
# print(np.array([np.nan]).astype(int).astype(float))
Z = np.random.uniform(-10,+10,10)
# print(Z)
# print(np.round(Z))
# print(np.copysign(np.ceil(np.abs(Z)), Z))
a = np.random.randint(0, 10, 10)
b = np.random.randint(0, 10, 10)
# print(a)
# print(b)
# print(np.intersect1d(a, b))
#
# print(np.sqrt(-1) == np.emath.sqrt(-1))
yesterday = np.datetime64('today', 'D') - np.timedelta64(1, 'D')
today = np.datetime64('today', 'D')
tomorrow = np.datetime64('today', 'D') +
|
np.timedelta64(1, 'D')
|
numpy.timedelta64
|
from data import CITIES, BUSINESSES, USERS, REVIEWS, TIPS, CHECKINS
from data import get_business
import random
import pandas as pd
import numpy as np
def extract_genres(movies):
"""Create an unfolded genre dataframe. Unpacks genres seprated by a '|' into seperate rows.
Arguments:
movies -- a dataFrame containing at least the columns 'movieId' and 'genres'
where genres are seprated by '|'
"""
genres_m = movies.apply(lambda row: pd.Series([row['business_id']] + row['categories'].lower().split(", ")), axis=1)
stack_genres = genres_m.set_index(0).stack()
df_stack_genres = stack_genres.to_frame()
df_stack_genres['business_id'] = stack_genres.index.droplevel(1)
df_stack_genres.columns = ['categorie', 'business_id']
return df_stack_genres.reset_index()[['business_id', 'categorie']]
def pivot_genres(df):
"""Create a one-hot encoded matrix for genres.
Arguments:
df -- a dataFrame containing at least the columns 'movieId' and 'genre'
Output:
a matrix containing '0' or '1' in each cell.
1: the movie has the genre
0: the movie does not have the genre
"""
return df.pivot_table(index = 'business_id', columns = 'categorie', aggfunc = 'size', fill_value=0)
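# Minimal end-to-end sketch with a made-up two-business frame (column names
# follow extract_genres above; create_similarity_matrix_categories is defined
# just below):
def _example_content_similarity():
    businesses = pd.DataFrame([
        {'business_id': 'b1', 'categories': 'Pizza, Italian'},
        {'business_id': 'b2', 'categories': 'Pizza, Fast Food'},
    ])
    categories = extract_genres(businesses)     # long (business_id, categorie) frame
    onehot = pivot_genres(categories)           # one-hot businesses x categories
    return create_similarity_matrix_categories(onehot)  # b1/b2 share 'pizza' -> 0.5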
def select_neighborhood(similarity_matrix, utility_matrix, target_business):
"""selects all items with similarity > 0"""
similar = list(similarity_matrix[similarity_matrix[target_business] > 0].index)
return similarity_matrix[target_business][similar]
def create_similarity_matrix_categories(matrix):
"""Create a """
npu = matrix.values
m1 = npu @ npu.T
diag = np.diag(m1)
m2 = m1 / diag
m3 =
|
np.minimum(m2, m2.T)
|
numpy.minimum
|
"""Python3 - User guided equivalent wideth measurement and analysis tool for stellar absorption spectra """
__version__ = "1.0.1"
import numpy as np
import matplotlib.pyplot as plt
#from scipy.optimize import fmin
from scipy.stats import chisquare
from scipy.interpolate import interp1d
from scipy.integrate import simpson
from numpy.random import multivariate_normal
from astropy.io import fits
from scipy.optimize import curve_fit
import glob
import george
from george import kernels
from scipy.optimize import minimize
import pickle
import copy
from scipy.spatial.distance import cdist
from numpy.linalg import inv
from numpy.linalg import slogdet
import subprocess
#----------------------------------------------------------Functions-------------------------------------------------------#
def plot_line_info(star, name, filt = None):
fig = plt.figure(figsize = (10,5))
info_plot = fig.add_subplot(111)
if filt == None:
info_plot.errorbar(star.lines, star.lines_ew, yerr = star.lines_ew_err, fmt='.', zorder = 2, ecolor='k', c = 'k')
info_plot.scatter(star.lines, star.lines_ew, c= star.lines_gauss_Xsquare, cmap = plt.cm.Reds, edgecolors='k', zorder = 3, s = 30)
info_plot.scatter(star.lines[star.lines_check_flag], star.lines_ew[star.lines_check_flag], c = 'w', edgecolors= 'r', zorder = 0, s = 100)
else:
info_plot.errorbar(star.lines[filt], star.lines_ew[filt], yerr = star.lines_ew_err[filt], fmt='.', zorder = 2, ecolor='k', c = 'k')
info_plot.scatter(star.lines[filt], star.lines_ew[filt], c= star.lines_gauss_Xsquare[filt], cmap = plt.cm.Reds, edgecolors='k', zorder = 3, s = 30)
info_plot.scatter(star.lines[filt][star.lines_check_flag[filt]], star.lines_ew[filt][star.lines_check_flag[filt]], c = 'w', edgecolors= 'r', zorder = 0, s = 100)
info_plot.grid()
info_plot.set_title(name, size = 20)
info_plot.set_xlabel(r'$\rm Wavelength\ (nm)$', size = 15)
info_plot.set_ylabel(r'$\rm Equivalent\ Width\ (mA)$', size = 15)
#plt.savefig(name+'_line_ew_info.pdf')
plt.show()
def plot_comparison_res(star,hand_measured, name,xy = [0,100], filt = None):
fig = plt.figure(figsize = (10,5))
#top plot
info_plot = fig.add_subplot(211)
if filt == None:
info_plot.errorbar(hand_measured, star.lines_ew, yerr = star.lines_ew_err, fmt='.', zorder = 2,ecolor='k', c = 'k')
info_plot.scatter(hand_measured, star.lines_ew, c= star.lines_gauss_Xsquare, cmap = plt.cm.Reds, edgecolors='k', zorder = 3, s = 30)
info_plot.scatter(hand_measured[star.lines_check_flag], star.lines_ew[star.lines_check_flag], c = 'w', edgecolors= 'r', zorder = 0, s = 100)
else:
info_plot.errorbar(hand_measured[filt], star.lines_ew[filt], yerr = star.lines_ew_err[filt], fmt='.', zorder = 2,ecolor='k', c = 'k')
info_plot.scatter(hand_measured[filt], star.lines_ew[filt], c= star.lines_gauss_Xsquare[filt], cmap = plt.cm.Reds, edgecolors='k', zorder = 3, s = 30)
info_plot.scatter(hand_measured[filt][star.lines_check_flag[filt]], star.lines_ew[filt][star.lines_check_flag[filt]], c = 'w', edgecolors= 'r', zorder = 0, s = 100)
info_plot.plot([xy[0],xy[1]],[xy[0],xy[1]], 'k--')
info_plot.set_title(name, size = 20)
info_plot.grid()
info_plot.set_ylabel(r'$\rm Auto\ Measured\ (mA)$', size = 15)
#residuals plot
res_plot = fig.add_subplot(212, sharex=info_plot)
if filt == None:
star_res_values = star.lines_ew - hand_measured
res_plot.errorbar(hand_measured, star_res_values, yerr = star.lines_ew_err, fmt='.', zorder = 2,ecolor='k', c = 'k')
res_plot.scatter(hand_measured, star_res_values, c= star.lines_gauss_Xsquare, cmap = plt.cm.Reds, edgecolors='k', zorder = 3, s = 30)
res_plot.scatter(hand_measured[star.lines_check_flag], star_res_values[star.lines_check_flag], c = 'w', edgecolors= 'r', zorder = 0, s = 100)
else:
star_res_values = star.lines_ew - hand_measured
res_plot.errorbar(hand_measured[filt], star_res_values[filt], yerr = star.lines_ew_err[filt], fmt='.', zorder = 2,ecolor='k', c = 'k')
res_plot.scatter(hand_measured[filt], star_res_values[filt], c= star.lines_gauss_Xsquare[filt], cmap = plt.cm.Reds, edgecolors='k', zorder = 3, s = 30)
res_plot.scatter(hand_measured[filt][star.lines_check_flag[filt]], star_res_values[filt][star.lines_check_flag[filt]], c = 'w', edgecolors= 'r', zorder = 0, s = 100)
#plt.savefig(name+'_ew_comparison.pdf')
res_plot.plot([xy[0],xy[1]],[0,0],'k--')
res_plot.set_xlabel(r'$\rm Hand\ Measured\ (mA)$', size = 15)
res_plot.grid()
plt.tight_layout()
plt.show()
def get_line_window(line, wave, flux, left_bound, right_bound,
line_input, window_size = 1.5):
boundaries = [0,0]
#if no line is specified, auto fine best line guess
if line_input == 0.0:
#find line tip
left_look = np.where((wave <= line)&(wave >= line - 0.1))
right_look = np.where((wave >= line)&(wave <= line + 0.1))
#find min
mins = [flux[left_look].min(),flux[right_look].min()]
best_line_guess = wave[np.where(flux == np.min(mins))][0]
else:
best_line_guess = line_input
#get_window around line
window = np.where((wave >= best_line_guess-window_size/2.0)&(wave <= best_line_guess+window_size/2.0))
#calc derivative
dy = np.gradient(flux[window])
dy_std = dy.std()
#if no left or right bound given set using std
auto_bound_l = False
auto_bound_r = False
if left_bound == 0:
dy_l = dy_std/2.0
auto_bound_l = True
if right_bound == 0:
dy_r = dy_std/2.0
auto_bound_r = True
#if no line boundaries specified auto find boundaries
if auto_bound_l:
left_look = np.where(wave[window] <= best_line_guess - 0.05)
dy1_left = np.where((dy[left_look] < dy_l)&(dy[left_look] > (-1)*dy_l))
if len(wave[window][left_look][dy1_left]) ==0:
print('line ',line,' very close to edge or dy selection value too small')
print('will attempt to remeasure, if not possible, add line to exclude lines list in .measure_all_ew() function')
plt.clf()
plt.plot(wave[window],flux[window])
plt.plot([line,line],[0.95,1.0], 'k')
plt.annotate(str(line), xy=[line,1.01])
plt.plot([best_line_guess,best_line_guess],[0.95,1.0], 'k--')
plt.annotate(str(best_line_guess), xy=[best_line_guess,1.01])
plt.show()
return 1,1,0,0
else:
boundaries[0] = wave[window][left_look][dy1_left][-1]
else:
boundaries[0] = left_bound
if auto_bound_r:
right_look = np.where(wave[window] >= best_line_guess + 0.05)
dy1_right = np.where((dy[right_look] < dy_r)&(dy[right_look] > (-1)*dy_r))
if len(wave[window][right_look][dy1_right]) ==0:
print('line ',line,' very close to edge or dy selection value too small')
print('will attempt to remeasure, if not possible, add line to exclude lines list in .measure_all_ew() function')
plt.clf()
plt.plot(wave[window],flux[window])
plt.plot([line,line],[0.95,1.0], 'k')
plt.annotate(str(line), xy=[line,1.01])
plt.plot([best_line_guess,best_line_guess],[0.95,1.0], 'k--')
plt.annotate(str(best_line_guess), xy=[best_line_guess,1.01])
plt.show()
return 0,0,1,1
else:
boundaries[1] = wave[window][right_look][dy1_right][0]
else:
boundaries[1] = right_bound
return window,best_line_guess, boundaries,dy
def gauss_model(x,A,mu,sigma, baseline):
return A*np.exp(-(x-mu)**2/2/sigma**2) + baseline
def gfit(wav,flux,wav_cen, fwhm):
sigma = fwhm/2.355
#limit window of search center +- 2*fwhm to exclude other emission lines
gwave = np.where((wav >= wav_cen-30)&(wav <= wav_cen+30))
#find better center to account for small doppler shift within same window of search
bet_cen = wav[np.where(flux == flux[gwave].max())[0][0]]
    #Initial guess for the Gaussian peak value, taken from the maximum of the curve
guess = flux[np.where(flux == flux[gwave].max())[0][0]]
#Set parameters for gauss curve fit
p0 = [guess,bet_cen,sigma, 0.]
bf,cov = curve_fit(gauss_model,wav[gwave],flux[gwave],p0)
#plt.plot(wav[gwave], flux[gwave], 'r')
return bf, np.sqrt(np.diag(cov)), p0
def gfit_simple(x_array, y_array, mu, sigma, baseline):
A = y_array.max()
p0 = [A, mu, sigma, baseline]
try:
bf, cov = curve_fit(gauss_model, x_array, y_array, p0)
return bf, np.sqrt(np.diag(cov)), p0
except:
bf, cov = [0,0,0,0],None
return bf, cov, p0
def gauss_ew(a, fwhm):
if a == 0 or fwhm == 0:
return 0
else:
return 500.*a*np.sqrt(np.pi/np.log(2))*fwhm #From Adamow pyMOOG ew measure
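#The factor above follows from the analytic area of a Gaussian: integrating
#a*exp(-(x-mu)^2/2/sigma^2) over x gives a*sigma*sqrt(2*pi), and with
#sigma = fwhm/(2*sqrt(2*ln2)) that area equals (a*fwhm/2)*sqrt(pi/ln2);
#converting Angstroms to milli-Angstroms (x1000) gives the leading factor of 500.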
#Gaussian Process code and kernels taken from LSSTC DSFP notebook
def SEKernel(par, x1, x2):
A, Gamma = par
D2 = cdist(x1.reshape(len(x1),1), x2.reshape(len(x2),1), metric = 'sqeuclidean')
return A*np.exp(-Gamma*D2)
def Pred_GP(CovFunc, CovPar, xobs, yobs, eobs, xtest):
# evaluate the covariance matrix for pairs of observed inputs
K = CovFunc(CovPar, xobs, xobs)
# add white noise
K += np.identity(xobs.shape[0]) * eobs**2
# evaluate the covariance matrix for pairs of test inputs
Kss = CovFunc(CovPar, xtest, xtest)
# evaluate the cross-term
Ks = CovFunc(CovPar, xtest, xobs)
# invert K
Ki = inv(K)
# evaluate the predictive mean
m = np.dot(Ks, np.dot(Ki, yobs))
# evaluate the covariance
cov = Kss - np.dot(Ks, np.dot(Ki, Ks.T))
return m, cov
def NLL_GP(p,CovFunc,x,y,e):
# Evaluate the covariance matrix
K = CovFunc(p,x,x)
# Add the white noise term
K += np.identity(x.shape[0]) * e**2
# invert it
Ki = inv(K)
# evaluate each of the three terms in the NLL
term1 = 0.5 * np.dot(y,np.dot(Ki,y))
term2 = 0.5 * slogdet(K)[1]
term3 = 0.5 * len(y) * np.log(2*np.pi)
# return the total
return term1 + term2 + term3
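#Minimal sketch of how Pred_GP and NLL_GP fit together (hypothetical usage; the
#scipy.optimize.minimize import, starting hyperparameters, and data arrays are
#assumptions, not part of this module):
#   p0 = [1.0, 1.0]                                    #[A, Gamma] for SEKernel
#   res = minimize(NLL_GP, p0, args=(SEKernel, xobs, yobs, eobs))
#   m, cov = Pred_GP(SEKernel, res.x, xobs, yobs, eobs, xtest)
#   sigma = np.sqrt(np.diag(cov))                      #predictive uncertainty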
def make_line(x,m,b):
return m*x+b
def combine_files(empty_obj,objects = []):
final_wavelength = []
final_flux = []
final_norm_flux = []
final_shifted_wavelength = []
final_estimated_shift = []
final_continuum = []
final_obs_err = []
final_pred_all = []
final_pred_var_all = []
final_gain = []
for j in objects:
for i in range(len(j.flux)):
final_norm_flux.append(j.normalized_flux[i])
final_shifted_wavelength.append(j.shifted_wavelength[i])
final_wavelength.append(j.wavelength[i])
final_flux.append(j.flux[i])
final_estimated_shift.append(j.estimated_shift[i])
final_continuum.append(j.continuum[i])
final_obs_err.append(j.obs_err[i])
final_pred_all.append(j.pred_all[i])
final_pred_var_all.append(j.pred_var_all[i])
final_gain.append(j.gain[i])
empty_obj.wavelength = np.array(final_wavelength)
empty_obj.flux = np.array(final_flux)
empty_obj.shifted_wavelength = np.array(final_shifted_wavelength)
empty_obj.normalized_flux = np.array(final_norm_flux)
empty_obj.estimated_shift = np.array(final_estimated_shift)
empty_obj.continuum = np.array(final_continuum)
empty_obj.obs_err = np.array(final_obs_err)
empty_obj.pred_all = np.array(final_pred_all)
empty_obj.pred_var_all = np.array(final_pred_var_all)
empty_obj.gain = np.array(final_gain)
del final_wavelength
del final_flux
del final_norm_flux
del final_shifted_wavelength
del final_estimated_shift
del final_continuum
del final_obs_err
del final_pred_all
del final_pred_var_all
del final_gain
return empty_obj
def reduce_cc(x,y,lines,lines_removed,limit=0.12):
#check correlation before going further
cc = np.corrcoef(x,y)
print('starting cc', cc[0,1])
if abs(cc[0,1]) < limit:
print('cc good enough')
return lines,x,y,lines_removed
check_ccs = np.zeros(len(x))
#remove largest cc difference
for i in range(len(x)):
new_x = np.delete(x,i)
new_y = np.delete(y,i)
new_cc = np.corrcoef(new_x,new_y)
check_ccs[i] = new_cc[0,1]
#Calculate differences
diffs = [abs(cc[0,1])- abs(j) for j in check_ccs]
#which gives largest difference?
biggest_diff = np.where(diffs == max(diffs))[0][0]
#remove that one line
lines_removed.append([lines[biggest_diff],x[biggest_diff],y[biggest_diff]])
x = np.delete(x,biggest_diff)
y = np.delete(y,biggest_diff)
lines = np.delete(lines,biggest_diff)
print('line removed: ', lines_removed)
#recalculate cc
cc = np.corrcoef(x,y)
print('ending cc', cc[0,1])
    #Call function again to remove lines until the correlation limit is passed
    lines,x,y,lines_removed = reduce_cc(x,y,lines,lines_removed,limit=limit)
return lines,x,y,lines_removed
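#Typical call (hypothetical arrays): start with an empty removal list and let the
#recursion drop lines until |corrcoef(x, y)| falls below the chosen limit:
#   kept_lines, x_kept, y_kept, removed = reduce_cc(x, y, lines, [], limit=0.12)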
def load_object(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
def make_plots_folder():
folder_name = 'line_plots'
#check if folder exists
filenames = glob.glob('*')
if folder_name in filenames:
pass
else:
#make folder if not
cmd = 'mkdir '+folder_name
subprocess.call(cmd, shell=True)
def replace_w(array, replace_value, replace_with = 'med' ):
"""
Use to replace individual values with either median or average of the order
or with a specific value
array format: [[order1],[order2],...] orderN = [x1,x2,x3,...]
replace_value - value to be replaced
    replace_with - 'med', 'avg', or value to replace with
"""
for i in range(len(array)):
gd_0 = np.where(array[i] == replace_value)
gd = np.where(array[i] != replace_value)
for j in gd_0:
if replace_with == 'med':
array[i][j] = np.median(array[i][gd])
elif replace_with == 'avg':
array[i][j] = np.average(array[i][gd])
else:
array[i][j] = replace_with
return None
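#Example (hypothetical data): replace the 0.0 placeholders in each order with that
#order's median, in place:
#   spec_flux = [np.array([1.0, 0.0, 3.0]), np.array([0.0, 5.0, 7.0])]
#   replace_w(spec_flux, 0.0, replace_with='med')
#   #spec_flux is now [array([1., 2., 3.]), array([6., 5., 7.])]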
#--------------------------------------------------Classes--------------------------------------------------#
class Spectrum_Data():
def __init__(self, filename, KECK_file = True, spectx=False, specty=False, order_split = (False,3500)):
"""
        filename - used to name plots and files
KECK_file - True if loading KECK fits file
spectx, specty - input spectrum if loading from arrays
if not using order_split:
input format: [[order1],[order2],...] orderN = [x1,x2,x3,...]
if using order_split:
input format: [x1,x2,x3,...]
"""
self.filename = filename
self.gain = None
if KECK_file:
self.wavelength, self.flux = self.read_spec()
else:
#split input array into orders
if order_split[0]:
                #Try targeting about 3500 points per order; too many points per order will slow the code down
order_count = int(len(spectx)/order_split[1])
order_split_wave = np.array_split(spectx, order_count)
order_split_flux = np.array_split(specty, order_count)
self.wavelength = order_split_wave
self.flux = order_split_flux
del order_split_wave
del order_split_flux
print('If values need to be replaced, use replace_w() function')
else:
self.wavelength = spectx
self.flux = specty
self.normalized_flux = copy.deepcopy(self.flux) #deepcopy creates new object
self.combined_flux = None
self.shifted_wavelength = copy.deepcopy(self.wavelength)
self.estimated_shift = np.zeros(len(self.wavelength))
self.rv = None #km/s
#continuum information
self.continuum = np.full(len(self.wavelength), None)
#print('cont array empty', self.continuum)
self.pred_all = np.full(len(self.wavelength), None)
self.pred_var_all = np.full(len(self.wavelength), None)
self.obs_err = np.full(len(self.wavelength), None)
for i in range(len(self.wavelength)):
false_array = np.full(len(self.wavelength[i]), False)
#print('false array created', false_array)
self.continuum[i] = false_array
self.pred_all[i] = np.full(len(self.wavelength[i]), 0)
self.pred_var_all[i] = np.full(len(self.wavelength[i]), 0)
self.obs_err[i] = np.sqrt(self.flux[i])
#print('empty continuum arrays created', self.continuum)
#Old way of setting these variables (change back if above code causes problems)
# self.continuum = np.full((len(self.wavelength),len(self.wavelength[0])), False)
# self.pred_all = np.zeros((len(self.wavelength),len(self.wavelength[0])))
# self.pred_var_all = np.zeros((len(self.wavelength),len(self.wavelength[0])))
# self.obs_err = np.zeros((len(self.wavelength),len(self.wavelength[0])))
#line information
self.lines = None
#line - extra parameters [0] - shift continuum, [1] - left boundary in Angstroms
#[2] - right boundary in Angstroms, [3] - line center in Angstroms
self.lines_exp = None
#line - extra data [0] - element, [1] - excitation potential
#[2] - gf, [3] - rad
self.lines_exd = None
#line - equivalent width
self.lines_ew = None
#line - equivalent width error
self.lines_ew_err = None
        #line - equivalent width calculated by Simpson's rule integration
self.lines_ew_simp = None
        #line - equivalent width error from integration
self.lines_ew_simp_err = None
#line - best fit parameters for gaussian fit
self.lines_bf_params = None
#line - X squared value for gaussian and data
self.lines_gauss_Xsquare = None
#line - X squared threshold value
self.X_thresh = 0.003
#line - X squared value above threshold or EW = 0
self.lines_check_flag = None
#used to switch between Adamow ew calculation and simpson's rule integration
self.temp_line_ew = None
self.temp_line_ew_err = None
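    #Example construction from plain arrays (hypothetical data), letting the class
    #split the spectrum into ~3500-point orders as described in the docstring:
    #   spec = Spectrum_Data('mystar', KECK_file=False, spectx=wave_1d,
    #                        specty=flux_1d, order_split=(True, 3500))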
def normalize_all(self, window_width = 1.5, continuum_depth = 90):
#loop through orders
for i in range(len(self.flux)):
#use Gaussian Process to fit continuum
self.normalize(i, window_width, continuum_depth)
#Replace un-normalized points with value before it
#This should only be replacing the last point in the
#spectrum that is always missed by normalize
# err_est = self.obs_err[i]/self.pred_all[i]
# non_norm_points = np.where(self.normalized_flux[i] > np.average(self.normalized_flux[i][self.continuum[i]]+err_est[self.continuum[i]]*100))
# #replace non norm points
# self.normalized_flux[i][non_norm_points] = self.normalized_flux[i][non_norm_points[0]-1]
return None
def normalize(self, order, window_width = 1.5, continuum_depth = 90, clip = [-999,-999]):
if clip[0] != -999 and clip[1] != -999:
#clipped = True
clipl = np.where(self.wavelength[order] <= clip[0])[0][-1]
clipr = np.where(self.wavelength[order] >= clip[1])[0][0]
else:
#clipped = False
clipl = 0
clipr = len(self.flux[order])
err = np.sqrt(self.flux[order][clipl:clipr])
continuum_scan_obj = Continuum_scan(window_width, continuum_depth)
continuum_scan_obj.load_data(self.wavelength[order][clipl:clipr],self.flux[order][clipl:clipr])
continuum_scan_obj.scan()
cont = continuum_scan_obj.get_selected()
del continuum_scan_obj
#Gaussian Process to fit continuum
kernel = np.var(self.flux[order][clipl:clipr][cont]) * kernels.Matern32Kernel(10)
#print("cont", len(self.flux[order][cont]))
#kernel = np.var(self.flux[order][cont]) * kernels.ExpSquaredKernel(10)
gp = george.GP(kernel,mean=self.flux[order][clipl:clipr][cont].mean())
gp.compute(self.wavelength[order][clipl:clipr][cont], err[cont])
x_pred = self.wavelength[order][clipl:clipr].copy()
pred, pred_var = gp.predict(self.flux[order][clipl:clipr][cont], x_pred, return_var=True)
#print("ln-likelihood: {0:.2f}".format(gp1.log_likelihood(self.flux[order][cont])))
params = [gp,self.flux[order][clipl:clipr][cont]]
result = minimize(self.neg_ln_like, gp.get_parameter_vector(), args = params, jac=self.grad_neg_ln_like)
#print(result)
gp.set_parameter_vector(result.x)
pred, pred_var = gp.predict(self.flux[order][clipl:clipr][cont], x_pred, return_var=True)
#print("\nFinal ln-likelihood: {0:.2f}".format(gp.log_likelihood(self.flux[order][cont])))
self.continuum[order][clipl:clipr] = cont
self.pred_all[order][clipl:clipr] = pred
self.pred_var_all[order][clipl:clipr] = pred_var
self.obs_err[order][clipl:clipr] = err
self.normalized_flux[order][clipl:clipr] = self.flux[order][clipl:clipr]/pred
return None
def grad_neg_ln_like(self,p, params):
params[0].set_parameter_vector(p)
neg_ln = (-1)*params[0].grad_log_likelihood(params[1])
return neg_ln
def neg_ln_like(self,p, params):
params[0].set_parameter_vector(p)
neg_ln = (-1)*params[0].log_likelihood(params[1])
return neg_ln
def S_N(self, rows = 5, cols = 4, save_plot = False):
        #Signal to noise is overestimated compared to MAKEE output
plt.clf()
f = plt.figure(figsize=(20,15))
plt.suptitle(str(self.filename) + ' S/N', size = 20)
for i in range(len(self.wavelength)):
order = i
i += 1
ax = f.add_subplot(rows, cols,i)
ax.plot(self.wavelength[order],np.sqrt(self.flux[order]*self.gain[order]))
ax.set_title(str(i))
ax.grid()
plt.tight_layout()
if save_plot:
plt.savefig(str(self.filename)+'_SNR.pdf')
plt.show()
def load_normalized(self, name):
pathnames = glob.glob(name+'*'+'.npy')
for i in range(len(pathnames)):
if '_flux' in pathnames[i]:
self.normalized_flux = np.load(pathnames[i])
print('flux loaded')
elif '_wavelength' in pathnames[i]:
self.wavelength = np.load(pathnames[i])
print('wavelength loaded')
elif '_cont' in pathnames[i]:
self.continuum = np.load(pathnames[i])
print('continuum loaded')
elif '_obs_err' in pathnames[i]:
self.obs_err = np.load(pathnames[i])
print('errors loaded')
elif '_pred' in pathnames[i]:
self.pred_all = np.load(pathnames[i])
print('all preds loaded')
elif '_pred_var' in pathnames[i]:
self.pred_var_all = np.load(pathnames[i])
print('all pred vars loaded')
def save_normalized(self, name):
#arrays are separated by order
np.save(name+'_flux',self.normalized_flux)
np.save(name+'_wavelength',self.shifted_wavelength)
np.save(name+'_cont',self.continuum)
np.save(name+'_obs_err',self.obs_err)
np.save(name+'_pred',self.pred_all)
np.save(name+'_pred_var',self.pred_var_all)
return None
def wave_shift(self, order, shift):
self.shifted_wavelength[order] = self.wavelength[order] + shift
self.estimated_shift[order] = shift
def combine_spectra(self, spectB, resolution = 1000, shift=True):
print('Use self.update_combined() when you are happy with the combined flux to override self.flux')
#find corresponding orders that match self in spectB
med_A = [np.median(self.shifted_wavelength[i]) for i in range(len(self.shifted_wavelength))]
med_B = [np.median(spectB.shifted_wavelength[i]) for i in range(len(spectB.shifted_wavelength))]
b_order = []
#find corresponding B order
for k in range(len(med_A)):
            diff = np.abs(med_A[k] - np.array(med_B))
loc = np.where(diff == diff.min())
b_order.append(loc)
combined_flux_orders = np.zeros_like(self.flux)
if shift:
#first shift A to match B with higher accuracy (higher resolution)
#may have to include a try statement for errors
self.estimate_shift([spectB], shift_spacing=resolution)
self.clean_shift()
#loop through orders
for i in range(len(self.shifted_wavelength)):
#combining flux values for each wavelength value
combined_flux = np.zeros(len(self.shifted_wavelength[i]))
print('A order', i, 'B order', b_order[i][0][0])
#loop through each shifted wavelength value
for j in range(len(self.shifted_wavelength[i])):
#difference between one shifted wavelength value and all B wavelength values
#element closest to zero is location of closest wavelength values
ed = 5
if j < ed:
le = 0
re = j + ed
elif j > len(self.shifted_wavelength[i])-ed:
le = j - ed
re = len(self.shifted_wavelength[i]) + 1
else:
le = j - ed
re = j + ed
near_point = spectB.wavelength[b_order[i][0][0]][le:re]
diff_array = abs(self.shifted_wavelength[i][j] - near_point)
loc = np.where(diff_array == diff_array.min())
#add A flux with B flux at location where diff = 0
combined_flux[j] = self.flux[i][j] + spectB.flux[b_order[i][0][0]][le:re][loc]
#print(combined_flux)
#collect flux values for each order
combined_flux_orders[i] = combined_flux
self.obs_err[i] = np.sqrt(combined_flux)
plt.plot(self.shifted_wavelength[i], self.flux[i], label = 'A')
plt.plot(spectB.wavelength[b_order[i][0][0]], spectB.flux[b_order[i][0][0]], label = 'B')
plt.plot(self.shifted_wavelength[i], combined_flux, label = 'A+B')
plt.xlim([np.median(self.shifted_wavelength[i]-(self.shifted_wavelength[i].max() - self.shifted_wavelength[i].min())/10),
np.median(self.shifted_wavelength[i]+(self.shifted_wavelength[i].max() - self.shifted_wavelength[i].min())/10)])
plt.grid()
plt.legend()
plt.show()
print('#-----------------------#')
#replace original flux for A with combined flux
self.combined_flux = combined_flux_orders
def update_combined(self):
self.flux = self.combined_flux
def estimate_shift(self, sun_spectra, shift_max = 5, shift_min = -5, shift_spacing = 100):
#setup num orders, place holder for chi min, shifts array
orders = len(self.wavelength)
chi = np.zeros(shift_spacing)
shifts = np.linspace(shift_min, shift_max, shift_spacing)
#begin looping through each order in star spectrum
for q in range(orders):
order = q
order_found = False
order_mean = self.shifted_wavelength[order].mean()
#loop through each included solar spectrum to find a matching order
for j in range(len(sun_spectra)):
sun = sun_spectra[j]
#Loop through sun orders to find similar order
for i in range(len(sun.wavelength)):
#use average wavelength value in an order to look for
#matching solar order
if order_mean < sun.wavelength[i].max() and order_mean > sun.wavelength[i].min():
order_found = True
sun_range = sun.wavelength[i].max() - sun.wavelength[i].min()
sun_window = [sun.wavelength[i][0], sun.wavelength[i][-1]]
#loop through specified shift values to find best match
for k in range(len(shifts)):
shift = shifts[k]
self.wave_shift(order,shift)
order_range = self.shifted_wavelength[order].max() - self.shifted_wavelength[order].min()
order_window = [self.shifted_wavelength[order][0], self.shifted_wavelength[order][-1]]
# |------------| --> |------------|
#|------------| --> |xx|----------|
if order_window[0] > sun_window[0]:
sun_window[0] = sun.wavelength[i][np.where(sun.wavelength[i] > order_window[0])][0]
#|------------| --> |------------|
# |------------| --> |-------|xxxxx|
if order_window[1] < sun_window[1]:
sun_window[1] = sun.wavelength[i][
|
np.where(sun.wavelength[i] < order_window[1])
|
numpy.where
|
# -*- coding: utf-8 -*-
"""Top-level package for Sequel."""
import numpy
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
def getsequence(seqhand,nattempts=1000,*args,**kwargs):
attempt = 0
while attempt < nattempts:
attempt += 1
seq = seqhand(*args,**kwargs)
if not numpy.any(numpy.isnan(seq)):
return seq,attempt
return numpy.nan,attempt
# insight 1: you can brute-force a first-order counter-balanced sequence
# quite easily even for fairly large condition numbers (<100 works fine)
def transferseq(tmat,startind=None):
# important to avoid overlap across calls
tmat = numpy.copy(tmat)
ntransfers = numpy.sum(tmat.flat)
dims = numpy.shape(tmat)
assert len(dims)==2, 'input tmat must be 2D'
assert dims[0] == dims[1], 'input tmat must be square matrix'
ncon = dims[0]
if startind is None:
startind = numpy.random.randint(ncon)
seq = [startind]
niter = 0
while numpy.sum(tmat.flat)>0.:
niter += 1
# the current trial is in rows
rowind = seq[-1]
validcol = numpy.where(tmat[rowind,:] > 0.)[0]
if not len(validcol):
return numpy.nan
colind = validcol[numpy.random.randint(len(validcol))]
seq.append(colind)
tmat[rowind,colind] -= 1
return seq
def transfermatrix(seq):
ncon = numpy.max(seq).astype(int)+1
out = numpy.zeros([ncon,ncon,len(seq)-1])
out[seq[:-1],seq[1:],range(0,len(seq)-1)] = 1
return numpy.sum(out,axis=2)
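# Short sketch of how getsequence and transferseq combine (hypothetical usage):
# ask for a sequence that realizes each of the ncon*ncon transitions exactly once.
#   ncon = 4
#   tmat = numpy.ones([ncon, ncon])          # desired transition counts
#   seq, attempts = getsequence(transferseq, 1000, tmat)
#   # transfermatrix(seq) reproduces tmat whenever a valid sequence is found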
def permutationrep(x,n,allowrep=False,maxiter=1000):
"""generate repeating sequence of random indices in 0:x range."""
seq = numpy.arange(x)
numpy.random.shuffle(seq)
niter = 0
while len(seq) < (x*n):
niter += 1
newseq =
|
numpy.arange(x)
|
numpy.arange
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 21 13:28:38 2021
@author: lukepinkel
"""
import numpy as np
import pandas as pd
from pystatsm.utilities.random import exact_rmvnorm, vine_corr
from pystatsm.pyglm.betareg import BetaReg, LogitLink, LogLink
from pystatsm.pyglm.clm import CLM
from pystatsm.pyglm.glm import (GLM, Binomial, Poisson, Gamma, InverseGaussian, PowerLink,
Gaussian, IdentityLink)
from pystatsm.pyglm.nb2 import NegativeBinomial
from pystatsm.pyglm.zimodels import ZIP
from pystatsm.utilities.numerical_derivs import fo_fc_cd, so_gc_cd
def test_betareg():
SEED = 1234
rng = np.random.default_rng(SEED)
n_obs = 10_000
X = exact_rmvnorm(np.eye(4)/100, n=n_obs, seed=SEED)
Z = exact_rmvnorm(np.eye(2)/100, n=n_obs, seed=SEED)
betam = np.array([4.0, 1.0, -1.0, -2.0])
betas = np.array([2.0, -2.0])
etam, etas = X.dot(betam)+1.0, 2+Z.dot(betas)#np.tanh(Z.dot(betas))/2.0 + 2.4
mu, phi = LogitLink().inv_link(etam), LogLink().inv_link(etas)
a = mu * phi
b = (1.0 - mu) * phi
y = rng.beta(a, b)
xcols = [f"x{i}" for i in range(1, 4+1)]
zcols = [f"z{i}" for i in range(1, 2+1)]
data = pd.DataFrame(np.hstack((X, Z)), columns=xcols+zcols)
data["y"] = y
m_formula = "y~1+"+"+".join(xcols)
s_formula = "y~1+"+"+".join(zcols)
model = BetaReg(m_formula=m_formula, s_formula=s_formula, data=data)
model.fit()
theta = np.array([0.99819859, 3.92262116, 1.02091902, -0.98526682, -1.9795528,
1.98535573, 2.06533661, -2.06805411])
assert(np.allclose(model.theta, theta))
g1 = fo_fc_cd(model.loglike, model.theta*0.95)
g2 = model.gradient(model.theta*0.95)
assert(np.allclose(g1, g2))
H1 = so_gc_cd(model.gradient, model.theta)
H2 = model.hessian(model.theta)
assert(np.allclose(H1, H2))
assert(model.optimizer.success==True)
assert((np.abs(model.optimizer.grad)<1e-5).all())
def test_clm():
SEED = 1234
rng = np.random.default_rng(SEED)
n_obs, n_var, rsquared = 10_000, 8, 0.25
S = np.eye(n_var)
X = exact_rmvnorm(S, n=n_obs, seed=1234)
beta = np.zeros(n_var)
beta[np.arange(n_var//2)] = rng.choice([-1., 1., -0.5, 0.5], n_var//2)
var_names = [f"x{i}" for i in range(1, n_var+1)]
eta = X.dot(beta)
eta_var = eta.var()
scale = np.sqrt((1.0 - rsquared) / rsquared * eta_var)
y = rng.normal(eta, scale=scale)
df = pd.DataFrame(X, columns=var_names)
df["y"] = pd.qcut(y, 7).codes
formula = "y~-1+"+"+".join(var_names)
model = CLM(frm=formula, data=df)
model.fit()
theta = np.array([-2.08417224, -1.08288221, -0.34199706, 0.34199368, 1.08217316,
2.08327387, 0.37275823, 0.37544884, 0.3572407 , 0.71165265,
0.0086888 , -0.00846944, 0.00975741, 0.01257564])
assert(np.allclose(theta, model.params))
params_init, params = model.params_init.copy(), model.params.copy()
tol = np.finfo(float).eps**(1/3)
np.allclose(model.gradient(params_init), fo_fc_cd(model.loglike, params_init))
np.allclose(model.gradient(params), fo_fc_cd(model.loglike, params), atol=tol)
np.allclose(model.hessian(params_init), so_gc_cd(model.gradient, params_init))
np.allclose(model.hessian(params), so_gc_cd(model.gradient, params))
def test_glm():
seed = 1234
rng = np.random.default_rng(seed)
response_dists = ['Gaussian', 'Binomial', 'Poisson', 'Gamma', "InvGauss"]
n_obs, nx, r = 2000, 10, 0.5
n_true = nx//4
R = vine_corr(nx, 10, seed=seed)
X = {}
X1 = exact_rmvnorm(R, n_obs, seed=seed)
X2 = exact_rmvnorm(R, n_obs, seed=seed)
X2 = X2 - np.min(X2, axis=0) + 0.1
X['Gaussian'] = X1.copy()
X['Binomial'] = X1.copy()
X['Poisson'] = X1.copy()
X['Gamma'] = X1.copy()
X['InvGauss'] = X2.copy()
beta = dict(Binomial=np.zeros(nx), Poisson=np.zeros(nx), Gamma=np.zeros(nx),
Gaussian=np.zeros(nx), InvGauss=np.zeros(nx))
beta['Gaussian'][:n_true*2] = np.concatenate((0.5*np.ones(n_true), -0.5*np.ones(n_true)))
beta['Binomial'][:n_true*2] = np.concatenate((0.5*np.ones(n_true), -0.5*np.ones(n_true)))
beta['Poisson'][:n_true*2] = np.concatenate((0.5*np.ones(n_true), -0.5*np.ones(n_true)))
beta['Gamma'][:n_true*2] = np.concatenate((0.1*np.ones(n_true), -0.1*np.ones(n_true)))
beta['InvGauss'][:n_true*2] = np.concatenate((0.1*np.ones(n_true), 0.1*np.ones(n_true)))
for dist in response_dists:
beta[dist] = beta[dist][rng.choice(nx, nx, replace=False)]
eta = {}
eta_var = {}
u_var = {}
u = {}
linpred = {}
for dist in response_dists:
eta[dist] = X[dist].dot(beta[dist])
eta_var[dist] = eta[dist].var()
u_var[dist] = np.sqrt(eta_var[dist] * (1.0 - r) / r)
u[dist] = rng.normal(0, u_var[dist], size=(n_obs))
linpred[dist] = u[dist]+eta[dist]
if dist in ['InvGauss']:
linpred[dist] -= linpred[dist].min()
linpred[dist] += 0.01
Y = {}
Y['Gaussian'] = IdentityLink().inv_link(linpred['Gaussian'])
Y['Binomial'] = rng.binomial(n=10, p=LogitLink().inv_link(linpred['Binomial']))/10.0
Y['Poisson'] = rng.poisson(lam=LogLink().inv_link(linpred['Poisson']))
Y['Gamma'] = rng.gamma(shape=LogLink().inv_link(linpred['Gamma']), scale=3.0)
Y['InvGauss'] = rng.wald(mean=PowerLink(-2).inv_link(eta['InvGauss']), scale=2.0)
data = {}
formula = "y~"+"+".join([f"x{i}" for i in range(1, nx+1)])
for dist in response_dists:
data[dist] = pd.DataFrame(np.hstack((X[dist], Y[dist].reshape(-1, 1))),
columns=[f'x{i}' for i in range(1, nx+1)]+['y'])
models = {}
models['Gaussian'] = GLM(formula=formula, data=data['Gaussian'], fam=Gaussian(), scale_estimator='NR')
models['Binomial'] = GLM(formula=formula, data=data['Binomial'], fam=Binomial(weights=np.ones(n_obs)*10.0))
models['Poisson'] = GLM(formula=formula, data=data['Poisson'], fam=Poisson())
models['Gamma'] = GLM(formula=formula, data=data['Gamma'], fam=Gamma())
models['Gamma2'] = GLM(formula=formula, data=data['Gamma'], fam=Gamma(), scale_estimator='NR')
models['InvGauss'] = GLM(formula=formula, data=data['InvGauss'], fam=InverseGaussian(), scale_estimator='NR')
models['Gaussian'].fit()
models['Binomial'].fit()
models['Poisson'].fit()
models['Gamma'].fit()
models['Gamma2'].fit()
models['InvGauss'].fit()
grad_conv = {}
grad_conv["Gaussian"] = np.mean(models['Gaussian'].optimizer.grad**2)<1e-6
grad_conv["Binomial"] = np.mean(models['Binomial'].optimizer.grad**2)<1e-6
grad_conv["Poisson"] = np.mean(models['Poisson'].optimizer.grad**2)<1e-6
grad_conv["Gamma"] = models['Gamma'].optimizer['|g|'][-1]<1e-6
grad_conv["Gamma2"] = models['Gamma2'].optimizer['|g|'][-1]<1e-6
grad_conv["InvGauss"] = np.mean(models['InvGauss'].optimizer.grad**2)<1e-6
    assert(all(grad_conv.values()))
param_vals = {}
param_vals["Gaussian"] = np.array([0.01677157, 0.01768816, 0.03232757, -0.50586418, 0.00538817,
0.01215466, 0.46273009, 0.03222982, 0.51013559, -0.00482659,
-0.44925714, -0.08297647])
param_vals["Binomial"] = np.array([-0.04811123, 0.34608258, 0.02748488, 0.02109192, -0.35403311,
0.37825192, -0.46275101, 0.00668586, 0.06837819, 0.00136615,
0.00321255])
param_vals["Poisson"] = np.array([ 0.78523498, -0.52630851, -0.0407732 , 0.02971785, -0.03919242,
-0.01845692, 0.34397533, -0.55594235, 0.0257876 , 0.42205263,
0.13051603])
param_vals["Gamma"] = np.array([ 0.33020564, -0.00496934, -0.01392126, 0.03581743, -0.01186388,
0.03645015, -0.00609281, -0.01056508, 0.00163984, -0.03324063,
-0.00937269])
param_vals["Gamma2"] = np.array([ 0.33020564, -0.00496934, -0.01392126, 0.03581743, -0.01186388,
0.03645015, -0.00609281, -0.01056508, 0.00163984, -0.03324063,
-0.00937269, 0.09260053])
param_vals["InvGauss"] = np.array([ 0.51658718, -0.03040851, 0.14254292, 0.10087636, 0.05071923,
-0.05297573, -0.04039982, -0.04293772, 0.1251764 , -0.02370386,
0.01912702, -0.66386179])
param_close = {}
grad_close = {}
hess_close = {}
for key in param_vals.keys():
m = models[key]
param_close[key] = np.allclose(param_vals[key], m.params)
x = m.params * 0.98
grad_close[key] = np.allclose(fo_fc_cd(m.loglike, x), m.gradient(x))
hess_close[key] = np.allclose(so_gc_cd(m.gradient, x), m.hessian(x))
    assert(all(param_close.values()))
    assert(all(grad_conv.values()))
    assert(all(grad_close.values()))
    assert(all(hess_close.values()))
def test_nb2():
seed = 1234
rng =
|
np.random.default_rng(seed)
|
numpy.random.default_rng
|
import numpy as np
import pandas as pd
import time, os, glob
import cv2
from keras.applications.vgg16 import VGG16
from keras.optimizers import Adam
from keras.layers import GlobalAveragePooling2D, Dense
from keras import Model
from keras.applications.imagenet_utils import preprocess_input
def get_rotnet(num_class, input_size):
base_model = VGG16(weights='imagenet', include_top=False,
input_shape=[input_size,input_size,3], classes=num_class)
x = base_model.get_layer('block5_pool').output
x = GlobalAveragePooling2D()(x)
x = Dense(4, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=x)
optimizer = Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
return model
def randomHueSaturationValue(image, hue_shift_limit=(-180, 180),
sat_shift_limit=(-255, 255),
val_shift_limit=(-255, 255), u=0.5):
if np.random.random() < u:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(image)
hue_shift = np.random.uniform(hue_shift_limit[0], hue_shift_limit[1])
h = cv2.add(h, hue_shift)
sat_shift = np.random.uniform(sat_shift_limit[0], sat_shift_limit[1])
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(val_shift_limit[0], val_shift_limit[1])
v = cv2.add(v, val_shift)
image = cv2.merge((h, s, v))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def randomShiftScaleRotate(image,
shift_limit=(-0.0625, 0.0625),
scale_limit=(-0.1, 0.1),
rotate_limit=(-45, 45), aspect_limit=(0, 0),
borderMode=cv2.BORDER_CONSTANT, u=0.5):
if np.random.random() < u:
height, width, channel = image.shape
angle = np.random.uniform(rotate_limit[0], rotate_limit[1]) # degree
scale = np.random.uniform(1 + scale_limit[0], 1 + scale_limit[1])
aspect = np.random.uniform(1 + aspect_limit[0], 1 + aspect_limit[1])
sx = scale * aspect / (aspect ** 0.5)
sy = scale / (aspect ** 0.5)
dx = round(np.random.uniform(shift_limit[0], shift_limit[1]) * width)
dy = round(np.random.uniform(shift_limit[0], shift_limit[1]) * height)
cc = np.math.cos(angle / 180 * np.math.pi) * sx
ss = np.math.sin(angle / 180 * np.math.pi) * sy
rotate_matrix =
|
np.array([[cc, -ss], [ss, cc]])
|
numpy.array
|
import numpy as np
from reservoirlib.esn import ArcTanh
from reservoirlib.esn import InvertedSigmoid
class BenchmarkExperiment:
"""
Provides a framework for running an experiment using various tasks
"""
def __init__(self, esn, task, trainer, num_training_trials,
invert_target_of_training=False):
"""
:param esn: an object that can be trained on task data
esn should conform to the BaseESN interface
:param task: an object that will generate input and target time-series
        task should conform to the BaseTask interface
:param trainer: a function that takes as arguments
a target and model numpy array and returns a set of parameters
:param num_training_trials: number of trials to run the task
:param invert_target_of_training: whether to invert the target signal
for training. This is usually True if ESN output layer has tanh or
sigmoid neurons and you are using least squared regression. Else
keep false. Default: False
"""
self.esn = esn
self.task = task
self.trainer = trainer
self.num_training_trials = num_training_trials
self.invert_target_of_training = invert_target_of_training
def train_model(self):
"""
Generates input and target signals and runs the ESN's training algorithm
:return: None
"""
# Generate data for training
input_trials = [None for i in range(self.num_training_trials)]
target_trials = [None for i in range(self.num_training_trials)]
history_trials = [None for i in range(self.num_training_trials)]
input_cuts = [None for i in range(self.num_training_trials)]
output_cuts = [None for i in range(self.num_training_trials)]
for i in range(self.num_training_trials):
input_trials[i], target_trials[i], input_cuts[i], output_cuts[i] = \
self.task.generate_signal()
input_trials[i] = np.expand_dims(input_trials[i], axis=2)
target_trials[i] = np.expand_dims(target_trials[i], axis=2)
self.esn.run(input_trials[i], record=True, output=False)
history_trials[i] = np.zeros((input_trials[i].shape[0],
self.esn.num_neurons, 1),
dtype=self.esn.dtype)
history_trials[i][:] = self.esn.history
self.esn.reset()
# stack trial data
stacked_target = stack_targets(target_trials, output_cuts, self.esn.dtype)
stacked_history = stack_history(input_trials, history_trials, input_cuts,
self.esn.dtype)
# invert target stack data if applicable
if self.invert_target_of_training:
invert_target_array(stacked_target, self.esn.output_type,
self.esn.output_neuron_pars)
# train on data
solution = self.trainer(stacked_history, stacked_target)
# call set weights for esn
self.esn.set_output_weights(solution)
def evaluate_model(self):
"""
Task specific validation runs
:return: The task specific validation output
"""
input_signal, target_output, in_cut, out_cut = self.task.generate_signal()
prediction = self.esn.run(
|
np.expand_dims(input_signal, axis=2)
|
numpy.expand_dims
|
"""
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
import numpy as np
from numpy.core.numeric import normalize_axis_tuple
from numpy.core.overrides import array_function_dispatch, set_module
__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
class DummyArray:
"""Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
def __init__(self, interface, base=None):
self.__array_interface__ = interface
self.base = base
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
"""
Create a view into the array with the given shape and strides.
.. warning:: This function has to be used with extreme care, see notes.
Parameters
----------
x : ndarray
        Array to create a new view of.
shape : sequence of int, optional
The shape of the new array. Defaults to ``x.shape``.
strides : sequence of int, optional
The strides of the new array. Defaults to ``x.strides``.
subok : bool, optional
.. versionadded:: 1.10
If True, subclasses are preserved.
writeable : bool, optional
.. versionadded:: 1.12
If set to False, the returned array will always be readonly.
Otherwise it will be writable if the original array was. It
is advisable to set this to False if possible (see Notes).
Returns
-------
view : ndarray
See also
--------
broadcast_to : broadcast an array to a given shape.
reshape : reshape an array.
lib.stride_tricks.sliding_window_view :
        user-friendly and safe function for the creation of sliding window views.
Notes
-----
``as_strided`` creates a view into the array given the exact strides
and shape. This means it manipulates the internal data structure of
ndarray and, if done incorrectly, the array elements can point to
invalid memory and can corrupt results or crash your program.
It is advisable to always use the original ``x.strides`` when
calculating new strides to avoid reliance on a contiguous memory
layout.
Furthermore, arrays created with this function often contain self
overlapping memory, so that two elements are identical.
Vectorized write operations on such arrays will typically be
unpredictable. They may even give different results for small, large,
or transposed arrays.
Since writing to these arrays has to be tested and done with great
care, you may want to use ``writeable=False`` to avoid accidental write
operations.
For these reasons it is advisable to avoid ``as_strided`` when
possible.
"""
# first convert input to array, possibly keeping subclass
x = np.array(x, copy=False, subok=subok)
interface = dict(x.__array_interface__)
if shape is not None:
interface['shape'] = tuple(shape)
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
# The route via `__interface__` does not preserve structured
# dtypes. Since dtype should remain unchanged, we set it explicitly.
array.dtype = x.dtype
view = _maybe_view_as_subclass(x, array)
if view.flags.writeable and not writeable:
view.flags.writeable = False
return view
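# A small illustration of the guidance above (hypothetical usage, not part of this
# module's examples): build a read-only sliding-window view by reusing x.strides so
# that no data is copied.
#   x = np.arange(10)
#   win = as_strided(x, shape=(8, 3), strides=(x.strides[0], x.strides[0]),
#                    writeable=False)
#   # win[0] is [0, 1, 2], win[1] is [1, 2, 3], ...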
def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
subok=None, writeable=None):
return (x,)
@
|
array_function_dispatch(_sliding_window_view_dispatcher)
|
numpy.core.overrides.array_function_dispatch
|
from configparser import ConfigParser
from glob import glob
import os
from astropy.table import Table
from astropy.io import fits
from astropy.io import ascii
import numpy as np
class DitherSequence:
"""Access class to dithering sequence data from nightwatch or redux
files."""
def __init__(self, inifile, dry_run, output):
"""Parse a configuration file in INI format.
Parameters
----------
inifile : str
Name of INI file with configuration data.
dry_run : bool
If true, do not process input files.
output : str
Name of output file (FITS format).
"""
config = ConfigParser()
config.read(inifile)
sequence = config['dithersequence']
# Set up the output.
self._output = output
# Set up the file type and exposure sequence.
self._location = sequence['location']
self._filetype = sequence['filetype']
self._date = sequence['date']
self._exposures = [int(e) for e in sequence['exposures'].split()]
if 'coordinates' not in config:
raise ValueError('no coordinates set for dither!')
coords = config['coordinates']
self._dithertype = coords['dithertype']
self._wcs = fits.getdata(coords['wcsfile'], 2)
self._wcs = self._wcs[np.argsort(self._wcs['mjd_obs'])]
self._central_exposure = int(sequence['centralexposure'])
if coords['dithertype'] == 'telescope':
fadir = coords['fiberassigndir']
self._ditherfa = fits.getdata(os.path.join(
fadir, 'fiberassign-%s.fits' % coords['ditheredtilenum']))
self._unditherfa = fits.getdata(os.path.join(
fadir, 'fiberassign-%s.fits' % coords['unditheredtilenum']))
expnum = [int(fn.split('-')[1]) for fn in self._wcs['filename']]
centralind = expnum.index(self._central_exposure)
self._central_wcs = self._wcs[centralind]
# Set the Tile ID for the output metadata.
self._tileid = coords['unditheredtilenum']
else:
raise ValueError('not implemented')
# Extract the list of exposures on disk.
self._exposure_files = self._getfilenames()
if not dry_run:
# Construct fiber output.
self._exposure_table = self._buildtable()
def _getfilenames(self):
"""Return a list of exposures and filenames given an INI configuration.
Returns
-------
exfiles : dict
Dictionary of exposure numbers and corresponding nightwatch
qcframe or redux sframe FITS files.
"""
# Set up the path and file prefix depending on the filetype.
if self._filetype == 'nightwatch':
fileprefix = 'qcframe'
if self._location == 'nersc':
prefix = '/global/project/projectdirs/desi/spectro/nightwatch/kpno'
elif self._location == 'kpno':
prefix = '/exposures/desi' # not correct path!
else:
raise ValueError('Unknown location {}'.format(self._location))
elif self._filetype == 'redux':
fileprefix = 'sframe'
if self._location == 'nersc':
prefix = '/global/project/projectdirs/desi/spectro/redux/daily/exposures'
elif self._location == 'kpno':
prefix = '/exposures/desi' # not correct path!
else:
raise ValueError('Unknown location {}'.format(self._location))
else:
raise ValueError('Unknown file type {}'.format(self._filetype))
# Find the exposures files.
exfiles = {}
for ex in self._exposures:
folder = '{}/{}/{:08d}'.format(prefix, self._date, ex)
files = sorted(glob('{}/{}*.fits'.format(folder, fileprefix)))
exfiles[ex] = files
return exfiles
def _buildtable(self):
"""Loop through the exposure list and construct an observation
table."""
tabrows = []
for i, (expid, exfiles) in enumerate(self._exposure_files.items()):
specflux_b, specflux_r, specflux_z = [], [], []
tab = None
if len(exfiles) == 0:
continue
print(expid)
for exfile in exfiles:
print(exfile)
hdu = fits.open(exfile)
# The following tables are present in the redux sframes and the
# nightwatch qcframes.
wave = hdu['WAVELENGTH'].data
# However, in the nightwatch files the wavelength data are a
# table of size nfiber x nwavelength.
if self._filetype == 'nightwatch':
if wave.ndim > 1:
wave = wave[0]
fluxhead = hdu['FLUX'].header
fluxdata = hdu['FLUX'].data
ivardata = hdu['IVAR'].data
fibermap = hdu['FIBERMAP'].data
exptime = fluxhead['EXPTIME']
if not np.all(self._unditherfa['FIBER'] ==
np.arange(len(self._unditherfa))):
raise ValueError('weird fiberassign file format!')
fibermap = self._unditherfa[fibermap['FIBER']]
target_id = fibermap['TARGETID']
target_ra = fibermap['TARGET_RA']
target_dec = fibermap['TARGET_DEC']
fiber = fibermap['FIBER']
objtype = fibermap['OBJTYPE']
flux_g = fibermap['FLUX_G']
flux_r = fibermap['FLUX_R']
flux_z = fibermap['FLUX_Z']
x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]
camera = fluxhead['CAMERA'][0].upper()
if getattr(self, '_deltara', None) is not None:
dra = self._deltara[i]*np.ones(len(fiber))
ddec = self._deltadec[i]*np.ones(len(fiber))
elif self._dithertype == 'telescope':
dithra = self._ditherfa['target_ra']
dithdec = self._ditherfa['target_dec']
udithra = self._unditherfa['target_ra']
udithdec = self._unditherfa['target_dec']
ontarget = ((self._ditherfa['targetid'] ==
self._unditherfa['targetid']) &
(self._ditherfa['objtype'] == 'TGT'))
dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60
dfiberdec = (dithdec-udithdec)*60*60
if not np.all(self._ditherfa['FIBER'] ==
np.arange(len(self._ditherfa))):
raise ValueError('unexpected shape of dither file')
dfiberra[~ontarget] = np.nan
dfiberdec[~ontarget] = np.nan
dfiberra = dfiberra[fiber]
dfiberdec = dfiberdec[fiber]
wcs = self.lookup_wcs(fluxhead['MJD-OBS'])
centralwcs = self._central_wcs
if (~np.isfinite(centralwcs['cenra'][1]) or
~np.isfinite(centralwcs['cendec'][1])):
raise ValueError('central pointing ra/dec is NaN!')
dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])
dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))
dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]
dra = dfiberra + dtelra*60*60
ddec = dfiberdec + dteldec*60*60
if np.all(~np.isfinite(dra)):
print('warning: no good telescope offset for %s' %
exfile)
else:
raise ValueError('not implemented')
for j, fiber_id in enumerate(fiber):
flux = fluxdata[j]
ivar = ivardata[j]
if not np.any(ivar > 0):
specflux = 0
specflux_ivar = 0
else:
meanivar = np.mean(ivar[ivar > 0])
mask = ivar > meanivar / 100
specflux = np.trapz(flux*mask, wave)
specflux_ivar = 1./np.sum(ivar[mask]**-1)
# Schlegel: sum over correct wavelengths, all three
# filters, plus 11 pixel median filter to reject
# cosmics.
# will require being better about reading in
# the spectrographs together.
tabrows.append((expid, exptime,
target_id[j], target_ra[j], target_dec[j],
fiber[j], objtype[j],
flux_g[j], flux_r[j], flux_z[j],
specflux, specflux_ivar, camera,
dra[j], ddec[j],
x[j], y[j]))
tab = Table(rows=tabrows,
names=('EXPID', 'EXPTIME',
'TARGETID', 'TARGET_RA', 'TARGET_DEC',
'FIBER', 'OBJTYPE',
'FLUX_G', 'FLUX_R', 'FLUX_Z',
'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',
'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',
'XFOCAL', 'YFOCAL'),
meta={'EXTNAME' : 'DITHER',
'TILEID' : '{}'.format(self._tileid)})
return tab
def lookup_wcs(self, mjd):
# expfn = self._exposure_files[expnum]
# mjd = fits.getheader(expfn)['MJD-OBS']
ind = np.searchsorted(self._wcs['mjd_obs'], mjd)
if ind >= len(self._wcs):
return np.array(((np.nan,)*3, (np.nan,)*3),
dtype=[('cenra', '3f8'), ('cendec', '3f8')])
twcs = self._wcs[ind]
if twcs['mjd_obs'] <= mjd:
raise ValueError('Something confusing with wcs list')
return twcs
def save(self, filename=None, overwrite=True):
"""Save exposure table to a FITS file.
Parameters
----------
filename : str
Output filename. If none, use default output class member.
overwrite : bool
If true, clobber an existing file with the same name.
"""
if filename is None:
filename = self._output
self._exposure_table.write(filename, overwrite=overwrite)
def rearrange_table(self):
exps = np.sort(np.unique(self._exposure_table['EXPID']))
nexp = len(exps)
nfiber = 5000
camera =
|
np.unique(self._exposure_table['CAMERA'])
|
numpy.unique
|
"""Test module for memory operations."""
import numpy
import pytest
from concrete.common.data_types import UnsignedInteger
from concrete.common.values import EncryptedTensor
from concrete.numpy import compile_numpy_function
@pytest.mark.parametrize(
"function,parameters,inputset,test_input,expected_output",
[
pytest.param(
lambda x: x.flatten(),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3, 2)),
},
[numpy.random.randint(0, 2 ** 4, size=(3, 2)) for _ in range(10)],
[[0, 1], [1, 2], [2, 3]],
[0, 1, 1, 2, 2, 3],
),
pytest.param(
lambda x: x.flatten(),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 4, 5, 6)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 4, 5, 6)) for _ in range(10)],
(numpy.arange(720) % 10).reshape((2, 3, 4, 5, 6)),
(numpy.arange(720) % 10),
),
pytest.param(
lambda x: x.reshape((1, 3)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3,)),
},
[numpy.random.randint(0, 2 ** 4, size=(3,)) for _ in range(10)],
[5, 9, 1],
[[5, 9, 1]],
),
pytest.param(
lambda x: x.reshape((3, 1)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3,)),
},
[numpy.random.randint(0, 2 ** 4, size=(3,)) for _ in range(10)],
[5, 9, 1],
[[5], [9], [1]],
),
pytest.param(
lambda x: x.reshape((3, 2)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3, 2)),
},
[numpy.random.randint(0, 2 ** 4, size=(3, 2)) for _ in range(10)],
[[0, 1], [1, 2], [2, 3]],
[[0, 1], [1, 2], [2, 3]],
),
pytest.param(
lambda x: x.reshape((3, 2)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3)) for _ in range(10)],
[[0, 1, 1], [2, 2, 3]],
[[0, 1], [1, 2], [2, 3]],
),
pytest.param(
lambda x: x.reshape(-1),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3, 2)),
},
[numpy.random.randint(0, 2 ** 4, size=(3, 2)) for _ in range(10)],
[[0, 1], [1, 2], [2, 3]],
[0, 1, 1, 2, 2, 3],
),
pytest.param(
lambda x: x.reshape((2, 2, 3)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(4, 3)),
},
[numpy.random.randint(0, 2 ** 4, size=(4, 3)) for _ in range(10)],
(numpy.arange(12) % 10).reshape((4, 3)),
(numpy.arange(12) % 10).reshape((2, 2, 3)),
),
pytest.param(
lambda x: x.reshape((4, 3)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 2, 3)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 2, 3)) for _ in range(10)],
(numpy.arange(12) % 10).reshape((2, 2, 3)),
(numpy.arange(12) % 10).reshape((4, 3)),
),
pytest.param(
lambda x: x.reshape((3, 2, 2)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3, 4)),
},
[numpy.random.randint(0, 2 ** 4, size=(3, 4)) for _ in range(10)],
(numpy.arange(12) % 10).reshape((3, 4)),
(numpy.arange(12) % 10).reshape((3, 2, 2)),
),
pytest.param(
lambda x: x.reshape((3, 4)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(3, 2, 2)),
},
[numpy.random.randint(0, 2 ** 4, size=(3, 2, 2)) for _ in range(10)],
(numpy.arange(12) % 10).reshape((3, 2, 2)),
(numpy.arange(12) % 10).reshape((3, 4)),
),
pytest.param(
lambda x: x.reshape((5, 3, 2)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(6, 5)),
},
[numpy.random.randint(0, 2 ** 4, size=(6, 5)) for _ in range(10)],
(numpy.arange(30) % 10).reshape((6, 5)),
(numpy.arange(30) % 10).reshape((5, 3, 2)),
),
pytest.param(
lambda x: x.reshape((5, 6)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 5)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 5)) for _ in range(10)],
(numpy.arange(30) % 10).reshape((2, 3, 5)),
(numpy.arange(30) % 10).reshape((5, 6)),
),
pytest.param(
lambda x: x.reshape((6, 4, 30)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 4, 5, 6)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 4, 5, 6)) for _ in range(10)],
(numpy.arange(720) % 10).reshape((2, 3, 4, 5, 6)),
(numpy.arange(720) % 10).reshape((6, 4, 30)),
),
pytest.param(
lambda x: x.reshape((2, 60, 6)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 4, 5, 6)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 4, 5, 6)) for _ in range(10)],
(numpy.arange(720) % 10).reshape((2, 3, 4, 5, 6)),
(numpy.arange(720) % 10).reshape((2, 60, 6)),
),
pytest.param(
lambda x: x.reshape((6, 6, -1)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 2, 3, 4)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 2, 3, 4)) for _ in range(10)],
(numpy.arange(144) % 10).reshape((2, 3, 2, 3, 4)),
(numpy.arange(144) % 10).reshape((6, 6, -1)),
),
pytest.param(
lambda x: x.reshape((6, -1, 12)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 2, 3, 4)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 2, 3, 4)) for _ in range(10)],
(numpy.arange(144) % 10).reshape((2, 3, 2, 3, 4)),
(numpy.arange(144) % 10).reshape((6, -1, 12)),
),
pytest.param(
lambda x: x.reshape((-1, 18, 4)),
{
"x": EncryptedTensor(UnsignedInteger(4), shape=(2, 3, 2, 3, 4)),
},
[numpy.random.randint(0, 2 ** 4, size=(2, 3, 2, 3, 4)) for _ in range(10)],
(
|
numpy.arange(144)
|
numpy.arange
|
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class Test_cutout(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked(self, mock_inplace):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
mock_inplace.return_value = "foo"
rng = iarandom.RNG(0)
image_aug = iaa.cutout(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="gaussian",
cval=1,
fill_per_channel=0.5,
seed=rng)
assert mock_inplace.call_count == 1
assert image_aug == "foo"
args = mock_inplace.call_args_list[0][0]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(args[1], 10)
assert np.isclose(args[2], 20)
assert np.isclose(args[3], 30)
assert np.isclose(args[4], 40)
assert args[5] == "gaussian"
assert args[6] == 1
assert np.isclose(args[7], 0.5)
assert args[8] is rng
class Test_cutout_(unittest.TestCase):
def test_with_simple_image(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
mask = np.zeros(image.shape, dtype=bool)
mask[20:40, 10:30, :] = True
overlap_inside = np.sum(image_aug[mask] == 0) / np.sum(mask)
overlap_outside = np.sum(image_aug[~mask] > 0) / np.sum(~mask)
assert image_aug is image
assert overlap_inside >= 1.0 - 1e-4
assert overlap_outside >= 1.0 - 1e-4
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_constant_")
def test_fill_mode_constant_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("constant", mock_fill)
@mock.patch("imgaug.augmenters.arithmetic._fill_rectangle_gaussian_")
def test_fill_mode_gaussian_mocked(self, mock_fill):
self._test_with_fill_mode_mocked("gaussian", mock_fill)
@classmethod
def _test_with_fill_mode_mocked(cls, fill_mode, mock_fill):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
mock_fill.return_value = image
seed = iarandom.RNG(0)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=40,
fill_mode=fill_mode,
cval=0,
fill_per_channel=False,
seed=seed)
assert mock_fill.call_count == 1
args = mock_fill.call_args_list[0][0]
kwargs = mock_fill.call_args_list[0][1]
assert image_aug is image
assert args[0] is image
assert kwargs["x1"] == 10
assert kwargs["y1"] == 20
assert kwargs["x2"] == 30
assert kwargs["y2"] == 40
assert kwargs["cval"] == 0
assert kwargs["per_channel"] is False
assert kwargs["random_state"] is seed
def test_zero_height(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=30,
y2=20,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_zero_height_width(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=10,
y1=20,
x2=10,
y2=40,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_fully_outside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-50,
y1=150,
x2=-1,
y2=200,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.array_equal(image_aug, image_cp)
def test_position_outside_of_image_rect_partially_inside(self):
image = np.mod(np.arange(100*100*3), 255).astype(np.uint8).reshape(
(100, 100, 3))
image = 1 + image
image_aug = iaa.cutout_(image,
x1=-25,
y1=-25,
x2=25,
y2=25,
fill_mode="constant",
cval=0,
fill_per_channel=False,
seed=None)
assert np.all(image_aug[0:25, 0:25] == 0)
assert np.all(image_aug[0:25, 25:] > 0)
assert np.all(image_aug[25:, :] > 0)
def test_zero_sized_axes(self):
shapes = [(0, 0, 0),
(1, 0, 0),
(0, 1, 0),
(0, 1, 1),
(1, 1, 0),
(1, 0, 1),
(1, 0),
(0, 1),
(0, 0)]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
image_cp = np.copy(image)
image_aug = iaa.cutout_(image,
x1=-5,
y1=-5,
x2=5,
y2=5,
fill_mode="constant",
cval=0)
assert np.array_equal(image_aug, image_cp)
class Test_fill_rectangle_gaussian_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=20,
x2=60,
y2=70,
cval=0,
per_channel=False,
random_state=rng)
assert np.array_equal(image_aug[:20, :],
image_cp[:20, :])
assert not np.array_equal(image_aug[20:70, 10:60],
image_cp[20:70, 10:60])
assert np.isclose(np.average(image_aug[20:70, 10:60]), 127.5,
rtol=0, atol=5.0)
assert np.isclose(np.std(image_aug[20:70, 10:60]), 255.0/2.0/3.0,
rtol=0, atol=2.5)
def test_per_channel(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=False,
random_state=iarandom.RNG(0))
image_aug_pc = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
diff11 = (image_aug[..., 0] != image_aug[..., 1])
diff12 = (image_aug[..., 0] != image_aug[..., 2])
diff21 = (image_aug_pc[..., 0] != image_aug_pc[..., 1])
diff22 = (image_aug_pc[..., 0] != image_aug_pc[..., 2])
assert not np.any(diff11)
assert not np.any(diff12)
assert np.any(diff21)
assert np.any(diff22)
def test_deterministic_with_same_seed(self):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)), (1, 1, 3))
image_aug_pc1 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc2 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
image_aug_pc3 = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(1))
assert np.array_equal(image_aug_pc1, image_aug_pc2)
assert not np.array_equal(image_aug_pc2, image_aug_pc3)
def test_no_channels(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = image.reshape((1, 10))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=per_channel,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 3, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(nb_channels=nb_channels,
per_channel=per_channel):
image = np.uint8([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
image = np.tile(image.reshape((1, 10, 1)),
(1, 1, nb_channels))
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
np.copy(image),
x1=0, y1=0, x2=10, y2=1,
cval=0,
per_channel=True,
random_state=iarandom.RNG(0))
assert not np.array_equal(image_aug, image)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image,
x1=10,
y1=10,
x2=300-10,
y2=300-10,
cval=0,
per_channel=per_channel,
random_state=rng)
rect = image_aug[10:-10, 10:-10]
p_true = np.sum(rect) / rect.size
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect, image_cp[10:-10, 10:-10])
assert np.isclose(p_true, 0.5, rtol=0, atol=0.1)
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_int_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
dynamic_range = int(max_value) - int(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here, that seems
                    # to not support uint64 (max value not in result)
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
assert not np.array_equal(rect,
image_cp[10:-10, 10:-10])
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.array_equal(image_aug[..., 0],
image_aug[..., c])
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
min_value = 0.0
center_value = 0.5
max_value = 1.0
dynamic_range = np.float128(max_value) - np.float128(min_value)
gaussian_min = iarandom.RNG(0).normal(min_value, 0.0001,
size=(1,))
gaussian_max = iarandom.RNG(0).normal(max_value, 0.0001,
size=(1,))
assert min_value - 1.0 <= gaussian_min <= min_value + 1.0
assert max_value - 1.0 <= gaussian_max <= max_value + 1.0
for per_channel in [False, True]:
with self.subTest(dtype=dtype, per_channel=per_channel):
                    # don't generate the image from choice() here; it is
                    # constructed the same way as in the uint/int test above
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert np.any(np.isclose(image, min_value,
rtol=0, atol=1e-4))
assert np.any(np.isclose(image, max_value,
rtol=0, atol=1e-4))
image_cp = np.copy(image)
rng = iarandom.RNG(0)
image_aug = arithmetic_lib._fill_rectangle_gaussian_(
image, x1=10, y1=10, x2=300-10, y2=300-10,
cval=0, per_channel=per_channel, random_state=rng)
rect = image_aug[10:-10, 10:-10]
mean = np.average(np.float128(rect))
std = np.std(np.float128(rect) - center_value)
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
assert not np.allclose(rect, image_cp[10:-10, 10:-10],
rtol=0, atol=1e-4)
assert np.isclose(mean, center_value, rtol=0,
atol=0.05*dynamic_range)
assert np.isclose(std, dynamic_range/2.0/3.0, rtol=0,
atol=0.05*dynamic_range/2.0/3.0)
assert np.min(rect) < min_value + 0.2 * dynamic_range
assert np.max(rect) > max_value - 0.2 * dynamic_range
if per_channel:
for c in np.arange(1, image.shape[2]):
assert not np.allclose(image_aug[..., 0],
image_aug[..., c],
rtol=0, atol=1e-4)
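
# Reference sketch, not collected as a test: the gaussian-fill tests above
# expect the filled rectangle to have a mean near the dtype's center value and
# a standard deviation near dynamic_range/2/3. The helper below reproduces
# that expectation for uint8 with plain numpy only; it is an assumption-level
# illustration and does not call imgaug internals.
def _example_gaussian_fill_statistics():
    import numpy as np

    rng = np.random.RandomState(0)
    # uint8: center value 127.5, dynamic range 255 -> std of roughly 42.5
    rect = rng.normal(loc=127.5, scale=255.0 / 2.0 / 3.0, size=(50, 50, 3))
    rect = np.clip(np.round(rect), 0, 255).astype(np.uint8)
    return float(np.average(rect)), float(np.std(rect.astype(np.float64)))
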
class Test_fill_rectangle_constant_(unittest.TestCase):
def test_simple_image(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_but_per_channel_is_false(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=False, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_iterable_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 25)
def test_iterable_cval_with_per_channel_is_true_channel_mismatch(self):
image = np.mod(np.arange(100*100*5), 256).astype(np.uint8).reshape(
(100, 100, 5))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 21)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
assert np.all(image_aug[20:70, 10:60, 3] == 21)
assert np.all(image_aug[20:70, 10:60, 4] == 17)
def test_single_cval_with_per_channel_is_true(self):
image = np.mod(np.arange(100*100*3), 256).astype(np.uint8).reshape(
(100, 100, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=True, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60, 0] == 17)
assert np.all(image_aug[20:70, 10:60, 1] == 17)
assert np.all(image_aug[20:70, 10:60, 2] == 17)
def test_no_channels_single_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=17, per_channel=per_channel, random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_no_channels_iterable_cval(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100), 256
).astype(np.uint8).reshape((100, 100))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21, 25], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
assert np.all(image_aug[20:70, 10:60] == 17)
def test_unusual_channel_numbers(self):
for nb_channels in [1, 2, 4, 5, 511, 512, 513]:
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.mod(
np.arange(100*100*nb_channels), 256
).astype(np.uint8).reshape((100, 100, nb_channels))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=20, x2=60, y2=70,
cval=[17, 21], per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:20, :], image_cp[:20, :])
if per_channel:
for c in np.arange(nb_channels):
val = 17 if c % 2 == 0 else 21
assert np.all(image_aug[20:70, 10:60, c] == val)
else:
assert np.all(image_aug[20:70, 10:60, :] == 17)
def test_other_dtypes_bool(self):
for per_channel in [False, True]:
with self.subTest(per_channel=per_channel):
image = np.array([0, 1], dtype=bool)
image = np.tile(image, (int(3*300*300/2),))
image = image.reshape((300, 300, 3))
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[0, 1], per_channel=per_channel,
random_state=None)
rect = image_aug[10:-10, 10:-10]
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0] == 0)
assert np.all(image_aug[10:-10, 10:-10, 1] == 1)
assert np.all(image_aug[10:-10, 10:-10, 2] == 0)
else:
                    assert np.all(image_aug[10:-10, 10:-10] == 0)
def test_other_dtypes_uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
assert min_value in image
assert max_value in image
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert np.array_equal(image_aug[:10, :], image_cp[:10, :])
if per_channel:
assert np.all(image_aug[10:-10, 10:-10, 0]
== min_value)
assert np.all(image_aug[10:-10, 10:-10, 1]
== 10)
assert np.all(image_aug[10:-10, 10:-10, 2]
== max_value)
else:
                        assert np.all(image_aug[10:-10, 10:-10] == min_value)
def test_other_dtypes_float(self):
dtypes = ["float16", "float32", "float64", "float128"]
for dtype in dtypes:
for per_channel in [False, True]:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
with self.subTest(dtype=dtype, per_channel=per_channel):
image = np.array([min_value, min_value+1,
int(center_value),
max_value-1, max_value], dtype=dtype)
image = np.tile(image, (int(3*300*300/5),))
image = image.reshape((300, 300, 3))
# Use this here instead of any(isclose(...)) because
# the latter one leads to overflow warnings.
assert image.flat[0] <= np.float128(min_value) + 1.0
assert image.flat[4] >= np.float128(max_value) - 1.0
image_cp = np.copy(image)
image_aug = arithmetic_lib._fill_rectangle_constant_(
image,
x1=10, y1=10, x2=300-10, y2=300-10,
cval=[min_value, 10, max_value],
per_channel=per_channel,
random_state=None)
assert image_aug.dtype.name == dtype
assert np.allclose(image_aug[:10, :], image_cp[:10, :],
rtol=0, atol=1e-4)
if per_channel:
assert np.allclose(image_aug[10:-10, 10:-10, 0],
np.float128(min_value),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 1],
np.float128(10),
rtol=0, atol=1e-4)
assert np.allclose(image_aug[10:-10, 10:-10, 2],
np.float128(max_value),
rtol=0, atol=1e-4)
else:
                        assert np.allclose(image_aug[10:-10, 10:-10],
np.float128(min_value),
rtol=0, atol=1e-4)
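
# Minimal usage sketch, not collected as a test: iaa.Add adds a (possibly
# randomly sampled) constant to every pixel of an image, optionally one value
# per channel. It mirrors the behaviour exercised in TestAdd below; the
# concrete numbers are illustrative only.
def _example_add_usage():
    import numpy as np
    import imgaug.augmenters as iaa

    image = np.full((4, 4, 3), 100, dtype=np.uint8)
    aug = iaa.Add(value=(-10, 10), per_channel=True)
    image_aug = aug.augment_image(image)
    # each channel receives its own offset from [-10, 10], so all values
    # end up in the range [90, 110]
    return image_aug
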
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=10)
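
# Minimal usage sketch, not collected as a test: unlike iaa.Add, which draws
# one value per image (or per channel), iaa.AddElementwise draws a value per
# pixel; the tests below therefore check that samples vary between spatial
# locations. The snippet is an illustration only.
def _example_add_elementwise_usage():
    import numpy as np
    import imgaug.augmenters as iaa

    image = np.full((8, 8, 3), 100, dtype=np.uint8)
    image_aug = iaa.AddElementwise(value=(-20, 20)).augment_image(image)
    # per-pixel offsets: the result typically contains many distinct values
    return np.unique(image_aug)
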
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
        # no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
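
# Minimal usage sketch, not collected as a test: iaa.AdditiveGaussianNoise adds
# gaussian noise with mean `loc` and standard deviation `scale`; the tests
# below pass both as absolute intensity values (e.g. 0.2 * 255). Illustration
# only, with arbitrary numbers.
def _example_additive_gaussian_noise_usage():
    import numpy as np
    import imgaug.augmenters as iaa

    image = np.full((16, 16, 1), 128, dtype=np.uint8)
    aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
    image_aug = aug.augment_image(image)
    # per-pixel noise with a std of roughly 0.2 * 255 (before uint8 clipping)
    return image_aug
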
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
        # no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
        assert 54 < np.average(values) < 74  # loc=0.25*255 should be around 64 on average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
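
# Minimal usage sketch, not collected as a test: iaa.Cutout removes one or more
# rectangular regions from an image and fills them with a constant value or
# with gaussian noise. The parameters used here are the same ones exercised by
# TestCutout below; the concrete numbers are illustrative only.
def _example_cutout_usage():
    import numpy as np
    import imgaug.augmenters as iaa

    image = np.full((100, 100, 3), 255, dtype=np.uint8)
    aug = iaa.Cutout(nb_iterations=2, size=0.2, squared=True,
                     fill_mode="constant", cval=0)
    image_aug = aug.augment_image(image)
    # two roughly 20x20 patches at random positions are now filled with 0
    return image_aug
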
class TestCutout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Cutout()
assert aug.nb_iterations.value == 1
assert isinstance(aug.position[0], iap.Uniform)
assert isinstance(aug.position[1], iap.Uniform)
assert np.isclose(aug.size.value, 0.2)
assert aug.squared.value == 1
assert aug.fill_mode.value == "constant"
assert aug.cval.value == 128
assert aug.fill_per_channel.value == 0
def test___init___custom(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
assert aug.nb_iterations.value == 1
assert np.isclose(aug.position[0].value, 0.5)
assert np.isclose(aug.position[1].value, 0.5)
assert np.isclose(aug.size.value, 0.1)
assert np.isclose(aug.squared.p.value, 0.6)
assert aug.fill_mode.a == ["gaussian", "constant"]
assert np.isclose(aug.cval.a.value, 0)
assert np.isclose(aug.cval.b.value, 255)
assert np.isclose(aug.fill_per_channel.p.value, 0.5)
def test___init___fill_mode_is_stochastic_param(self):
param = iap.Deterministic("constant")
aug = iaa.Cutout(fill_mode=param)
assert aug.fill_mode is param
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_false(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=False,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
        # don't return the image itself, otherwise the loop below would fail
        # at its second iteration, as the method is expected to work on an
        # internal copy of the image and not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.2*30))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.2*30))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
@mock.patch("imgaug.augmenters.arithmetic.cutout_")
def test_mocked__squared_true(self, mock_apply):
aug = iaa.Cutout(nb_iterations=2,
position=(0.5, 0.6),
size=iap.DeterministicList([0.1, 0.2]),
squared=True,
fill_mode="gaussian",
cval=1,
fill_per_channel=True)
image = np.zeros((10, 30, 3), dtype=np.uint8)
        # don't return the image itself, otherwise the loop below would fail
        # at its second iteration, as the method is expected to work on an
        # internal copy of the image and not on the image itself
mock_apply.return_value = np.copy(image)
_ = aug(image=image)
assert mock_apply.call_count == 2
for call_idx in np.arange(2):
args = mock_apply.call_args_list[call_idx][0]
kwargs = mock_apply.call_args_list[call_idx][1]
assert args[0] is not image
assert np.array_equal(args[0], image)
assert np.isclose(kwargs["x1"], 0.5*30 - 0.5 * (0.1*10))
assert np.isclose(kwargs["y1"], 0.6*10 - 0.5 * (0.1*10))
assert np.isclose(kwargs["x2"], 0.5*30 + 0.5 * (0.1*10))
assert np.isclose(kwargs["y2"], 0.6*10 + 0.5 * (0.1*10))
assert kwargs["fill_mode"] == "gaussian"
assert np.array_equal(kwargs["cval"], [1, 1, 1])
assert np.isclose(kwargs["fill_per_channel"], 1.0)
assert isinstance(kwargs["seed"], iarandom.RNG)
def test_simple_image(self):
aug = iaa.Cutout(nb_iterations=2,
position=(
iap.DeterministicList([0.2, 0.8]),
iap.DeterministicList([0.2, 0.8])
),
size=0.2,
fill_mode="constant",
cval=iap.DeterministicList([0, 0, 0, 1, 1, 1]))
image = np.full((100, 100, 3), 255, dtype=np.uint8)
for _ in np.arange(3):
images_aug = aug(images=[image, image])
for image_aug in images_aug:
values = np.unique(image_aug)
assert len(values) == 3
assert 0 in values
assert 1 in values
assert 255 in values
def test_batch_contains_only_non_image_data(self):
aug = iaa.Cutout()
segmap_arr = np.ones((3, 3, 1), dtype=np.int32)
segmap = ia.SegmentationMapsOnImage(segmap_arr, shape=(3, 3, 3))
segmap_aug = aug.augment_segmentation_maps(segmap)
assert np.array_equal(segmap.get_arr(), segmap_aug.get_arr())
def test_sampling_when_position_is_stochastic_parameter(self):
# sampling of position works slightly differently when it is a single
# parameter instead of tuple (paramX, paramY), so we have an extra
# test for that situation here
param = iap.DeterministicList([0.5, 0.6])
aug = iaa.Cutout(position=param)
samples = aug._draw_samples([
np.zeros((3, 3, 3), dtype=np.uint8),
np.zeros((3, 3, 3), dtype=np.uint8)
], iarandom.RNG(0))
assert np.allclose(samples.pos_x, [0.5, 0.5])
assert np.allclose(samples.pos_y, [0.6, 0.6])
def test_by_comparison_to_official_implementation(self):
image = np.ones((10, 8, 2), dtype=np.uint8)
aug = iaa.Cutout(1, position="uniform", size=0.2, squared=True,
cval=0)
aug_official = _CutoutOfficial(n_holes=1, length=int(10*0.2))
dropped = np.zeros((10, 8, 2), dtype=np.int32)
dropped_official = np.copy(dropped)
height = np.zeros((10, 8, 2), dtype=np.int32)
width = np.copy(height)
height_official = np.copy(height)
width_official = np.copy(width)
nb_iterations = 3 * 1000
images_aug = aug(images=[image] * nb_iterations)
for image_aug in images_aug:
image_aug_off = aug_official(image)
mask = (image_aug == 0)
mask_off = (image_aug_off == 0)
dropped += mask
dropped_official += mask_off
ydrop = np.max(mask, axis=(2, 1))
xdrop = np.max(mask, axis=(2, 0))
wx = np.where(xdrop)
wy = np.where(ydrop)
x1 = wx[0][0]
x2 = wx[0][-1]
y1 = wy[0][0]
y2 = wy[0][-1]
ydrop_off = np.max(mask_off, axis=(2, 1))
xdrop_off = np.max(mask_off, axis=(2, 0))
wx_off = np.where(xdrop_off)
wy_off = np.where(ydrop_off)
x1_off = wx_off[0][0]
x2_off = wx_off[0][-1]
y1_off = wy_off[0][0]
y2_off = wy_off[0][-1]
height += (
np.full(height.shape, 1 + (y2 - y1), dtype=np.int32)
* mask)
width += (
np.full(width.shape, 1 + (x2 - x1), dtype=np.int32)
* mask)
height_official += (
np.full(height_official.shape, 1 + (y2_off - y1_off),
dtype=np.int32)
* mask_off)
width_official += (
np.full(width_official.shape, 1 + (x2_off - x1_off),
dtype=np.int32)
* mask_off)
dropped_prob = dropped / nb_iterations
dropped_prob_off = dropped_official / nb_iterations
height_avg = height / (dropped + 1e-4)
height_avg_off = height_official / (dropped_official + 1e-4)
width_avg = width / (dropped + 1e-4)
width_avg_off = width_official / (dropped_official + 1e-4)
prob_max_diff = np.max(np.abs(dropped_prob - dropped_prob_off))
height_avg_max_diff = np.max(np.abs(height_avg - height_avg_off))
width_avg_max_diff = np.max(np.abs(width_avg - width_avg_off))
assert prob_max_diff < 0.04
assert height_avg_max_diff < 0.3
assert width_avg_max_diff < 0.3
def test_determinism(self):
aug = iaa.Cutout(nb_iterations=(1, 3),
size=(0.1, 0.2),
fill_mode=["gaussian", "constant"],
cval=(0, 255))
image = np.mod(
np.arange(100*100*3), 256
).reshape((100, 100, 3)).astype(np.uint8)
sums = []
for _ in np.arange(10):
aug_det = aug.to_deterministic()
image_aug1 = aug_det(image=image)
image_aug2 = aug_det(image=image)
assert np.array_equal(image_aug1, image_aug2)
sums.append(np.sum(image_aug1))
assert len(np.unique(sums)) > 1
def test_get_parameters(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
params = aug.get_parameters()
assert params[0] is aug.nb_iterations
assert params[1] is aug.position
assert params[2] is aug.size
assert params[3] is aug.squared
assert params[4] is aug.fill_mode
assert params[5] is aug.cval
assert params[6] is aug.fill_per_channel
def test_pickleable(self):
aug = iaa.Cutout(
nb_iterations=1,
position=(0.5, 0.5),
size=0.1,
squared=0.6,
fill_mode=["gaussian", "constant"],
cval=(0, 255),
fill_per_channel=0.5
)
runtest_pickleable_uint8_img(aug)
# this is mostly copy-pasted Cutout code from
# https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
# we use it as a reference to compare our implementation against;
# some pytorch operations were changed to their numpy equivalents
class _CutoutOfficial(object):
"""Randomly mask out one or more patches from an image.
Args:
n_holes (int): Number of patches to cut out of each image.
length (int): The length (in pixels) of each square patch.
"""
def __init__(self, n_holes, length):
self.n_holes = n_holes
self.length = length
def __call__(self, img):
"""
Args:
img (Tensor): Tensor image of size (C, H, W).
Returns:
Tensor: Image with n_holes of dimension length x length cut out of
it.
"""
# h = img.size(1)
# w = img.size(2)
h = img.shape[0]
w = img.shape[1]
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
# note that in the paper they normalize to 0-mean,
# i.e. 0 here is actually not black but grayish pixels
mask[y1: y2, x1: x2] = 0
# mask = torch.from_numpy(mask)
# mask = mask.expand_as(img)
if img.ndim != 2:
mask = np.tile(mask[:, :, np.newaxis], (1, 1, img.shape[-1]))
img = img * mask
return img
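
# Usage sketch for the reference implementation above, not collected as a test:
# _CutoutOfficial masks `n_holes` square patches of side `length` with zeros
# (clipped at the image borders).
def _example_cutout_official_usage():
    import numpy as np

    image = np.ones((10, 8, 2), dtype=np.float32)
    cutout = _CutoutOfficial(n_holes=1, length=2)
    image_masked = cutout(image)
    # one up-to-2x2 region is now zero in all channels
    return image_masked
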
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
        # no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# every channel was kept at least once across images, i.e. which one is kept is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
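# illustrative sketch (not part of the test suite) contrasting the granularity
# of the dropout variants covered above; the drop probability of 0.5 is just
# an arbitrary example value
def _dropout_variants_sketch():
    image = np.full((8, 8, 3), 255, dtype=np.uint8)
    per_pixel = iaa.Dropout(p=0.5)(image=image)      # zeros individual pixels
    per_channel = iaa.Dropout2d(p=0.5)(image=image)  # zeros entire channels
    per_image = iaa.TotalDropout(p=0.5)(image=image) # zeros the whole image or leaves it untouched
    return per_pixel, per_channel, per_image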
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
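# minimal sketch (assumption, not executed by the tests) illustrating why the
# deactivated float16 checks in the dtype tests use a tolerance of +/- 1e-1
# rather than +/- 1e-2: near 100 the spacing between adjacent float16 values
# is already 0.0625, so rounding alone can exceed 1e-2
def _float16_tolerance_sketch():
    spacing_near_100 = np.spacing(np.float16(100.0))
    return float(spacing_near_100)  # ~0.0625 > 1e-2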
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.MultiplyElementwise(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
        # keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
# test channelwise
        aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
        observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
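            # (np.iinfo(np.int32).max is 2147483647, while np.iinfo(np.uint32).max
            # is 4294967295, so the uint32 extremes cannot be represented)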
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
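            # (np.finfo(np.float32).max is about 3.4e38, far below the float64
            # maximum of about 1.8e308)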
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
                # expected density under a uniform p in (0, 1) is 1/nb_bins
                assert 1/nb_bins - tolerance < density < 1/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyways
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, seed=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img =
|
np.zeros((50, 50, 1), dtype=np.uint8)
|
numpy.zeros
|
import sys
sys.path.append('/Users/eshanking/repos')
import matplotlib.pyplot as plt
from cycler import cycler
import seaborn as sns
import numpy as np
import os
import math
import scipy.stats
from fears.utils import dir_manager, fitness
# from fears.utils.experiment_class import Experiment
from matplotlib.collections import LineCollection
import networkx as nx
from labellines import labelLine
def gen_color_cycler(style=None,palette='bright',n_colors=16):
if style is None:
colors = sns.color_palette(palette)
colors = np.concatenate((colors[0:9],colors[0:7]),axis=0)
# colors[[14,15]] = colors[[15,14]]
colors[[7,8]] = colors[[8,7]]
cc = (cycler(color=colors) +
cycler(linestyle=['-', '-','-','-','-','-','-','-','-',
'--','--','--','--','--','--','--']))
elif style == 'solid':
colors = sns.color_palette(palette,n_colors)
cc = cycler(color=colors)
return cc
def plot_timecourse(pop,counts_t=None,title_t=None):
if (pop.counts == 0).all() and counts_t is None:
print('No data to plot!')
return
elif counts_t is None:
counts = pop.counts
else:
counts = counts_t # an input other than pop overrides pop
if title_t is not None:
title = title_t
else:
title = pop.fig_title
left = 0.1
width = 0.8
if pop.plot_entropy == True:
fig,(ax1,ax3) = plt.subplots(2,1,figsize=(6,4),sharex=True)
ax3.set_position([left, 0.2, width, 0.2]) # ax3 is entropy
else:
fig,ax1 = plt.subplots(1,1,figsize=(6,4),sharex=True)
ax1.set_position([left, 0.5, width, 0.6]) # ax1 is the timecourse
counts_total = np.sum(counts,axis=0)
sorted_index = counts_total.argsort()
sorted_index_big = sorted_index[-8:]
cc = gen_color_cycler()
ax1.set_prop_cycle(cc)
color = [0.5,0.5,0.5]
if pop.plot_drug_curve:
ax2 = ax1.twinx() # ax2 is the drug timecourse
ax2.set_position([left, 0.5, width, 0.6])
ax2.set_ylabel('Drug Concentration \n($\u03BC$M)', color=color,fontsize=20) # we already handled the x-label with ax1
drug_curve = pop.drug_curve
ax2.plot(drug_curve, color='black', linewidth=2.0)
ax2.tick_params(axis='y', labelcolor=color)
if pop.drug_log_scale:
ax2.set_yscale('log')
if min(drug_curve) <= 0:
axmin = 10**-3
else:
axmin = min(drug_curve)
ax2.set_ylim(axmin,2*max(drug_curve))
ax2.legend(['Drug Conc.'],loc=(1.3,0.93),frameon=False,fontsize=15)
else:
ax2.set_ylim(0,1.1*max(drug_curve))
ax2.legend(['Drug Conc.'],loc=(1.25,0.93),frameon=False,fontsize=15)
ax2.tick_params(labelsize=15)
ax2.set_title(title,fontsize=20)
for allele in range(counts.shape[1]):
if allele in sorted_index_big:
ax1.plot(counts[:,allele],linewidth=3.0,label=str(pop.int_to_binary(allele)))
else:
ax1.plot(counts[:,allele],linewidth=3.0,label=None)
ax1.legend(loc=(1.25,-.12),frameon=False,fontsize=15)
ax1.set_xlim(0,pop.x_lim)
ax1.set_facecolor(color='w')
ax1.grid(False)
ax1.set_ylabel('Cells',fontsize=20)
ax1.tick_params(labelsize=15)
if pop.plot_entropy == True:
e = pop.entropy(counts)
ax3.plot(e,color='black')
ax3.set_xlabel('Time',fontsize=20)
ax3.set_ylabel('Entropy',fontsize=20)
if pop.entropy_lim is not None:
ax3.set_ylim(0,pop.entropy_lim)
ax3.tick_params(labelsize=15)
if pop.y_lim is not None:
y_lim = pop.y_lim
else:
y_lim = np.max(counts) + 0.05*np.max(counts)
if pop.counts_log_scale:
ax1.set_yscale('log')
# ax1.set_ylim(1,5*10**5)
else:
ax1.set_ylim(0,y_lim)
xlabels = ax1.get_xticks()
xlabels = xlabels*pop.timestep_scale
xlabels = xlabels/24
xlabels = np.array(xlabels).astype('int')
ax1.set_xticklabels(xlabels)
ax1.set_xlabel('Days',fontsize=20)
plt.show()
return fig
def plot_fitness_curves(pop,
fig_title='',
plot_r0 = False,
save=False,
savename=None,
fig=None,
ax=None,
labelsize=15,
linewidth=3,
show_legend=True,
show_axes_labels=True,
color_kwargs={}):
if ax is None:
fig, ax = plt.subplots(figsize = (10,6))
conc = np.logspace(-3,5,200)
cc = gen_color_cycler(**color_kwargs)
ax.set_prop_cycle(cc)
fit = np.zeros((pop.n_genotype,conc.shape[0]))
for j in range(conc.shape[0]):
fit[:,j] = pop.gen_fit_land(conc[j])
if plot_r0:
fit = fit-pop.death_rate
ylabel = '$R_{0}$'
thresh = np.ones(conc.shape)
ax.plot(conc,thresh,linestyle='dashdot',color='black',linewidth=linewidth)
else:
ylabel = 'Growth Rate'
for gen in range(pop.n_genotype):
ax.plot(conc,fit[gen,:],linewidth=linewidth,label=str(pop.int_to_binary(gen)))
if show_legend:
ax.legend(fontsize=labelsize,frameon=False,loc=(1,-.10))
ax.set_xscale('log')
ax.set_title(fig_title,fontsize=labelsize)
ax.tick_params(labelsize=labelsize)
if show_axes_labels:
ax.set_xlabel('Drug concentration ($\mathrm{\mu}$M)',fontsize=labelsize)
ax.set_ylabel(ylabel,fontsize=labelsize)
ax.set_frame_on(False)
if save:
if savename is None:
savename = 'fitness_seascape.pdf'
r = dir_manager.get_project_root()
savename = str(r) + os.sep + 'figures' + os.sep + savename
plt.savefig(savename,bbox_inches="tight")
return fig,ax
def plot_msw(pop,wt,conc=None,fc=None):
"""
    plot_msw: method for plotting mutant selection window figures.
    Parameters
    ----------
    pop : population_class object
        Population object providing the fitness model, gen_neighbors() and
        int_to_binary().
    wt : int
        Genotype treated as the wild type; an MSW panel is drawn against each
        of its one-mutation neighbors.
    conc : numpy array, optional
        Drug concentrations used to evaluate the fitness curves. Defaults to
        np.logspace(-3, 5, 1000).
    fc : fitness curves indexed by genotype, optional
        Precomputed output of fitness.gen_fitness_curves(pop, conc). Computed
        from pop and conc if not given.
    Returns
    -------
    fig : figure object
        MSW figure.
"""
if conc is None:
conc = np.logspace(-3,5,1000)
if fc is None:
fc = fitness.gen_fitness_curves(pop,conc)
rows = int((pop.n_allele)/2)
fig, ax = plt.subplots(rows,2)
neighbors = pop.gen_neighbors(wt)
wt_fitness_curve = fc[wt]
i = 0
for r in range(rows):
for col in range(2):
n = neighbors[i]
wtlabel = pop.int_to_binary(wt) + ' (wt)'
ax[r,col].plot(conc,wt_fitness_curve,label=wtlabel,linewidth=3)
bitstring = pop.int_to_binary(n)
ax[r,col].plot(conc,fc[n],label=bitstring,linewidth=3)
msw_left,msw_right = get_msw(wt_fitness_curve,fc[n],conc)
ax[r,col].axvspan(msw_left, msw_right,
facecolor='#2ca02c',alpha=0.5,
label='MSW')
ax[r,col].set_xscale('log')
ax[r,col].legend(fontsize=10,frameon=False)
i+=1
for r in range(rows):
ax[r,0].set_ylabel('$R_{0}$',fontsize=10)
for c in range(2):
ax[rows-1,c].set_xlabel('Drug concentration ($\mathrm{\mu}$M)',
fontsize=10)
"""
n_genotype = pop.n_genotype
rows = int((n_genotype-1)/2)
fig, ax = plt.subplots(rows,2)
g = 1
wt_fitness_curve = fitness_curves[:,0]
for r in range(rows):
for col in range(2):
ax[r,col].plot(conc,wt_fitness_curve,label='wt',linewidth=3)
cur_fitness_curve = fitness_curves[:,g]
gt = genotypes[g]
bitstring = pop.int_to_binary(gt)
ax[r,col].plot(conc,cur_fitness_curve,label=bitstring,linewidth=3)
msw_left_assigned = False
msw_right_assigned = False
if wt_fitness_curve[0] > cur_fitness_curve[0] \
and any(cur_fitness_curve>wt_fitness_curve):
for c in range(len(conc)):
if wt_fitness_curve[c] < cur_fitness_curve[c] \
and msw_left_assigned is False:
msw_left = conc[c]
msw_left_assigned = True
if (cur_fitness_curve[c] < 1
and msw_right_assigned is False):
msw_right = conc[c]
msw_right_assigned = True
if msw_left < msw_right:
ax[r,col].axvspan(msw_left, msw_right,
facecolor='#2ca02c',alpha=0.5,
label='MSW')
ax[r,col].set_xscale('log')
ax[r,col].legend(fontsize=10,frameon=False)
g+=1
for r in range(rows):
ax[r,0].set_ylabel('$R_{0}$',fontsize=10)
for c in range(2):
ax[rows-1,c].set_xlabel('Drug concentration ($\mathrm{\mu}$M)',
fontsize=10)
if save:
r = dir_manager.get_project_root()
savename = str(r) + os.sep + 'figures' + os.sep + 'msw.pdf'
plt.savefig(savename,bbox_inches="tight")
"""
return fig
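# Minimal usage sketch for plot_msw (illustrative only, not used elsewhere in this
# module): `pop` is assumed to be a population object exposing gen_neighbors(),
# int_to_binary() and the fitness model consumed by fitness.gen_fitness_curves().
def example_msw_figure(pop, wild_type=0):
    """Plot mutant selection windows for the one-mutation neighbors of wild_type."""
    conc = np.logspace(-3, 5, 1000)             # drug concentrations (uM)
    fc = fitness.gen_fitness_curves(pop, conc)  # same helper plot_msw falls back to
    return plot_msw(pop, wild_type, conc=conc, fc=fc)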
def plot_timecourse_to_axes(pop,
counts,
counts_ax,
drug_curve=None,
drug_curve_label='Drug Concentration \n($\u03BC$M)',
drug_curve_legend_label = None,
# drug_curve_linestyle='--',
drug_ax_sci_notation=False,
drug_ax=None,
labelsize=15,
linewidth=3,
legend_labels = True,
label_lines = False,
select_labels=None,
label_xpos = None,
grayscale=False,
legend_size=8,
color_kwargs = {},
drug_kwargs = {},
label_kwargs={},
**kwargs):
"""
Plots simulation timecourse to user defined axes (counts_ax).
Parameters
----------
pop : Population class object
Population class object containing population visualization options.
counts : numpy array
Simulation data to be plotted.
counts_ax : matplotlib axes object
Axes on which data is plotted.
drug_curve : numpy array, optional
Optional drug concentration curve to plot. Requires additional drug
axes. The default is None.
drug_ax : matplotlib axes, optional
Axes on which drug curve is plotted. The default is None.
labelsize : float, optional
Font size of the labels. The default is 15.
linewidth : float, optional
Width parameter passed to matplotlib plot function. The default is 3.
Raises
------
Exception
Error given if no drug axes are provided but the drug curve is not
None (drug data needs drug axes to plot to).
Returns
-------
counts_ax : matplotlib axes
Axes with counts data plotted.
drug_ax : matplotlib axes
Axes with drug curve data plotted.
"""
counts_total = np.sum(counts,axis=0)
sorted_index = counts_total.argsort()
sorted_index_big = sorted_index[-legend_size:]
if grayscale is False:
cc = gen_color_cycler(**color_kwargs)
counts_ax.set_prop_cycle(cc)
if drug_curve is not None:
if 'color' in drug_kwargs:
color = drug_kwargs['color']
else:
color='black'
if drug_ax is None:
drug_ax = counts_ax.twinx() # ax2 is the drug timecourse
if drug_curve_label is None:
yax_label = ''
else:
yax_label = drug_curve_label
drug_ax.set_ylabel(yax_label,
color=color,fontsize=labelsize)
drug_ax.plot(drug_curve,zorder=0,**drug_kwargs)
if pop.drug_log_scale:
drug_ax.set_yscale('log')
if min(drug_curve) <= 0:
axmin = 10**-3
else:
axmin = min(drug_curve)
drug_ax.set_ylim(axmin,2*max(drug_curve))
else:
drug_ax.set_ylim(0,1.1*max(drug_curve))
# drug_ax.yaxis.label.set_color('gray')
drug_ax.tick_params(labelsize=labelsize,color=color)
plt.setp(drug_ax.get_yticklabels(), color=color)
if drug_ax_sci_notation:
drug_ax.ticklabel_format(style='scientific',axis='y',
scilimits=(0,3))
for genotype in range(counts.shape[1]):
if genotype in sorted_index_big:
if legend_labels:
counts_ax.plot(counts[:,genotype],linewidth=linewidth,
zorder=10,
label=str(pop.int_to_binary(genotype)),
**kwargs)
else:
counts_ax.plot(counts[:,genotype],linewidth=linewidth,
zorder=10,
**kwargs)
else:
counts_ax.plot(counts[:,genotype],linewidth=linewidth,
zorder=10,
label=None)
if pop.counts_log_scale:
counts_ax.set_yscale('log')
yl = counts_ax.get_ylim()
yl = [10**1,yl[1]]
counts_ax.set_ylim(yl)
counts_ax.set_xlim(0,pop.x_lim)
counts_ax.set_facecolor(color='w')
counts_ax.grid(False)
# counts_ax.set_ylabel('Cells',fontsize=20)
counts_ax.tick_params(labelsize=labelsize)
xticks = counts_ax.get_xticks()
xlabels = xticks
xlabels = xlabels*pop.timestep_scale
xlabels = xlabels/24
xlabels = np.array(xlabels).astype('int')
counts_ax.set_xticks(xticks)
counts_ax.set_xticklabels(xlabels)
xl = [0,len(counts[:,0])]
counts_ax.set_xlim(xl)
counts_ax.spines["top"].set_visible(False)
counts_ax.spines["right"].set_visible(False)
counts_ax.patch.set_alpha(0)
if drug_ax is not None:
drug_ax.zorder = 0
# counts_ax.zorder = 10
if label_lines:
lines = counts_ax.get_lines()
for i in range(len(label_xpos)):
sl = select_labels[i]
labelLine(lines[sl],label_xpos[i],
fontsize=5,
zorder=100,
outline_color='white',
outline_width=6,
**label_kwargs)
return counts_ax, drug_ax
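# Minimal usage sketch for plot_timecourse_to_axes (illustrative only): `pop` is a
# hypothetical Population object and `counts`/`drug_curve` are assumed to be the
# arrays produced by a finished simulation run.
def example_timecourse_figure(pop, counts, drug_curve=None):
    """Plot a simulated timecourse (and optional drug curve) on a fresh figure."""
    fig, ax = plt.subplots(figsize=(6, 4))
    counts_ax, drug_ax = plot_timecourse_to_axes(pop, counts, ax,
                                                 drug_curve=drug_curve)
    return fig, counts_ax, drug_ax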
def plot_landscape(p,conc=10**0,
fitness=None,
relative=True,
rank=True,
ax=None,
ignore_zero=False,
colorbar_lim=None,
colorbar=True,
node_size = 800,
textsize=11,
resize_param=0.2,
square=False,
textcolor='black',
cbax=None,
cblabel='',
cbloc = [0.1,0.8,0.3,0.5],
**kwargs):
"""
Plots a graph representation of this landscape on the current matplotlib figure.
If p is set to a vector of occupation probabilities, the edges in the graph will
have thickness proportional to the transition probability between nodes.
"""
if fitness is None:
fitness = p.gen_fit_land(conc)
if relative:
fitness = fitness-min(fitness)
fitness = fitness/max(fitness)
if ax is None:
fig,ax=plt.subplots()
if rank:
fitness = scipy.stats.rankdata(fitness)
cblabel = 'Rank'
if ignore_zero:
fitness_t = [f==0 for f in fitness]
        fitness[fitness==0] = np.nan
# Figure out the length of the bit sequences we're working with
N = int(np.log2(len(fitness)))
# Generate all possible N-bit sequences
genotypes = np.arange(2**N)
genotypes = [p.int_to_binary(g) for g in genotypes]
# Turn the unique bit sequences array into a list of tuples with the bit sequence and its corresponding fitness
# The tuples can still be used as nodes because they are hashable objects
genotypes = [(genotypes[i], fitness[i]) for i in range(len(genotypes))]
# Build hierarchical structure for N-bit sequences that differ by 1 bit at each level
hierarchy = [[] for i in range(N+1)]
for g in genotypes: hierarchy[g[0].count("1")].append(g)
# Add all unique bit sequences as nodes to the graph
G = nx.DiGraph()
G.add_nodes_from(genotypes)
# Add edges with appropriate weights depending on the TM
TM = p.random_mutations(len(genotypes))
for i in range(len(TM)):
for j in range(len(TM[i])):
if TM[i][j] != 0 and i != j:
G.add_edge(genotypes[i], genotypes[j], weight=1)
# just using spring layout to generate an initial dummy pos dict
pos = nx.spring_layout(G)
    # # calculate how many entries in the longest row, it will be N choose N/2
# # because the longest row will have every possible way of putting N/2 1s (or 0s) into N bits
maxLen = math.factorial(N) / math.factorial(N//2)**2
# Position the nodes in a layered hierarchical structure by modifying pos dict
y = 1
for row in hierarchy:
if len(row) > maxLen: maxLen = len(row)
for i in range(len(hierarchy)):
levelLen = len(hierarchy[i])
# algorithm for horizontal spacing.. may not be 100% correct?
offset = (maxLen - levelLen + 1) / maxLen
xs = np.linspace(0 + offset / 2, 1 - offset / 2, levelLen)
for j in range(len(hierarchy[i])):
pos[hierarchy[i][j]] = (xs[j], y)
y -= 1 / N
labels = dict(pos)
for k in labels.keys():
labels[k] = k[0]
xy = np.asarray([pos[v] for v in list(G)])
# draw edges
edgelist = list(G.edges())
edge_pos = np.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
edge_collection = LineCollection(
edge_pos,
linewidths=1,
antialiaseds=(1,),
linestyle='solid',
zorder=1)
edge_collection.set_zorder(1)
ax.add_collection(edge_collection)
# draw nodes
if colorbar_lim is not None:
vmin = colorbar_lim[0]
vmax = colorbar_lim[1]
else:
vmin=min(fitness)
vmax=max(fitness)
ax.scatter(xy[:,0],xy[:,1],
s=node_size,
c=fitness,
vmin=vmin,
vmax=vmax,
clip_on=False,
**kwargs)
# if you don't want to include nodes with fitness = 0
if ignore_zero:
fitness_t = np.array(fitness_t)
indx = np.argwhere(fitness_t==True)
for i in indx:
ax.scatter(xy[i,0],xy[i,1],
s=node_size,
c='gray',
clip_on=False,
**kwargs)
if textcolor is not None:
for n, label in labels.items():
(x, y) = pos[n]
if not isinstance(label, str):
label = str(label) # this makes "1" and 1 labeled the same
ax.text(
x,
y,
label,
size=textsize,
color=textcolor,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transData,
clip_on=True,
)
# display colorbar
if colorbar:
if 'cmap' in kwargs:
cmap=kwargs['cmap']
sm = plt.cm.ScalarMappable(cmap=cmap,
norm=plt.Normalize(vmin = vmin, vmax=vmax))
else:
sm = plt.cm.ScalarMappable(norm=plt.Normalize(
vmin = vmin, vmax=vmax))
sm._A = []
cbax = ax.inset_axes(cbloc)
cbax.set_frame_on(False)
cbax.set_xticks([])
cbax.set_yticks([])
cb = plt.colorbar(sm,
drawedges=False,
ax=cbax,
location='right',
aspect=10)
cb.outline.set_visible(False)
cb.set_label(cblabel,fontsize=8)
if rank:
ticks = [min(fitness),max(fitness)]
cb.set_ticks(ticks)
ticks = [max(fitness),min(fitness)]
ticks = np.array(ticks).astype('int')
ticks = [str(t) for t in ticks]
cb.set_ticklabels(ticks)
if square:
ydata_range = max(xy[:,1])-min(xy[:,1])
xdata_range = max(xy[:,0])-min(xy[:,0])
ax.set_aspect(xdata_range/ydata_range)
xl = ax.get_xlim()
xrange = xl[1]-xl[0]
xl = [xl[0]-resize_param*xrange,xl[1]+xrange*resize_param]
ax.set_xlim(xl)
yl = ax.get_ylim()
yrange = yl[1]-yl[0]
yl = [yl[0]-resize_param*yrange,yl[1]+yrange*resize_param]
ax.set_ylim(yl)
ax.set_axis_off()
return ax
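# Minimal usage sketch for plot_landscape (illustrative only): `pop` is a
# hypothetical Population object; the concentration and colormap are arbitrary.
def example_landscape_figure(pop, conc=1.0):
    """Draw the genotype fitness-landscape graph at a single drug concentration."""
    fig, ax = plt.subplots(figsize=(5, 5))
    ax = plot_landscape(pop, conc=conc, ax=ax, square=True, cmap='viridis')
    return fig, ax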
def add_landscape_to_fitness_curve(c,ax,pop,
textcolor='gray',
colorbar=False,
square=True,
vert_lines=True,
position = 'top',
pad = 0,
vert_lines_ydata = None,
**kwargs):
if position == 'top':
ypos = 1+pad
elif position == 'bottom':
ypos = -1-pad
else:
raise Exception('Position argument not recognized')
x = get_pos_in_log_space(c, 3)
l = ax.inset_axes([x[0],ypos,x[1]-x[0],0.5],transform=ax.transData)
l = plot_landscape(pop,c,ax=l,node_size=200,
colorbar=colorbar,
textcolor=textcolor,
square=square,
**kwargs)
if vert_lines:
if vert_lines_ydata is None:
yl = ax.get_ylim()
ydata = np.arange(yl[0],yl[1],0.1)
else:
ydata = vert_lines_ydata
xdata = np.ones(len(ydata))*c
ax.plot(xdata,ydata,'--',color='black',alpha=0.5)
return l
def plot_population_count(pop,
c,
ax=None,
thresh=None,
normalize=False,
max_cells=None,
logscale=True,
**kwargs):
if ax is None:
fig,ax = plt.subplots(figsize=(6,4))
if thresh is None:
thresh = pop.max_cells/10
c1 = [245,100,100]
c1 = [c/255 for c in c1]
c2 = [100,100,245]
c2 = [c/255 for c in c2]
if c[-1] < thresh:
if normalize:
c = c/pop.max_cells
ax.plot(c,color=c2,label='extinct',**kwargs)
else:
if normalize:
c = c/pop.max_cells
ax.plot(c,color=c1,label='resistant',**kwargs)
xticks = ax.get_xticks()
xlabels = xticks
xlabels = xlabels*pop.timestep_scale
xlabels = xlabels/24
xlabels = np.array(xlabels).astype('int')
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
if logscale:
ax.set_yscale('log')
return ax
def plot_kaplan_meier(pop,
event_times,
label=None,
t_max=None,
n_sims=None,
ax=None,
mode='resistant',
errorband=True,
**kwargs):
if t_max is None:
t_max = int(max(event_times)) # hours
if n_sims is None:
n_sims = pop.n_sims
survival_curve = np.ones(t_max)*100
for t in range(len(survival_curve)-1):
if t>0:
survival_curve[t] = survival_curve[t-1]
if any(event_times==t):
num = np.argwhere(event_times==t)
num = num.shape[0]
perc = 100*num/n_sims
survival_curve[t] = survival_curve[t]-perc
survival_curve[-1] = survival_curve[-2]
if ax is None:
        fig,ax = plt.subplots(figsize=(5,7))
if mode == 'resistant':
survival_curve = 100-survival_curve
ylabel='% resistant'
else:
ylabel='% survival'
ax.plot(survival_curve,label=label,**kwargs)
if errorband:
# compute error bars
# rule of succession explanation: https://en.wikipedia.org/wiki/Rule_of_succession
err = np.zeros(t_max)
for t in range(t_max):
            # survival_curve is stored in percent; convert back to a count of
            # simulations before applying the rule of succession (uniform prior)
            p = (survival_curve[t]*n_sims/100 + 1)/(n_sims + 2)
n = n_sims
q = 1-p
# standard deviation of the estimator of the parameter of a binomial distribution
err[t] = 100*(p*q/n)**0.5 #
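            # e.g. with n_sims = 100 and 50 % survival at time t:
            # p = 51/102 ~ 0.5, so err[t] = 100*sqrt(0.5*0.5/100) = 5 percentage points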
t = np.arange(t_max)
ax.fill_between(t,survival_curve-err,survival_curve+err,alpha=0.4)
xticks = ax.get_xticks()
xlabels = xticks
xlabels = xlabels/24
xlabels = np.array(xlabels).astype('int')
ax.set_xticks(xticks)
ax.set_xticklabels(xlabels)
xl = [0,len(survival_curve)]
ax.set_xlim(xl)
ax.set_ylim([0,100])
ax.set_ylabel(ylabel)
ax.set_xlabel('Days')
return ax
def get_msw(wt_fitness_curve,cur_fitness_curve,conc):
msw_left =
|
np.argwhere(wt_fitness_curve<cur_fitness_curve)
|
numpy.argwhere
|
import warnings
import pyfar.dsp.filter as filt
import numpy as np
def sti(signal, data_type=None, gender='male', level=None, snr=None, amb=True):
"""
Calculation of the speech transmission index (STI).
    Returns a np array with the female or male STI, a single-number value
    on a metric scale between 0 (bad) and 1 (excellent) for quality assessment
    of speech transmission channels.
    The indices are based on the modulation transfer function (MTF), which
    quantifies how the intensity envelope is degraded throughout the
    transmission. The MTF values are assessed from the IR and are further
    modified based on auditory, ambient noise and masking aspects.
STI considers 7 octaves between 125 Hz and 8 kHz (125 Hz is not considered
for the female STI) and 14 modulation frequencies between 0.63 Hz and
12 Hz.
References
----------
.. [1] IEC 60268-16:2011
Sound system equipment - Part 16: Objective rating of speech
intelligibility by speech transmission index
.. [2] IEC 60268-16/Ed.5: 2019-08 (DRAFT)
Sound system equipment - Part 16: Objective rating of speech
intelligibility by speech transmission index
============================
Parameters
---------
signal : Signal
The impulse responses (IR) to be analyzed. Length must be at least
1.6 s and not shorter than 1/2 RT60. [1], section 6.2
data_type : 'electrical', 'acoustical'
        Determines whether input signals are obtained acoustically or
electrically. Auditory effects can only be considered when "acoustical"
[1], section A.3.1. Default is 'None'.
gender: 'female', 'male'
Defines the applied weighting factors. Default is 'male' because the
STI is more critical in this case due to the expanded low frequency
range of the male voice.
level: np.array, None
Level of the test signal without any present noise sources.
Given in 7 octave bands 125 Hz - 8000 Hz in dB_SPL. Np array with
7 elements per row and rows for all given IR. See [1], section A.3.2
snr: np.array, None
Ratio between test signal level (see above) and noise level when
        the test source is turned off. Given in 7 octave bands 125 Hz - 8000 Hz
in dB_SPL. Np array with 7 elements per row and rows for all given IR.
See [1], section 3
amb: bool, True
Consideration of ambient noise effects as proposed in [2],
section A.2.3. Default is True.
"""
# preprocess and verify input data
sig, inp_sig_oct, inp_da_ty, inp_gen, inp_lvl, inp_snr, inp_amb = \
preprocess(signal, data_type, gender, level, snr, amb)
# calculate IR for 14 modulation frequencies in 7 octave bands
mtf_data = mtf(inp_sig_oct, inp_da_ty, inp_lvl, inp_snr, inp_amb)
# calculate sti from MTF
sti_data = sti_calc(mtf_data, signal, inp_gen)
# return result
return sti_data
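# Minimal usage sketch (illustrative only, assuming pyfar's Signal container and a
# 44.1 kHz sampling rate; real use would pass a measured impulse response):
def _example_sti_usage():
    import pyfar as pf
    fs = 44100
    ir = np.zeros(int(1.6*fs))      # 1.6 s is the minimum IR length checked below
    ir[0] = 1.0                     # idealized, noise-free impulse response
    sig = pf.Signal(ir, fs)
    return sti(sig, data_type='acoustical', gender='male')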
def preprocess(signal, data_type=None, gender='male', level=None, snr=None,
amb=True):
# get flattened signal copy
sig = signal.copy().flatten()
# check / flatten snr
if snr is not None:
snr = np.asarray(snr).flatten()
if np.squeeze(snr.flatten().shape)/7 != (np.squeeze(sig.cshape)):
raise ValueError("SNR consists of wrong number of components.")
if np.any(snr < 20):
warnings.warn("SNR should be at least 20 dB for every octave "
"band.")
snr = np.reshape(snr, (-1, 7)).T
# set snr to infinity if not given
else:
snr = np.ones([7, np.squeeze(sig.cshape)])*np.inf
# check / flatten level
if level is not None:
level = np.asarray(level).flatten()
if np.squeeze(level.flatten().shape)/7 != (np.squeeze(sig.cshape)):
raise ValueError("Level consists of wrong number of components.")
level = np.reshape(level, (-1, 7)).T
# check for sufficient signal length ([1], section 6.2)
if signal.n_samples/sig.sampling_rate < 1.6:
warnings.warn("Signal length below 1.6 seconds.")
# check data_type
if data_type is None:
warnings.warn("Data type is considered as acoustical. Consideration "
"of masking effects not valid for electrically obtained "
"signals.")
data_type = "acoustical"
if data_type not in ["electrical", "acoustical"]:
raise ValueError(f"Data_type is '{data_type}' but must be "
"'electrical' or 'acoustical'.")
# check gender
if gender not in ["male", "female"]:
raise ValueError(f"Gender is '{gender}' but must be 'male' "
"or 'female'.")
    # apply octave band filters (preliminary implementation with crossover
    # filters; later: perfectly reconstructing octave filter bank)
sig_oct = (filt.fractional_octave_bands(sig, num_fractions=1,
freq_range=(125, 8e3)))
return sig, sig_oct, data_type, gender, level, snr, amb
def mtf(sig_oct, data_type, level, snr, amb):
# MTF per octave and modulation frequency ([1], section 6.1)
mf = [0.63, 0.80, 1, 1.25, 1.60, 2, 2.5, 3.15, 4, 5, 6.3, 8, 10, 12]
mtf = np.zeros((len(mf),)+sig_oct.cshape)
sig_en = np.sum(sig_oct.time**2, axis=-1)
t = np.arange(sig_oct.n_samples)
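    # note: the modulation kernel below assumes a fixed sampling rate of 44100 Hz
    # (the hard-coded t/44100 term) rather than reading it from the signal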
with np.errstate(divide='ignore'): # return nan for empty IR
for i, f in enumerate(mf):
mtf[i] = np.abs(np.sum(sig_oct.time**2*np.exp(-2*1j*np.pi*mf[i] *
t/44100),
axis=-1))/sig_en * np.squeeze(1/(1+10 **
(-snr/10)))
# Adjustment of mtf for ambient noise, auditory masking and threshold
# effects ([1], sections A.3, A.5.3)
if level is not None:
# overall intensity ([1], section A.3.2)
i_k = 10**(level/10)+10**((level-snr)/10)
# apply ambient noise effects (proposed in [2], section A.2.3)
if amb is True:
mtf = mtf*(10**(np.squeeze(level)/10)/np.squeeze(i_k))
# consideration of auditory effects only for acoustical signals
# ([1], section A.3.1)
if data_type == "electrical":
pass
else:
# level-dependent auditory masking ([1], section A.3.2)
amdb = level.copy()
amdb[amdb < 63] = 0.5*amdb[amdb < 63]-65
amdb[(63 <= amdb) & (amdb < 67)] = 1.8*amdb[(63 <= amdb) &
(amdb < 67)]-146.9
amdb[(67 <= amdb) & (amdb < 100)] = 0.5*amdb[(67 <= amdb) &
(amdb < 100)]-59.8
amdb[100 <= amdb] = amdb[100 <= amdb]-10
amf = 10**(amdb/10)
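            # e.g. a 60 dB octave-band level gives amdb = 0.5*60 - 65 = -35 dB,
            # i.e. a masking factor amf = 10**(-3.5)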
# masking intensity
i_am = np.zeros(i_k.shape)
i_am = i_k*amf
# absolute speech reception threshold ([1], section A.3.3)
artdb = np.array([[46, 27, 12, 6.5, 7.5, 8, 12]]).T
i_rt = 10**(artdb/10)
# apply auditory and masking effects ([1], section A.5.3)
i_T = i_k/(i_k+i_am+i_rt)
i_T = np.squeeze(i_T)
mtf = mtf*i_T
# limit mtf to 1 ([1], section A.5.3)
mtf[mtf > 1] = 1
return mtf
def sti_calc(mtf, signal, gender):
# effective SNR per octave and modulation frequency ([1], section A.5.4)
with np.errstate(divide='ignore'):
snr_eff = 10*np.log10(mtf/(1-mtf))
# min value: -15 dB, max. value +15 dB
snr_eff[snr_eff < -15] = -15
snr_eff[snr_eff > 15] = 15
# transmission index (TI) per octave and modulation frequency ([1],
# section A.5.5)
ti = ((snr_eff+15)/30)
# modulation transmission indices (MTI) per octave ([1], section A.5.6)
mti = (np.array(1/14*np.sum(ti, axis=0))).reshape(7, signal.flatten()
.cshape[-1])
# speech transmission index (STI) ([1], section A.5.6)
if gender == "female":
alpha = np.array([[0], [0.117], [0.223], [0.216], [0.328], [0.250],
[0.194]])
beta =
|
np.array([[0], [0.099], [0.066], [0.062], [0.025], [0.076]])
|
numpy.array
|
"""
QTNM base field module.
Provides the abstract classes, QtnmBaseField and QtnmBaseSolver.
New concrete implementations of this class should be compatible with
other python code within the Electron-Tracking package.
"""
from abc import ABC, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import electron_mass as me, elementary_charge as qe
from scipy.integrate import solve_ivp
from utils import calculate_omega
class QtnmBaseSolver(ABC):
def __init__(self, charge=-qe, mass=me, b_field=1.0, calc_b_field=None):
self.mass = mass
self.charge = charge
self.b_field = b_field
self.calc_b_field = calc_b_field
if calc_b_field is not None:
# Handle cases where calc_b_field returns a single component
if np.size(calc_b_field(0, 0, 0)) == 1:
self.calc_b_field = lambda x, y, z: \
np.array([0.0, 0.0, calc_b_field(x, y, z)])
# If calc_b_field not provided, assume constant field, and store omega
if calc_b_field is None:
omega0 = calculate_omega(b_field, mass=mass, charge=charge)
if
|
np.size(omega0)
|
numpy.size
|
from argparse import ArgumentParser
import sys
import h5py
import json
import copy
import importlib
import numpy as np
from enum import Enum
import random
from typing import List, Dict, Tuple
from collections import OrderedDict
from weighted_collection import WeightedCollection
from tdw.tdw_utils import TDWUtils
from tdw.librarian import ModelRecord, MaterialLibrarian
from tdw.output_data import OutputData, Transforms, Images, CameraMatrices
from tdw_physics.rigidbodies_dataset import (RigidbodiesDataset,
get_random_xyz_transform,
get_range,
handle_random_transform_args)
from tdw_physics.util import (MODEL_LIBRARIES, FLEX_MODELS, MODEL_CATEGORIES,
MATERIAL_TYPES, MATERIAL_NAMES,
get_parser,
xyz_to_arr, arr_to_xyz, str_to_xyz,
none_or_str, none_or_int, int_or_bool)
from tdw_physics.postprocessing.labels import get_all_label_funcs
PRIMITIVE_NAMES = [r.name for r in MODEL_LIBRARIES['models_flex.json'].records if not r.do_not_use]
FULL_NAMES = [r.name for r in MODEL_LIBRARIES['models_full.json'].records if not r.do_not_use]
def get_args(dataset_dir: str, parse=True):
"""
Combine Domino-specific arguments with controller-common arguments
"""
common = get_parser(dataset_dir, get_help=False)
parser = ArgumentParser(parents=[common], add_help=parse, fromfile_prefix_chars='@')
parser.add_argument("--num_middle_objects",
type=int,
default=3,
help="The number of middle objects to place")
parser.add_argument("--zone",
type=str,
default="cube",
help="comma-separated list of possible target zone shapes")
parser.add_argument("--target",
type=str,
default="cube",
help="comma-separated list of possible target objects")
parser.add_argument("--probe",
type=str,
default="cube",
help="comma-separated list of possible probe objects")
parser.add_argument("--middle",
type=str,
default=None,
help="comma-separated list of possible middle objects; default to same as target")
parser.add_argument("--ramp",
type=int,
default=0,
help="Whether to place the probe object on the top of a ramp")
parser.add_argument("--rscale",
type=none_or_str,
default=None,
help="The xyz scale of the ramp")
parser.add_argument("--rfriction",
action="store_true",
help="Whether the ramp has friction")
parser.add_argument("--zscale",
type=str,
default="0.5,0.01,2.0",
help="scale of target zone")
parser.add_argument("--zlocation",
type=none_or_str,
default=None,
help="Where to place the target zone. None will default to a scenario-specific place.")
parser.add_argument("--zfriction",
type=float,
default=0.1,
help="Static and dynamic friction on the target zone.")
parser.add_argument("--tscale",
type=str,
default="0.1,0.5,0.25",
help="scale of target objects")
parser.add_argument("--trot",
type=str,
default="[0,0]",
help="comma separated list of initial target rotation values")
parser.add_argument("--mrot",
type=str,
default="[-30,30]",
help="comma separated list of initial middle object rotation values")
parser.add_argument("--prot",
type=str,
default="[0,0]",
help="comma separated list of initial probe rotation values")
parser.add_argument("--phorizontal",
type=int_or_bool,
default=0,
help="whether the probe is horizontal")
parser.add_argument("--mscale",
type=str,
default="0.1,0.5,0.25",
help="Scale or scale range for middle objects")
parser.add_argument("--mmass",
type=str,
default="2.0",
help="Scale or scale range for middle objects")
parser.add_argument("--horizontal",
type=int_or_bool,
default=0,
help="Whether to rotate middle objects horizontally")
parser.add_argument("--pscale",
type=str,
default="0.1,0.5,0.25",
help="scale of probe objects")
parser.add_argument("--pmass",
type=str,
default="2.0",
help="scale of probe objects")
parser.add_argument("--fscale",
type=str,
default="2.0",
help="range of scales to apply to push force")
parser.add_argument("--frot",
type=str,
default="[0,0]",
help="range of angles in xz plane to apply push force")
parser.add_argument("--foffset",
type=str,
default="0.0,0.8,0.0",
help="offset from probe centroid from which to apply force, relative to probe scale")
parser.add_argument("--fjitter",
type=float,
default=0.0,
help="jitter around object centroid to apply force")
parser.add_argument("--fwait",
type=none_or_str,
default="[0,0]",
help="How many frames to wait before applying the force")
parser.add_argument("--tcolor",
type=none_or_str,
default="1.0,0.0,0.0",
help="comma-separated R,G,B values for the target object color. None to random.")
parser.add_argument("--zcolor",
type=none_or_str,
default="1.0,1.0,0.0",
help="comma-separated R,G,B values for the target zone color. None is random")
parser.add_argument("--rcolor",
type=none_or_str,
default="0.75,0.75,1.0",
help="comma-separated R,G,B values for the target zone color. None is random")
parser.add_argument("--pcolor",
type=none_or_str,
default="0.0,1.0,1.0",
help="comma-separated R,G,B values for the probe object color. None is random.")
parser.add_argument("--mcolor",
type=none_or_str,
default=None,
help="comma-separated R,G,B values for the middle object color. None is random.")
parser.add_argument("--collision_axis_length",
type=float,
default=2.0,
help="Length of spacing between probe and target objects at initialization.")
parser.add_argument("--spacing_jitter",
type=float,
default=0.2,
help="jitter in how to space middle objects, as a fraction of uniform spacing")
parser.add_argument("--lateral_jitter",
type=float,
default=0.2,
help="lateral jitter in how to space middle objects, as a fraction of object width")
parser.add_argument("--remove_target",
type=int_or_bool,
default=0,
help="Don't actually put the target object in the scene.")
parser.add_argument("--remove_zone",
type=int_or_bool,
default=0,
help="Don't actually put the target zone in the scene.")
parser.add_argument("--camera_distance",
type=none_or_str,
default="1.75",
help="radial distance from camera to centerpoint")
parser.add_argument("--camera_min_height",
type=float,
default=0.75,
help="min height of camera")
parser.add_argument("--camera_max_height",
type=float,
default=2.0,
help="max height of camera")
parser.add_argument("--camera_min_angle",
type=float,
default=45,
help="minimum angle of camera rotation around centerpoint")
parser.add_argument("--camera_max_angle",
type=float,
default=225,
help="maximum angle of camera rotation around centerpoint")
parser.add_argument("--camera_left_right_reflections",
action="store_true",
help="Whether camera angle range includes reflections along the collision axis")
parser.add_argument("--material_types",
type=none_or_str,
default="Wood,Metal,Plastic",
help="Which class of materials to sample material names from")
parser.add_argument("--tmaterial",
type=none_or_str,
default="parquet_wood_red_cedar",
help="Material name for target. If None, samples from material_type")
parser.add_argument("--zmaterial",
type=none_or_str,
default="wood_european_ash",
help="Material name for target. If None, samples from material_type")
parser.add_argument("--rmaterial",
type=none_or_str,
default=None,
help="Material name for ramp. If None, same as zone material")
parser.add_argument("--pmaterial",
type=none_or_str,
default="parquet_wood_red_cedar",
help="Material name for probe. If None, samples from material_type")
parser.add_argument("--pfriction",
action="store_true",
help="Whether the probe object has friction")
parser.add_argument("--mmaterial",
type=none_or_str,
default="parquet_wood_red_cedar",
help="Material name for middle objects. If None, samples from material_type")
parser.add_argument("--distractor",
type=none_or_str,
default="core",
help="The names or library of distractor objects to use")
parser.add_argument("--distractor_categories",
type=none_or_str,
help="The categories of distractors to choose from (comma-separated)")
parser.add_argument("--num_distractors",
type=int,
default=0,
help="The number of background distractor objects to place")
parser.add_argument("--distractor_aspect_ratio",
type=none_or_str,
default=None,
help="The range of valid distractor aspect ratios")
parser.add_argument("--occluder",
type=none_or_str,
default="core",
help="The names or library of occluder objects to use")
parser.add_argument("--occluder_categories",
type=none_or_str,
help="The categories of occluders to choose from (comma-separated)")
parser.add_argument("--num_occluders",
type=int,
default=0,
help="The number of foreground occluder objects to place")
parser.add_argument("--occlusion_scale",
type=float,
default=0.75,
help="The height of the occluders as a proportion of camera height")
parser.add_argument("--occluder_aspect_ratio",
type=none_or_str,
default=None,
help="The range of valid occluder aspect ratios")
parser.add_argument("--no_moving_distractors",
action="store_true",
help="Prevent all distractors (and occluders) from moving by making them 'kinematic' objects")
parser.add_argument("--remove_middle",
action="store_true",
help="Remove one of the middle dominoes scene.")
# which models are allowed
parser.add_argument("--model_libraries",
type=none_or_str,
default=','.join(list(MODEL_LIBRARIES.keys())),
help="Which model libraries can be drawn from")
parser.add_argument("--only_use_flex_objects",
action="store_true",
help="Only use models that are FLEX models (and have readable meshes)")
# for generating training data without zones, targets, caps, and at lower resolution
parser.add_argument("--training_data_mode",
action="store_true",
help="Overwrite some parameters to generate training data without target objects, zones, etc.")
parser.add_argument("--readout_data_mode",
action="store_true",
help="Overwrite some parameters to generate training data without target objects, zones, etc.")
parser.add_argument("--testing_data_mode",
action="store_true",
help="Overwrite some parameters to generate training data without target objects, zones, etc.")
parser.add_argument("--match_probe_and_target_color",
action="store_true",
help="Probe and target will have the same color.")
def postprocess(args):
# testing set data drew from a different set of models; needs to be preserved
# for correct occluder/distractor sampling
if not (args.training_data_mode or args.readout_data_mode):
global PRIMITIVE_NAMES
PRIMITIVE_NAMES = [r.name for r in MODEL_LIBRARIES['models_flex.json'].records]
global FULL_NAMES
FULL_NAMES = [r.name for r in MODEL_LIBRARIES['models_full.json'].records]
# choose a valid room
assert args.room in ['box', 'tdw', 'house'], args.room
# parse the model libraries
if args.model_libraries is not None:
if not isinstance(args.model_libraries, list):
args.model_libraries = args.model_libraries.split(',')
libs = []
for lib in args.model_libraries:
if 'models_' not in lib:
libs.append('models_' + lib)
else:
libs.append(lib)
args.model_libraries = libs
# whether to set all objects same color
args.monochrome = bool(args.monochrome)
# camera distance
args.camera_distance = handle_random_transform_args(args.camera_distance)
# scaling and rotating of objects
args.rscale = handle_random_transform_args(args.rscale)
args.zscale = handle_random_transform_args(args.zscale)
args.zlocation = handle_random_transform_args(args.zlocation)
args.tscale = handle_random_transform_args(args.tscale)
args.trot = handle_random_transform_args(args.trot)
args.pscale = handle_random_transform_args(args.pscale)
args.pmass = handle_random_transform_args(args.pmass)
args.prot = handle_random_transform_args(args.prot)
args.mscale = handle_random_transform_args(args.mscale)
args.mrot = handle_random_transform_args(args.mrot)
args.mmass = handle_random_transform_args(args.mmass)
# the push force scale and direction
args.fscale = handle_random_transform_args(args.fscale)
args.frot = handle_random_transform_args(args.frot)
args.foffset = handle_random_transform_args(args.foffset)
args.fwait = handle_random_transform_args(args.fwait)
args.horizontal = bool(args.horizontal)
# occluders and distrators
args.occluder_aspect_ratio = handle_random_transform_args(args.occluder_aspect_ratio)
args.distractor_aspect_ratio = handle_random_transform_args(args.distractor_aspect_ratio)
if args.zone is not None:
zone_list = args.zone.split(',')
# assert all([t in PRIMITIVE_NAMES for t in zone_list]), \
# "All target object names must be elements of %s" % PRIMITIVE_NAMES
args.zone = zone_list
else:
args.zone = PRIMITIVE_NAMES
if args.target is not None:
targ_list = args.target.split(',')
# assert all([t in PRIMITIVE_NAMES for t in targ_list]), \
# "All target object names must be elements of %s" % PRIMITIVE_NAMES
args.target = targ_list
else:
args.target = PRIMITIVE_NAMES
if args.probe is not None:
probe_list = args.probe.split(',')
# assert all([t in PRIMITIVE_NAMES for t in probe_list]), \
# "All target object names must be elements of %s" % PRIMITIVE_NAMES
args.probe = probe_list
else:
args.probe = PRIMITIVE_NAMES
if args.middle is not None:
middle_list = args.middle.split(',')
args.middle = middle_list
if args.tcolor is not None:
rgb = [float(c) for c in args.tcolor.split(',')]
assert len(rgb) == 3, rgb
args.tcolor = args.color = rgb
else:
args.tcolor = args.color = None
if args.zcolor is not None:
rgb = [float(c) for c in args.zcolor.split(',')]
assert len(rgb) == 3, rgb
args.zcolor = rgb
if args.rcolor is not None:
rgb = [float(c) for c in args.rcolor.split(',')]
assert len(rgb) == 3, rgb
args.rcolor = rgb
if args.pcolor is not None:
rgb = [float(c) for c in args.pcolor.split(',')]
assert len(rgb) == 3, rgb
args.pcolor = rgb
if args.mcolor is not None:
rgb = [float(c) for c in args.mcolor.split(',')]
assert len(rgb) == 3, rgb
args.mcolor = rgb
if args.material_types is None:
args.material_types = MATERIAL_TYPES
else:
matlist = args.material_types.split(',')
assert all ([m in MATERIAL_TYPES for m in matlist]), \
"All material types must be elements of %s" % MATERIAL_TYPES
args.material_types = matlist
if args.distractor is None or args.distractor == 'full':
args.distractor = FULL_NAMES
elif args.distractor == 'core':
args.distractor = [r.name for r in MODEL_LIBRARIES['models_core.json'].records]
elif args.distractor in ['flex', 'primitives']:
args.distractor = PRIMITIVE_NAMES
else:
d_names = args.distractor.split(',')
args.distractor = [r for r in FULL_NAMES if any((nm in r for nm in d_names))]
if args.occluder is None or args.occluder == 'full':
args.occluder = FULL_NAMES
elif args.occluder == 'core':
args.occluder = [r.name for r in MODEL_LIBRARIES['models_core.json'].records]
elif args.occluder in ['flex', 'primitives']:
args.occluder = PRIMITIVE_NAMES
else:
o_names = args.occluder.split(',')
args.occluder = [r for r in FULL_NAMES if any((nm in r for nm in o_names))]
# produce training data
if args.training_data_mode:
# multiply the number of trials by a factor
args.num = int(float(args.num) * args.num_multiplier)
# change the random seed in a deterministic way
args.random = 0
args.seed = (args.seed * 1000) % 997
# randomize colors and wood textures
args.match_probe_and_target_color = False
args.color = args.tcolor = args.zcolor = args.pcolor = args.mcolor = args.rcolor = None
# only use the flex objects and make sure the distractors don't move
args.only_use_flex_objects = args.no_moving_distractors = True
# only save out the RGB images and the segmentation masks
args.write_passes = "_img,_id"
args.save_passes = ""
args.save_movies = False
args.save_meshes = True
args.use_test_mode_colors = False
# produce "readout" training data with red target and yellow zone,
# but seed is still different from whatever it was in the commandline_args.txt config
elif args.readout_data_mode:
# multiply the number of trials by a factor
args.num = int(float(args.num) * args.num_multiplier)
# change the random seed in a deterministic way
args.random = 0
args.seed = (args.seed * 3000) % 1999
# target is red, zone is yellow, others are random
args.color = args.tcolor = [1.0, 0.0, 0.0]
args.zcolor = [1.0, 1.0, 0.0]
args.pcolor = args.mcolor = args.rcolor = None
# only use the flex objects and make sure the distractors don't move
args.only_use_flex_objects = args.no_moving_distractors = True
# only save out the RGB images and the segmentation masks
args.write_passes = "_img,_id"
args.save_passes = ""
args.save_movies = False
args.save_meshes = True
args.use_test_mode_colors = True
# produce the same trials as the testing trials, but with red / yellow;
# seed MUST be pulled from a config.
elif args.testing_data_mode:
assert args.random == 0, "You can't regenerate the testing data without --random 0"
assert args.seed != -1, "Seed has to be specified but is instead the default"
assert all((('seed' not in a) for a in sys.argv[1:])), "You can't pass a new seed argument for generating the testing data; use the one in the commandline_args.txt config!"
# red and yellow target and zone
args.use_test_mode_colors = True
args.write_passes = "_img,_id,_depth,_normals,_flow"
args.save_passes = "_img,_id"
args.save_movies = True
args.save_meshes = True
else:
args.use_test_mode_colors = False
return args
if not parse:
return (parser, postprocess)
args = parser.parse_args()
args = postprocess(args)
return args
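# Note on the data modes handled above (illustrative arithmetic only, added for
# clarity): --training_data_mode reseeds deterministically via (seed * 1000) % 997,
# so e.g. a config seed of 17 becomes 51, while --readout_data_mode uses
# (seed * 3000) % 1999, mapping 17 to 1025; the same configs are reused but with
# seeds that differ from the original testing seed.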
class Dominoes(RigidbodiesDataset):
"""
Drop a random Flex primitive object on another random Flex primitive object
"""
MAX_TRIALS = 1000
DEFAULT_RAMPS = [r for r in MODEL_LIBRARIES['models_full.json'].records if 'ramp_with_platform_30' in r.name]
CUBE = [r for r in MODEL_LIBRARIES['models_flex.json'].records if 'cube' in r.name][0]
PRINT = False
def __init__(self,
port: int = None,
room='box',
target_zone=['cube'],
zone_color=[1.0,1.0,0.0], #yellow is the default color for target zones
zone_location=None,
zone_scale_range=[0.5,0.01,0.5],
zone_friction=0.1,
probe_objects=PRIMITIVE_NAMES,
target_objects=PRIMITIVE_NAMES,
probe_scale_range=[0.2, 0.3],
probe_mass_range=[2.,7.],
probe_color=None,
probe_rotation_range=[0,0],
target_scale_range=[0.2, 0.3],
target_rotation_range=None,
target_color=None,
target_motion_thresh=0.01,
collision_axis_length=1.,
force_scale_range=[0.,8.],
force_angle_range=[-60,60],
force_offset={"x":0.,"y":0.5,"z":0.0},
force_offset_jitter=0.1,
force_wait=None,
remove_target=False,
remove_zone=False,
camera_radius=2.0,
camera_min_angle=0,
camera_max_angle=360,
camera_left_right_reflections=False,
camera_min_height=1./3,
camera_max_height=2./3,
material_types=MATERIAL_TYPES,
target_material=None,
probe_material=None,
probe_has_friction=False,
ramp_material=None,
zone_material=None,
model_libraries=MODEL_LIBRARIES.keys(),
distractor_types=PRIMITIVE_NAMES,
distractor_categories=None,
num_distractors=0,
distractor_aspect_ratio=None,
occluder_types=PRIMITIVE_NAMES,
occluder_categories=None,
num_occluders=0,
occlusion_scale=0.6,
occluder_aspect_ratio=None,
use_ramp=False,
ramp_has_friction=False,
ramp_scale=None,
ramp_color=[0.75,0.75,1.0],
ramp_base_height_range=0,
flex_only=False,
no_moving_distractors=False,
match_probe_and_target_color=False,
probe_horizontal=False,
use_test_mode_colors=False,
**kwargs):
## get random port unless one is specified
if port is None:
port = np.random.randint(1000,4000)
print("random port",port,"chosen. If communication with tdw build fails, set port to 1071 or update your tdw installation.")
## initializes static data and RNG
super().__init__(port=port, **kwargs)
## which room to use
self.room = room
## which model libraries can be sampled from
self.model_libraries = model_libraries
## whether only flex objects are allowed
self.flex_only = flex_only
## whether the occluders and distractors can move
self.no_moving_distractors = no_moving_distractors
## color randomization
self._random_target_color = (target_color is None)
self._random_zone_color = (zone_color is None)
self._random_probe_color = (probe_color is None)
## target zone
self.set_zone_types(target_zone)
self.zone_location = zone_location
self.zone_color = zone_color
self.zone_scale_range = zone_scale_range
self.zone_material = zone_material
self.zone_friction = zone_friction
self.remove_zone = remove_zone
## allowable object types
self.set_probe_types(probe_objects)
self.set_target_types(target_objects)
self.material_types = material_types
self.remove_target = remove_target
# whether to use a ramp
self.use_ramp = use_ramp
self.ramp_color = ramp_color
self.ramp_material = ramp_material or self.zone_material
if ramp_scale is not None:
self.ramp_scale = get_random_xyz_transform(ramp_scale)
else:
self.ramp_scale = None
self.ramp_base_height_range = ramp_base_height_range
self.ramp_physics_info = {}
if ramp_has_friction:
self.ramp_physics_info.update({
'mass': 1000,
'static_friction': 0.1,
'dynamic_friction': 0.1,
'bounciness': 0.1})
self.probe_has_friction = probe_has_friction
## object generation properties
self.target_scale_range = target_scale_range
self.target_color = target_color
self.target_rotation_range = target_rotation_range
self.target_material = target_material
self.target_motion_thresh = target_motion_thresh
self.probe_color = probe_color
self.probe_scale_range = probe_scale_range
self.probe_rotation_range = probe_rotation_range
self.probe_mass_range = get_range(probe_mass_range)
self.probe_material = probe_material
self.probe_horizontal = probe_horizontal
self.match_probe_and_target_color = match_probe_and_target_color
self.middle_scale_range = target_scale_range
## Scenario config properties
self.collision_axis_length = collision_axis_length
self.force_scale_range = force_scale_range
self.force_angle_range = force_angle_range
self.force_offset = get_random_xyz_transform(force_offset)
self.force_offset_jitter = force_offset_jitter
self.force_wait_range = force_wait or [0,0]
## camera properties
self.camera_radius_range = get_range(camera_radius)
self.camera_min_angle = camera_min_angle
self.camera_max_angle = camera_max_angle
self.camera_left_right_reflections = camera_left_right_reflections
self.camera_min_height = camera_min_height
self.camera_max_height = camera_max_height
self.camera_aim = {"x": 0., "y": 0.5, "z": 0.} # fixed aim
## distractors and occluders
self.num_distractors = num_distractors
self.distractor_aspect_ratio = get_range(distractor_aspect_ratio)
self.distractor_types = self.get_types(
distractor_types,
libraries=self.model_libraries,
categories=distractor_categories,
flex_only=self.flex_only,
aspect_ratio_min=self.distractor_aspect_ratio[0],
aspect_ratio_max=self.distractor_aspect_ratio[1]
)
self.num_occluders = num_occluders
self.occlusion_scale = occlusion_scale
self.occluder_aspect_ratio = get_range(occluder_aspect_ratio)
self.occluder_types = self.get_types(
occluder_types,
libraries=self.model_libraries,
categories=occluder_categories,
flex_only=self.flex_only,
aspect_ratio_min=self.occluder_aspect_ratio[0],
aspect_ratio_max=self.occluder_aspect_ratio[1],
)
## target can move
self._fixed_target = False
self.use_test_mode_colors = use_test_mode_colors
def get_types(self,
objlist,
libraries=["models_flex.json"],
categories=None,
flex_only=True,
aspect_ratio_min=None,
aspect_ratio_max=None,
size_min=None,
size_max=None):
if isinstance(objlist, str):
objlist = [objlist]
recs = []
for lib in libraries:
recs.extend(MODEL_LIBRARIES[lib].records)
tlist = [r for r in recs if r.name in objlist]
if categories is not None:
if not isinstance(categories, list):
categories = categories.split(',')
tlist = [r for r in tlist if r.wcategory in categories]
if flex_only:
tlist = [r for r in tlist if r.flex == True]
if aspect_ratio_min:
tlist = [r for r in tlist if self.aspect_ratios(r)[0] > aspect_ratio_min]
if aspect_ratio_max:
tlist = [r for r in tlist if self.aspect_ratios(r)[1] < aspect_ratio_max]
if size_min or size_max:
if size_min is None:
size_min = 0.0
if size_max is None:
size_max = 1000.0
rlist = []
for r in tlist:
dims = self.get_record_dimensions(r)
dmin, dmax = [min(dims), max(dims)]
if (dmax > size_min) and (dmin < size_max):
rlist.append(r)
tlist = [r for r in rlist]
assert len(tlist), "You're trying to choose objects from an empty list"
return tlist
def set_probe_types(self, olist):
tlist = self.get_types(olist, flex_only=self.flex_only)
self._probe_types = tlist
def set_target_types(self, olist):
tlist = self.get_types(olist, flex_only=self.flex_only)
self._target_types = tlist
def set_zone_types(self, olist):
tlist = self.get_types(olist, flex_only=self.flex_only)
self._zone_types = tlist
def clear_static_data(self) -> None:
super().clear_static_data()
## randomize colors
if self._random_zone_color:
self.zone_color = None
if self._random_target_color:
self.target_color = None
if self._random_probe_color:
self.probe_color = None
## scenario-specific metadata: object types and drop position
self.target_type = None
self.target_rotation = None
self.target_position = None
self.target_delta_position = None
self.replace_target = False
self.probe_type = None
self.probe_mass = None
self.push_force = None
self.push_position = None
self.force_wait = None
@staticmethod
def get_controller_label_funcs(classname = 'Dominoes'):
funcs = super(Dominoes, Dominoes).get_controller_label_funcs(classname)
funcs += get_all_label_funcs()
def room(f):
return str(np.array(f['static']['room']))
def trial_seed(f):
return int(np.array(f['static']['trial_seed']))
def num_distractors(f):
try:
return int(len(f['static']['distractors']))
except KeyError:
return int(0)
def num_occluders(f):
try:
return int(len(f['static']['occluders']))
except KeyError:
return int(0)
def push_time(f):
try:
return int(np.array(f['static']['push_time']))
except KeyError:
return int(0)
funcs += [room, trial_seed, push_time, num_distractors, num_occluders]
return funcs
def get_field_of_view(self) -> float:
return 55
def get_scene_initialization_commands(self) -> List[dict]:
if self.room == 'box':
add_scene = self.get_add_scene(scene_name="box_room_2018")
elif self.room == 'tdw':
add_scene = self.get_add_scene(scene_name="tdw_room")
elif self.room == 'house':
add_scene = self.get_add_scene(scene_name='archviz_house')
return [add_scene,
{"$type": "set_aperture",
"aperture": 8.0},
{"$type": "set_post_exposure",
"post_exposure": 0.4},
{"$type": "set_ambient_occlusion_intensity",
"intensity": 0.175},
{"$type": "set_ambient_occlusion_thickness_modifier",
"thickness": 3.5}]
def get_trial_initialization_commands(self) -> List[dict]:
commands = []
# randomization across trials
if not(self.randomize):
self.trial_seed = (self.MAX_TRIALS * self.seed) + self._trial_num
random.seed(self.trial_seed)
else:
self.trial_seed = -1 # not used
# Choose and place the target zone.
commands.extend(self._place_target_zone())
# Choose and place a target object.
commands.extend(self._place_target_object())
# Set the probe color
if self.probe_color is None:
self.probe_color = self.target_color if (self.monochrome and self.match_probe_and_target_color) else None
# Choose, place, and push a probe object.
commands.extend(self._place_and_push_probe_object())
# Build the intermediate structure that captures some aspect of "intuitive physics."
commands.extend(self._build_intermediate_structure())
# Teleport the avatar to a reasonable position based on the drop height.
a_pos = self.get_random_avatar_position(radius_min=self.camera_radius_range[0],
radius_max=self.camera_radius_range[1],
angle_min=self.camera_min_angle,
angle_max=self.camera_max_angle,
y_min=self.camera_min_height,
y_max=self.camera_max_height,
center=TDWUtils.VECTOR3_ZERO,
reflections=self.camera_left_right_reflections)
# Set the camera parameters
self._set_avatar_attributes(a_pos)
commands.extend([
{"$type": "teleport_avatar_to",
"position": self.camera_position},
{"$type": "look_at_position",
"position": self.camera_aim},
{"$type": "set_focus_distance",
"focus_distance": TDWUtils.get_distance(a_pos, self.camera_aim)}
])
# Place distractor objects in the background
commands.extend(self._place_background_distractors())
# Place occluder objects in the background
commands.extend(self._place_occluders())
# test mode colors
if self.use_test_mode_colors:
self._set_test_mode_colors(commands)
return commands
def get_per_frame_commands(self, resp: List[bytes], frame: int) -> List[dict]:
if (self.force_wait != 0) and frame == self.force_wait:
if self.PRINT:
print("applied %s at time step %d" % (self.push_cmd, frame))
return [self.push_cmd]
else:
print(frame)
return []
def _write_static_data(self, static_group: h5py.Group) -> None:
super()._write_static_data(static_group)
# randomization
try:
static_group.create_dataset("room", data=self.room)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("seed", data=self.seed)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("randomize", data=self.randomize)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("trial_seed", data=self.trial_seed)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("trial_num", data=self._trial_num)
except (AttributeError,TypeError):
pass
## which objects are the zone, target, and probe
try:
static_group.create_dataset("zone_id", data=self.zone_id)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("target_id", data=self.target_id)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("probe_id", data=self.probe_id)
except (AttributeError,TypeError):
pass
if self.use_ramp:
static_group.create_dataset("ramp_id", data=self.ramp_id)
if self.ramp_base_height > 0.0:
static_group.create_dataset("ramp_base_height", data=float(self.ramp_base_height))
static_group.create_dataset("ramp_base_id", data=self.ramp_base_id)
## color and scales of primitive objects
try:
static_group.create_dataset("target_type", data=self.target_type)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("target_rotation", data=xyz_to_arr(self.target_rotation))
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("probe_type", data=self.probe_type)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("probe_mass", data=self.probe_mass)
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("push_force", data=xyz_to_arr(self.push_force))
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("push_position", data=xyz_to_arr(self.push_position))
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("push_time", data=int(self.force_wait))
except (AttributeError,TypeError):
pass
# distractors and occluders
try:
static_group.create_dataset("distractors", data=[r.name.encode('utf8') for r in self.distractors.values()])
except (AttributeError,TypeError):
pass
try:
static_group.create_dataset("occluders", data=[r.name.encode('utf8') for r in self.occluders.values()])
except (AttributeError,TypeError):
pass
def _write_frame(self,
frames_grp: h5py.Group,
resp: List[bytes],
frame_num: int) -> \
Tuple[h5py.Group, h5py.Group, dict, bool]:
frame, objs, tr, sleeping = super()._write_frame(frames_grp=frames_grp,
resp=resp,
frame_num=frame_num)
# If this is a stable structure, disregard whether anything is actually moving.
return frame, objs, tr, sleeping and not (frame_num < 150)
def _update_target_position(self, resp: List[bytes], frame_num: int) -> None:
if frame_num <= 0:
self.target_delta_position = xyz_to_arr(TDWUtils.VECTOR3_ZERO)
elif 'tran' in [OutputData.get_data_type_id(r) for r in resp[:-1]]:
target_position_new = self.get_object_position(self.target_id, resp) or self.target_position
try:
self.target_delta_position += (target_position_new - xyz_to_arr(self.target_position))
self.target_position = arr_to_xyz(target_position_new)
except TypeError:
print("Failed to get a new object position, %s" % target_position_new)
def _write_frame_labels(self,
frame_grp: h5py.Group,
resp: List[bytes],
frame_num: int,
sleeping: bool) -> Tuple[h5py.Group, List[bytes], int, bool]:
labels, resp, frame_num, done = super()._write_frame_labels(frame_grp, resp, frame_num, sleeping)
# Whether this trial has a target or zone to track
has_target = (not self.remove_target) or self.replace_target
has_zone = not self.remove_zone
labels.create_dataset("has_target", data=has_target)
labels.create_dataset("has_zone", data=has_zone)
if not (has_target or has_zone):
return labels, resp, frame_num, done
# Whether target moved from its initial position, and how much
if has_target:
self._update_target_position(resp, frame_num)
has_moved = np.sqrt((self.target_delta_position**2).sum()) > self.target_motion_thresh
labels.create_dataset("target_delta_position", data=self.target_delta_position)
labels.create_dataset("target_has_moved", data=has_moved)
# Whether target has fallen to the ground
c_points, c_normals = self.get_object_environment_collision(
self.target_id, resp)
if frame_num <= 0:
self.target_on_ground = False
self.target_ground_contacts = c_points
elif len(c_points) == 0:
self.target_on_ground = False
elif len(c_points) != len(self.target_ground_contacts):
self.target_on_ground = True
elif any([np.sqrt(((c_points[i] - self.target_ground_contacts[i])**2).sum()) > self.target_motion_thresh \
for i in range(min(len(c_points), len(self.target_ground_contacts)))]):
self.target_on_ground = True
labels.create_dataset("target_on_ground", data=self.target_on_ground)
# Whether target has hit the zone
if has_target and has_zone:
c_points, c_normals = self.get_object_target_collision(
self.target_id, self.zone_id, resp)
target_zone_contact = bool(len(c_points))
labels.create_dataset("target_contacting_zone", data=target_zone_contact)
return labels, resp, frame_num, done
def is_done(self, resp: List[bytes], frame: int) -> bool:
return frame > 300
def get_rotation(self, rot_range):
if rot_range is None:
return {"x": 0,
"y": random.uniform(0, 360),
"z": 0.}
else:
return get_random_xyz_transform(rot_range)
def get_y_rotation(self, rot_range):
if rot_range is None:
return self.get_rotation(rot_range)
else:
return {"x": 0.,
"y": random.uniform(*get_range(rot_range)),
"z": 0.}
def get_push_force(self, scale_range, angle_range, yforce = [0,0]):
#sample y force component
yforce = random.uniform(*yforce)
# rotate a unit vector initially pointing in positive-x direction
theta = np.radians(random.uniform(*get_range(angle_range)))
push = np.array([np.cos(theta), yforce, np.sin(theta)])
# scale it
push *= random.uniform(*get_range(scale_range))
# convert to xyz
return arr_to_xyz(push)
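    # Illustrative sketch (not part of the controller logic): with yforce=[0, 0], a
    # draw of theta=0 and magnitude 4.0 yields push = {"x": 4.0, "y": 0.0, "z": 0.0},
    # i.e. the full magnitude along the positive collision (x) axis; a nonzero theta
    # rotates that vector in the x-z plane and yforce sets the vertical component.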
def _get_push_cmd(self, o_id, position_or_particle=None):
if position_or_particle is None:
cmd = {
"$type": "apply_force_to_object",
"force": self.push_force,
"id": o_id}
else:
cmd = {
"$type": "apply_force_at_position",
"force": self.push_force,
"position": position_or_particle,
"id": o_id}
return cmd
def _get_zone_location(self, scale):
return {
"x": 0.5 * self.collision_axis_length + scale["x"] + 0.1,
"y": 0.0 if not self.remove_zone else 10.0,
"z": 0.0 if not self.remove_zone else 10.0
}
def _place_target_zone(self) -> List[dict]:
# create a target zone (usually flat, with same texture as room)
record, data = self.random_primitive(self._zone_types,
scale=self.zone_scale_range,
color=self.zone_color,
add_data=False
)
o_id, scale, rgb = [data[k] for k in ["id", "scale", "color"]]
self.zone = record
self.zone_type = data["name"]
self.zone_color = rgb
self.zone_id = o_id
self.zone_scale = scale
if any((s <= 0 for s in scale.values())):
self.remove_zone = True
self.scales = self.scales[:-1]
self.colors = self.colors[:-1]
self.model_names = self.model_names[:-1]
# place it just beyond the target object with an effectively immovable mass and high friction
commands = []
commands.extend(
self.add_primitive(
record=record,
position=(self.zone_location or self._get_zone_location(scale)),
rotation=TDWUtils.VECTOR3_ZERO,
scale=scale,
material=self.zone_material,
color=rgb,
mass=500,
scale_mass=False,
dynamic_friction=self.zone_friction,
static_friction=(10.0 * self.zone_friction),
bounciness=0,
o_id=o_id,
add_data=(not self.remove_zone),
make_kinematic=True # zone shouldn't move
))
        # get rid of it if not using a target zone
if self.remove_zone:
commands.append(
{"$type": self._get_destroy_object_command_name(o_id),
"id": int(o_id)})
self.object_ids = self.object_ids[:-1]
return commands
@staticmethod
def rescale_record_to_size(record, size_range=1.0, randomize=False):
dims = Dominoes.get_record_dimensions(record)
dmin, dmax = [min(dims), max(dims)]
scale = 1.0
if randomize:
smin = random.uniform(*get_range(size_range))
smax = random.uniform(smin, get_range(size_range)[1])
else:
smin, smax = get_range(size_range)
if dmax < smin:
scale = smin / dmax
elif dmax > smax:
scale = smax / dmax
print("%s rescaled by %.2f" % (record.name, scale))
print("dims", dims, "dminmax", dmin, dmax)
print("bounds now", [d * scale for d in dims])
return arr_to_xyz(np.array([scale] * 3))
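    # Worked example (illustrative only): a record with dimensions (0.2, 1.0, 0.4)
    # and size_range=(0.5, 0.75) has dmax = 1.0 > smax = 0.75, so scale = 0.75 and
    # the returned value is {"x": 0.75, "y": 0.75, "z": 0.75}; a record whose largest
    # dimension already lies inside [smin, smax] keeps scale = 1.0.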
def _place_target_object(self, size_range=None) -> List[dict]:
"""
Place a primitive object at one end of the collision axis.
"""
# create a target object
record, data = self.random_primitive(self._target_types,
scale=self.target_scale_range,
color=self.target_color,
add_data=False
)
o_id, scale, rgb = [data[k] for k in ["id", "scale", "color"]]
if size_range is not None:
scale = self.rescale_record_to_size(record, size_range)
print("rescaled target", scale)
self.target = record
self.target_type = data["name"]
self.target_color = rgb
self.target_scale = self.middle_scale = scale
self.target_id = o_id
if any((s <= 0 for s in scale.values())):
self.remove_target = True
# Where to put the target
if self.target_rotation is None:
self.target_rotation = self.get_rotation(self.target_rotation_range)
if self.target_position is None:
self.target_position = {
"x": 0.5 * self.collision_axis_length,
"y": 0. if not self.remove_target else 10.0,
"z": 0. if not self.remove_target else 10.0
}
        # Commands for adding the object
commands = []
commands.extend(
self.add_primitive(
record=record,
position=self.target_position,
rotation=self.target_rotation,
scale=scale,
material=self.target_material,
color=rgb,
mass=2.0,
scale_mass=False,
dynamic_friction=0.5,
static_friction=0.5,
bounciness=0.0,
o_id=o_id,
add_data=(not self.remove_target),
make_kinematic=True if self._fixed_target else False,
apply_texture=True if self.target.name in PRIMITIVE_NAMES else False
))
# If this scene won't have a target
if self.remove_target:
commands.append(
{"$type": self._get_destroy_object_command_name(o_id),
"id": int(o_id)})
self.object_ids = self.object_ids[:-1]
return commands
def _place_and_push_probe_object(self, size_range=None) -> List[dict]:
"""
Place a probe object at the other end of the collision axis, then apply a force to push it.
"""
exclude = not (self.monochrome and self.match_probe_and_target_color)
record, data = self.random_primitive(self._probe_types,
scale=self.probe_scale_range,
color=self.probe_color,
exclude_color=(self.target_color if exclude else None),
exclude_range=0.25,
add_data=False)
o_id, scale, rgb = [data[k] for k in ["id", "scale", "color"]]
if size_range is not None:
scale = self.rescale_record_to_size(record, size_range)
print("rescaled probe", scale)
self.probe = record
self.probe_type = data["name"]
self.probe_scale = scale
self.probe_id = o_id
# Add the object with random physics values
commands = []
### better sampling of random physics values
self.probe_mass = random.uniform(self.probe_mass_range[0], self.probe_mass_range[1])
self.probe_initial_position = {"x": -0.5*self.collision_axis_length, "y": 0., "z": 0.}
rot = self.get_y_rotation(self.probe_rotation_range)
if self.probe_horizontal:
rot["z"] = 90
self.probe_initial_position["z"] += -np.sin(np.radians(rot["y"])) * scale["y"] * 0.5
self.probe_initial_position["x"] += np.cos(np.radians(rot["y"])) * scale["y"] * 0.5
if self.use_ramp:
commands.extend(self._place_ramp_under_probe())
if self.probe_has_friction:
probe_physics_info = {'dynamic_friction': 0.1, 'static_friction': 0.1, 'bounciness': 0.6}
else:
probe_physics_info = {'dynamic_friction': 0.01, 'static_friction': 0.01, 'bounciness': 0}
commands.extend(
self.add_primitive(
record=record,
position=self.probe_initial_position,
rotation=rot,
scale=scale,
material=self.probe_material,
color=rgb,
mass=self.probe_mass,
scale_mass=False,
o_id=o_id,
add_data=True,
make_kinematic=False,
apply_texture=True if self.probe.name in PRIMITIVE_NAMES else False,
**probe_physics_info
))
        # Zero out its drag so the applied force is not damped
commands.extend([
{"$type": "set_object_drag",
"id": o_id,
"drag": 0, "angular_drag": 0}])
# Apply a force to the probe object
self.push_force = self.get_push_force(
scale_range=self.probe_mass * np.array(self.force_scale_range),
angle_range=self.force_angle_range)
self.push_force = self.rotate_vector_parallel_to_floor(
self.push_force, -rot['y'], degrees=True)
self.push_position = self.probe_initial_position
if self.PRINT:
print("PROBE MASS", self.probe_mass)
print("PUSH FORCE", self.push_force)
if self.use_ramp:
self.push_cmd = self._get_push_cmd(o_id, None)
else:
self.push_position = {
k:v+self.force_offset[k]*self.rotate_vector_parallel_to_floor(
self.probe_scale, rot['y'])[k]
for k,v in self.push_position.items()}
self.push_position = {
k:v+random.uniform(-self.force_offset_jitter, self.force_offset_jitter)
for k,v in self.push_position.items()}
self.push_cmd = self._get_push_cmd(o_id, self.push_position)
# decide when to apply the force
self.force_wait = int(random.uniform(*get_range(self.force_wait_range)))
if self.PRINT:
print("force wait", self.force_wait)
if self.force_wait == 0:
commands.append(self.push_cmd)
return commands
def _place_ramp_under_probe(self) -> List[dict]:
cmds = []
# ramp params
self.ramp = random.choice(self.DEFAULT_RAMPS)
rgb = self.ramp_color or self.random_color(exclude=self.target_color)
ramp_pos = copy.deepcopy(self.probe_initial_position)
ramp_pos['y'] = self.zone_scale['y'] if not self.remove_zone else 0.0 # don't intersect w zone
ramp_rot = self.get_y_rotation([180,180])
ramp_id = self._get_next_object_id()
self.ramp_pos = ramp_pos
self.ramp_rot = ramp_rot
self.ramp_id = ramp_id
# figure out scale
r_len, r_height, r_dep = self.get_record_dimensions(self.ramp)
scale_x = (0.75 * self.collision_axis_length) / r_len
if self.ramp_scale is None:
self.ramp_scale = arr_to_xyz([scale_x, self.scale_to(r_height, 1.5), 0.75 * scale_x])
self.ramp_end_x = self.ramp_pos['x'] + self.ramp_scale['x'] * r_len * 0.5
# optionally add base
cmds.extend(self._add_ramp_base_to_ramp(color=rgb))
# add the ramp
cmds.extend(
self.add_ramp(
record = self.ramp,
position=self.ramp_pos,
rotation=self.ramp_rot,
scale=self.ramp_scale,
material=self.ramp_material,
color=rgb,
o_id=self.ramp_id,
add_data=True,
**self.ramp_physics_info
))
# need to adjust probe height as a result of ramp placement
self.probe_initial_position['x'] -= 0.5 * self.ramp_scale['x'] * r_len - 0.15
self.probe_initial_position['y'] = self.ramp_scale['y'] * r_height + self.ramp_base_height + self.probe_initial_position['y']
return cmds
def _add_ramp_base_to_ramp(self, color=None) -> None:
cmds = []
if color is None:
color = self.random_color(exclude=self.target_color)
self.ramp_base_height = random.uniform(*get_range(self.ramp_base_height_range))
if self.ramp_base_height < 0.01:
self.ramp_base_scale = copy.deepcopy(self.ramp_scale)
return []
self.ramp_base = self.CUBE
r_len, r_height, r_dep = self.get_record_dimensions(self.ramp)
self.ramp_base_scale = arr_to_xyz([
float(self.ramp_scale['x'] * r_len),
float(self.ramp_base_height),
float(self.ramp_scale['z'] * r_dep)])
self.ramp_base_id = self._get_next_object_id()
# add the base
ramp_base_physics_info = {
'mass': 500,
'dynamic_friction': 0.01,
'static_friction': 0.01,
'bounciness': 0}
if self.ramp_physics_info.get('dynamic_friction', None) is not None:
ramp_base_physics_info.update(self.ramp_physics_info)
cmds.extend(
RigidbodiesDataset.add_physics_object(
self,
record=self.ramp_base,
position=copy.deepcopy(self.ramp_pos),
rotation=TDWUtils.VECTOR3_ZERO,
o_id=self.ramp_base_id,
add_data=True,
**ramp_base_physics_info))
# scale it, color it, fix it
cmds.extend(
self.get_object_material_commands(
self.ramp_base, self.ramp_base_id, self.get_material_name(self.ramp_material)))
cmds.extend([
{"$type": "scale_object",
"scale_factor": self.ramp_base_scale,
"id": self.ramp_base_id},
{"$type": "set_color",
"color": {"r": color[0], "g": color[1], "b": color[2], "a": 1.},
"id": self.ramp_base_id},
{"$type": "set_object_collision_detection_mode",
"mode": "continuous_speculative",
"id": self.ramp_base_id},
{"$type": "set_kinematic_state",
"id": self.ramp_base_id,
"is_kinematic": True,
"use_gravity": True}])
# add data
self.model_names.append(self.ramp_base.name)
self.scales.append(self.ramp_base_scale)
self.colors = np.concatenate([self.colors, np.array(color).reshape((1,3))], axis=0)
# raise the ramp
self.ramp_pos['y'] += self.ramp_base_scale['y']
return cmds
def _replace_target_with_object(self, record, data):
self.target = record
self.target_type = data["name"]
self.target_color = data["color"]
self.target_scale = data["scale"]
self.target_id = data["id"]
self.replace_target = True
def _set_test_mode_colors(self, commands) -> None:
tcolor = {'r': 1.0, 'g': 0.0, 'b': 0.0, 'a': 1.0}
zcolor = {'r': 1.0, 'g': 1.0, 'b': 0.0, 'a': 1.0}
exclude = {'r': 1.0, 'g': 0.0, 'b': 0.0}
exclude_range = 0.25
for c in commands:
if "set_color" in c.values():
o_id = c['id']
if o_id == self.target_id:
c['color'] = tcolor
elif o_id == self.zone_id:
c['color'] = zcolor
elif any((np.abs(exclude[k] - c['color'][k]) < exclude_range for k in exclude.keys())):
rgb = self.random_color_from_rng(exclude=[exclude[k] for k in ['r','g','b']],
exclude_range=exclude_range,
seed=self.trial_seed)
c['color'] = {'r': rgb[0], 'g': rgb[1], 'b': rgb[2], 'a': 1.0}
def _build_intermediate_structure(self) -> List[dict]:
"""
Abstract method for building a physically interesting intermediate structure between the probe and the target.
"""
commands = []
return commands
def _set_distractor_objects(self) -> None:
self.distractors = OrderedDict()
for i in range(self.num_distractors):
record, data = self.random_model(self.distractor_types, add_data=True)
self.distractors[data['id']] = record
def _set_occluder_objects(self) -> None:
self.occluders = OrderedDict()
for i in range(self.num_occluders):
record, data = self.random_model(self.occluder_types, add_data=True)
self.occluders[data['id']] = record
@staticmethod
def get_record_dimensions(record: ModelRecord) -> List[float]:
length = np.abs(record.bounds['left']['x'] - record.bounds['right']['x'])
height = np.abs(record.bounds['top']['y'] - record.bounds['bottom']['y'])
depth = np.abs(record.bounds['front']['z'] - record.bounds['back']['z'])
return (length, height, depth)
@staticmethod
def aspect_ratios(record: ModelRecord) -> List[float]:
l,h,d = Dominoes.get_record_dimensions(record)
a1 = float(h) / l
a2 = float(h) / d
min_ar = min(a1, a2)
max_ar = max(a1, a2)
return (min_ar, max_ar)
@staticmethod
def scale_to(current_scale : float, target_scale : float) -> float:
return target_scale / current_scale
def _set_avatar_attributes(self, avatar_position) -> None:
a_pos = avatar_position
## camera position and ray
self.camera_position = a_pos
self.camera_rotation = np.degrees(np.arctan2(a_pos['z'], a_pos['x']))
dist = TDWUtils.get_distance(a_pos, self.camera_aim)
self.camera_altitude = np.degrees(np.arcsin((a_pos['y'] - self.camera_aim['y'])/dist))
camera_ray = np.array([self.camera_position['x'], 0., self.camera_position['z']])
self.camera_radius = np.linalg.norm(camera_ray)
camera_ray /= np.linalg.norm(camera_ray)
self.camera_ray = arr_to_xyz(camera_ray)
## unit vector that points opposite the camera
opposite = np.array([-self.camera_position['x'], 0., -self.camera_position['z']])
opposite /= np.linalg.norm(opposite)
opposite = arr_to_xyz(opposite)
self.opposite_unit_vector = opposite
if self.PRINT:
print("camera distance", self.camera_radius)
print("camera ray", self.camera_ray)
print("camera angle", self.camera_rotation)
print("camera altitude", self.camera_altitude)
print("camera position", self.camera_position)
def _set_occlusion_attributes(self) -> None:
self.occluder_angular_spacing = 10
self.occlusion_distance_fraction = [0.6, 0.8]
self.occluder_rotation_jitter = 30.
self.occluder_min_z = self.middle_scale['z'] + 0.25
self.occluder_min_size = 0.25
self.occluder_max_size = 1.5
self.rescale_occluder_height = True
def _get_occluder_position_pose_scale(self, record, unit_position_vector):
"""
Given a unit vector direction in world coordinates, adjust in a Controller-specific
manner to avoid interactions with the physically relevant objects.
"""
o_len, o_height, o_dep = self.get_record_dimensions(record)
## get the occluder pose
ang = self.camera_rotation
rot = self.get_y_rotation(
[ang - self.occluder_rotation_jitter, ang + self.occluder_rotation_jitter])
bounds = {'x': o_len, 'y': o_height, 'z': o_dep}
bounds_rot = self.rotate_vector_parallel_to_floor(bounds, rot['y'])
bounds = {k:np.maximum(np.abs(v), bounds[k]) for k,v in bounds_rot.items()}
# make sure it's in a reasonable size range
size = max(list(bounds.values()))
size = np.minimum(np.maximum(size, self.occluder_min_size), self.occluder_max_size)
scale = size / max(list(bounds.values()))
bounds = self.scale_vector(bounds, scale)
## choose the initial position of the object, before adjustment
occ_distance = random.uniform(*get_range(self.occlusion_distance_fraction))
pos = self.scale_vector(
unit_position_vector, occ_distance * self.camera_radius)
## reposition and rescale it so it's not too close to the "physical dynamics" axis (z)
if np.abs(pos['z']) < (self.occluder_min_z + self.occluder_min_size):
pos.update({'z' : np.sign(pos['z']) * (self.occluder_min_z + self.occluder_min_size)})
reach_z = np.abs(pos['z']) - 0.5 * bounds['z']
if reach_z < self.occluder_min_z: # scale down
scale_z = (np.abs(pos['z']) - self.occluder_min_z) / (0.5 * bounds['z'])
else:
scale_z = 1.0
bounds = self.scale_vector(bounds, scale_z)
scale *= scale_z
## reposition and rescale it so it's not too close to other occluders
if self.num_occluders > 1 and len(self.occluder_positions):
last_pos_x = self.occluder_positions[-1]['x']
last_bounds_x = self.occluder_dimensions[-1]['x']
if (pos['x'] + self.occluder_min_size) > (last_pos_x - 0.5 * last_bounds_x):
pos.update({'x': (last_pos_x - 0.5 * last_bounds_x) - self.occluder_min_size})
reach_x = pos['x'] + 0.5 * bounds['x']
if reach_x > (last_pos_x - 0.5 * last_bounds_x): # scale down
scale_x = (last_pos_x - 0.5 * last_bounds_x - pos['x']) / (0.5 * bounds['x'])
else:
scale_x = 1.0
bounds = self.scale_vector(bounds, scale_x)
scale *= scale_x
# do some trigonometry to figure out the scale of the occluder
if self.rescale_occluder_height:
occ_dist = np.sqrt(pos['x']**2 + pos['z']**2)
occ_target_height = self.camera_aim['y'] + occ_dist * np.tan(np.radians(self.camera_altitude))
occ_target_height *= self.occlusion_scale
occ_target_height = np.minimum(occ_target_height, self.occluder_max_size)
scale_y = occ_target_height / bounds['y']
scale_y = np.minimum(
scale_y, (np.abs(pos['z']) - self.occluder_min_z) / (0.5 * bounds['z']))
bounds = self.scale_vector(bounds, scale_y)
scale *= scale_y
scale = arr_to_xyz([scale] * 3)
self.occluder_positions.append(pos)
self.occluder_dimensions.append(bounds)
return (pos, rot, scale)
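    # Worked example of the height rescaling above (illustrative; the altitude and
    # distance are assumptions, not values pulled from a config): with camera_aim
    # y = 0.5, a camera altitude of 30 degrees and an occluder at horizontal distance
    # 1.0, the pre-scale target height is about 0.5 + 1.0 * tan(30 deg) ~= 1.08, so
    # occlusion_scale = 0.6 asks for an apparent height of roughly 0.65 (capped at
    # occluder_max_size).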
def _set_distractor_attributes(self) -> None:
self.distractor_angular_spacing = 15
self.distractor_distance_fraction = [0.4,1.0]
self.distractor_rotation_jitter = 30
self.distractor_min_z = self.middle_scale['z'] + 0.25
self.distractor_min_size = 0.5
self.distractor_max_size = 1.5
def _get_distractor_position_pose_scale(self, record, unit_position_vector):
d_len, d_height, d_dep = self.get_record_dimensions(record)
## get distractor pose and initial bounds
ang = 0 if (self.camera_rotation > 0) else 180
rot = self.get_y_rotation(
[ang - self.distractor_rotation_jitter, ang + self.distractor_rotation_jitter])
bounds = {'x': d_len, 'y': d_height, 'z': d_dep}
bounds_rot = self.rotate_vector_parallel_to_floor(bounds, rot['y'])
bounds = {k:np.maximum(np.abs(v), bounds[k]) for k,v in bounds_rot.items()}
## make sure it's in a reasonable size range
size = max(list(bounds.values()))
size = np.minimum(np.maximum(size, self.distractor_min_size), self.distractor_max_size)
scale = size / max(list(bounds.values()))
bounds = self.scale_vector(bounds, scale)
## choose the initial position of the object
distract_distance = random.uniform(*get_range(self.distractor_distance_fraction))
pos = self.scale_vector(
unit_position_vector, distract_distance * self.camera_radius)
## reposition and rescale it away from the "physical dynamics axis"
if np.abs(pos['z']) < (self.distractor_min_z + self.distractor_min_size):
pos.update({'z':
|
np.sign(pos['z'])
|
numpy.sign
|
# -*- coding: utf-8 -*-
"""
This module is part of the spafe library and has the purpose of computing the following spectral stats:
- meanfreq : mean frequency (in kHz)
- sd : standard deviation of frequency
- median : median frequency (in kHz)
- Q25 : first quantile (in kHz)
- Q75 : third quantile (in kHz)
- IQR : interquantile range (in kHz)
- skew : skewness (see note in specprop description)
- kurt : kurtosis (see note in specprop description)
- sp.ent : spectral entropy
- sfm : spectral flatness
- mode : mode frequency
- centroid : frequency centroid (see specprop)
- peakf : peak frequency (frequency with highest energy)
- meanfun : average of fundamental frequency measured across acoustic signal
- minfun : minimum fundamental frequency measured across acoustic signal
- maxfun : maximum fundamental frequency measured across acoustic signal
- meandom : average of dominant frequency measured across acoustic signal
- mindom : minimum of dominant frequency measured across acoustic signal
- maxdom : maximum of dominant frequency measured across acoustic signal
- dfrange : range of dominant frequency measured across acoustic signal
- modindx : modulation index. Calculated as the accumulated absolute difference
between adjacent measurements of fundamental frequencies divided
by the frequency range
- label : male or female
Todo:
* For module TODOs
* You have to also use ``sphinx.ext.todo`` extension
Reference:
http://ijeee.iust.ac.ir/article-1-1074-en.pdf
"""
import scipy
import numpy as np
from ..utils.spectral import stft, rfft
from ..frequencies.dominant_frequencies import get_dominant_frequencies
from ..frequencies.fundamental_frequencies import FundamentalFrequenciesExtractor
def compute_fund_freqs(sig, fs):
"""
compute fundamental frequencies.
Args:
        sig (array) : a mono audio signal (Nx1) from which to compute features.
        fs    (int) : the sampling frequency of the signal we are working with.
    Returns:
        (array) pitches / fundamental frequency estimates across the signal.
"""
# fundamental frequencies calculations
fund_freqs_extractor = FundamentalFrequenciesExtractor(debug=False)
pitches, harmonic_rates, argmins, times = fund_freqs_extractor.main(
sig=sig, fs=fs)
return pitches
def compute_dom_freqs_and_mod_index(sig,
fs,
lower_cutoff=50,
upper_cutoff=3000,
nfft=512,
win_len=0.03,
win_hop=0.015,
win_type='hamming',
debug=False):
"""
compute dominant frequencies and modulation index.
Args:
        sig (array) : a mono audio signal (Nx1) from which to compute features.
        fs    (int) : the sampling frequency of the signal we are working with.
    Returns:
        (tuple) array of dominant frequencies and the modulation index (float).
"""
# dominant frequencies calculations
dom_freqs = get_dominant_frequencies(sig=sig,
fs=fs,
lower_cutoff=50,
upper_cutoff=upper_cutoff,
nfft=nfft,
win_len=win_len,
win_hop=win_hop,
win_type=win_type,
debug=debug)
# modulation index calculation
changes = np.abs(dom_freqs[:-1] - dom_freqs[1:])
dfrange = dom_freqs.max() - dom_freqs.min()
if dom_freqs.min() == dom_freqs.max():
mod_index = 0
else:
mod_index = changes.mean() / dfrange
return dom_freqs, mod_index
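# The helper below is an illustrative sketch added for clarity; it mirrors the
# modulation-index arithmetic above on a toy array, is never called by the library
# code, and skips the STFT-based dominant-frequency extraction entirely.
def _example_modulation_index():
    dom_freqs = np.array([100., 150., 120., 180.])
    changes = np.abs(dom_freqs[:-1] - dom_freqs[1:])   # [50, 30, 60]
    dfrange = dom_freqs.max() - dom_freqs.min()        # 80
    return changes.mean() / dfrange                    # ~0.583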
def spectral_centroid(sig, fs):
"""
compute spectral centroid.
"""
# compute magnitude spectrum
    magnitude_spectrum = np.abs(np.fft.rfft(sig))
# compute positive frequencies
freqs = np.abs(np.fft.fftfreq(len(sig), 1.0 / fs)[:len(sig) // 2 + 1])
# return weighted mean
sc = np.sum(magnitude_spectrum * freqs) / np.sum(magnitude_spectrum)
return sc
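# Illustrative sketch (added for clarity, not part of the spafe API): for a pure
# tone that falls exactly on an FFT bin, the centroid computed above should land
# very close to the tone frequency.
def _example_spectral_centroid(fs=16000, f0=440.0):
    t = np.arange(fs) / float(fs)          # one second of signal
    tone = np.sin(2 * np.pi * f0 * t)
    return spectral_centroid(tone, fs)     # expected to be roughly f0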
def spectral_flatness(sig):
"""
compute spectral flatness.
"""
    # compute magnitude spectrum
    magnitude_spectrum = np.abs(np.fft.rfft(sig))
    # rfft already returns the non-negative half of the spectrum (due to symmetry)
    magnitude_spectrum = magnitude_spectrum[:len(sig) // 2 + 1]
sf = scipy.stats.mstats.gmean(magnitude_spectrum) / np.mean(
magnitude_spectrum)
return sf
def spectral_rolloff(sig, fs, k=0.85):
# convert to frequency domain
magnitude_spectrum, _ = stft(sig=sig, fs=fs)
power_spectrum = np.abs(magnitude_spectrum)**2
tbins, fbins = np.shape(magnitude_spectrum)
# when do these blocks begin (time in seconds)?
tstamps = (np.arange(0, tbins - 1) * (tbins / float(fs)))
# compute the spectral sum
spectral_sum = np.sum(power_spectrum, axis=1)
    # find frequency-bin indices where the cumulative sum of all bins is higher
    # than k-percent of the sum of all bins. Lowest index = Rolloff
sr = [
np.where(np.cumsum(power_spectrum[t, :]) >= k * spectral_sum[t])[0][0]
for t in range(tbins - 1)
]
sr = np.asarray(sr).astype(float)
# convert frequency-bin index to frequency in Hz
sr = (sr / fbins) * (fs / 2.0)
return sr, np.asarray(tstamps)
def spectral_flux(sig, fs):
# convert to frequency domain
magnitude_spectrum, _ = stft(sig=sig, fs=fs)
tbins, fbins = np.shape(magnitude_spectrum)
# when do these blocks begin (time in seconds)?
tstamps = (np.arange(0, tbins - 1) * (tbins / float(fs)))
sf = np.sqrt(np.sum(np.diff(np.abs(magnitude_spectrum))**2,
axis=1)) / fbins
return sf[1:], np.asarray(tstamps)
def spectral_spread(centroid, spectrum, fs):
"""
Compute the spectral spread (basically a variance of the spectrum around the spectral centroid)
Args:
centroid (float) : spectral centroid.
spectrum (array) : spectrum array.
Returns:
(float) spectral spread.
"""
bin_count, numerator, denominator = 0, 0, 0
for bin_i in spectrum:
# Compute center frequency
f = ((fs / 2.0) / len(spectrum)) * bin_count
numerator = numerator + (((f - centroid)**2) * abs(bin_i))
denominator = denominator + abs(bin_i)
bin_count = bin_count + 1
return np.sqrt((numerator * 1.0) / denominator)
def zero_crossing_rate(sig, fs, block_length=256):
# how many blocks have to be processed?
num_blocks = int(np.ceil(len(sig) / block_length))
# when do these blocks begin (time in seconds)?
timestamps = (np.arange(0, num_blocks - 1) * (block_length / float(fs)))
zcr = []
for i in range(0, num_blocks - 1):
start = i * block_length
stop = np.min([(start + block_length - 1), len(sig)])
zc = 0.5 * np.mean(np.abs(np.diff(np.sign(sig[start:stop]))))
zcr.append(zc)
return np.asarray(zcr), np.asarray(timestamps)
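# Illustrative note (added for clarity): each zero crossing contributes
# |diff(sign)| = 2, so the per-block value above is roughly the number of crossings
# divided by the block length; for a pure tone of frequency f0 sampled at fs this is
# approximately 2 * f0 / fs per sample, e.g. ~0.055 for a 440 Hz tone at 16 kHz.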
def root_mean_square(sig, fs, block_length=256):
# how many blocks have to be processed?
num_blocks = int(np.ceil(len(sig) / block_length))
# when do these blocks begin (time in seconds)?
tstamps = (np.arange(0, num_blocks - 1) * (block_length / float(fs)))
rms = []
for i in range(0, num_blocks - 1):
start = i * block_length
stop = np.min([(start + block_length - 1), len(sig)])
# This is wrong but why? rms_seg = np.sqrt(np.mean(sig[start:stop]**2))
rms_seg = np.sqrt(np.mean(np.power(sig[start:stop], 2)))
rms.append(rms_seg)
return
|
np.asarray(rms)
|
numpy.asarray
|
# toy model for use on stream
# CLimate Analysis using Digital Estimations (CLAuDE)
import numpy as np
import matplotlib.pyplot as plt
import time, sys, pickle
import claude_low_level_library as low_level
import claude_top_level_library as top_level
from scipy.interpolate import interp2d, RectBivariateSpline
# from twitch import prime_sub
######## CONTROL ########
day = 60*60*24 # define length of day (used for calculating Coriolis as well) (s)
resolution = 3 # how many degrees between latitude and longitude gridpoints
planet_radius = 6.4E6 # define the planet's radius (m)
insolation = 1370 # TOA radiation from star (W m^-2)
gravity = 9.81 # define surface gravity for planet (m s^-2)
axial_tilt = 23.5/2 # tilt of rotational axis w.r.t. solar plane
year = 365*day # length of year (s)
pressure_levels = np.array([1000,950,900,800,700,600,500,400,350,300,250,200,150,100,75,50,25,10,5,2,1])
pressure_levels *= 100
nlevels = len(pressure_levels)
top = -1
dt_spinup = 60*137
dt_main = 60*3.5
spinup_length = 21*day
###
advection = True # if you want to include advection set this to be True
smoothing = True
smoothing_parameter_t = 1.0
smoothing_parameter_u = 0.9
smoothing_parameter_v = 0.9
smoothing_parameter_w = 0.2
smoothing_parameter_add = 0.6
save = False # save current state to file?
load = False # load initial state from file?
above = False
pole = 'n'
above_level = -1
plot = False # display plots of output?
diagnostic = False # display raw fields for diagnostic purposes
level_plots = False # display plots of output on vertical levels?
nplots = 3 # how many levels you want to see plots of (evenly distributed through column)
verbose = False
pole_lower_latitude_limit = -60
pole_higher_latitude_limit = -75
###########################
# define coordinate arrays
lat = np.arange(-90,91,resolution)
lon = np.arange(0,360,resolution)
nlat = len(lat)
nlon = len(lon)
lon_plot, lat_plot = np.meshgrid(lon, lat)
heights_plot, lat_z_plot = np.meshgrid(lat,pressure_levels[:top]/100)
# initialise arrays for various physical fields
temperature_world = np.zeros((nlat,nlon)) + 290
potential_temperature = np.zeros((nlat,nlon,nlevels))
u = np.zeros_like(potential_temperature)
v = np.zeros_like(potential_temperature)
w = np.zeros_like(potential_temperature)
atmosp_addition = np.zeros_like(potential_temperature)
##########################
# read temperature and density in from standard atmosphere
f = open("standard_atmosphere.txt", "r")
standard_temp = []
standard_pressure = []
for x in f:
h, t, r, p = x.split()
standard_temp.append(float(t))
standard_pressure.append(float(p))
f.close()
# density_profile = np.interp(x=heights/1E3,xp=standard_height,fp=standard_density)
temp_profile = np.interp(x=pressure_levels[::-1],xp=standard_pressure[::-1],fp=standard_temp[::-1])[::-1]
for k in range(nlevels):
potential_temperature[:,:,k] = temp_profile[k]
potential_temperature = low_level.t_to_theta(potential_temperature,pressure_levels)
geopotential = np.zeros_like(potential_temperature)
sigma = np.zeros_like(pressure_levels)
kappa = 287/1000
for i in range(len(sigma)):
sigma[i] = 1E3*(pressure_levels[i]/pressure_levels[0])**kappa
###########################
heat_capacity_earth = np.zeros_like(temperature_world) + 1E6
albedo_variance = 0.001
albedo = np.random.uniform(-albedo_variance,albedo_variance, (nlat, nlon)) + 0.2
specific_gas = 287
thermal_diffusivity_roc = 1.5E-6
# define planet size and various geometric constants
circumference = 2*np.pi*planet_radius
circle = np.pi*planet_radius**2
sphere = 4*np.pi*planet_radius**2
# define how far apart the gridpoints are: note that we use central difference derivatives, and so these distances are actually twice the distance between gridboxes
dy = circumference/nlat
dx = np.zeros(nlat)
coriolis = np.zeros(nlat) # also define the coriolis parameter here
angular_speed = 2*np.pi/day
for i in range(nlat):
dx[i] = dy*np.cos(lat[i]*np.pi/180)
coriolis[i] = angular_speed*np.sin(lat[i]*np.pi/180)
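# Worked numbers for the defaults above (illustrative only): the circumference is
# 2*pi*6.4E6 ~ 4.0E7 m and nlat = 61 at 3 degree resolution, so dy ~ 6.6E5 m;
# angular_speed = 2*pi/day ~ 7.3E-5 rad s^-1, which with the definition used here
# gives a coriolis value of ~5.1E-5 s^-1 at 45 degrees latitude.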
#################### SHOW TIME ####################
pole_low_index_S = np.where(lat > pole_lower_latitude_limit)[0][0]
pole_high_index_S = np.where(lat > pole_higher_latitude_limit)[0][0]
# initialise grid
polar_grid_resolution = dx[-pole_low_index_S]
size_of_grid = planet_radius*np.cos(lat[-pole_low_index_S]*np.pi/180)
### south pole ###
grid_x_values_S = np.arange(-size_of_grid,size_of_grid,polar_grid_resolution)
grid_y_values_S = np.arange(-size_of_grid,size_of_grid,polar_grid_resolution)
grid_xx_S,grid_yy_S =
|
np.meshgrid(grid_x_values_S,grid_y_values_S)
|
numpy.meshgrid
|
#Author: <NAME> (c) 2018
from collections import Counter
import dill
import glob
import gzip
import igraph as ig
import itertools
import leidenalg
#import magic
import matplotlib
from matplotlib import pyplot
import numba
import numpy
import os
import pickle
from plumbum import local
import random
import re
import scipy
from scipy.cluster import hierarchy
import scipy.sparse as sps
from scipy.spatial import distance
import scipy.stats as stats
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from sklearn import neighbors
from sklearn import metrics
import sys
import umap
def find_nearest_genes(peak_files, out_subdir, refseq_exon_bed):
#get unix utilities
bedtools, sort, cut, uniq, awk = local['bedtools'], local['sort'], local['cut'], local['uniq'], local['awk']
#process the peak files to find nearest genes
nearest_genes = []
for path in sorted(peak_files):
out_path = os.path.join(out_subdir, os.path.basename(path).replace('.bed', '.nearest_genes.txt'))
cmd = (bedtools['closest', '-D', 'b', '-io', '-id', '-a', path, '-b', refseq_exon_bed] |
cut['-f1,2,3,5,9,12'] | #fields are chrom, start, stop, peak sum, gene name, distance
awk['BEGIN{OFS="\t"}{if($6 > -1200){print($1, $2, $3, $6, $5, $4);}}'] |
sort['-k5,5', '-k6,6nr'] |
cut['-f5,6'])()
with open(out_path, 'w') as out:
prev_gene = None
for idx, line in enumerate(str(cmd).strip().split('\n')):
if prev_gene is None or not line.startswith(prev_gene):
# print(line)
line_split = line.strip().split()
prev_gene = line_split[0]
out.write(line + '\n')
nearest_genes.append(out_path)
return nearest_genes
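# Hypothetical usage sketch (the paths and file layout are assumptions, not part of
# the original pipeline): given a directory of peak BED files and a RefSeq exon BED,
# the helper above writes one "<peaks>.nearest_genes.txt" per input into out_subdir
# and returns the list of output paths, e.g.
#
#   peak_files = sorted(glob.glob('peaks/*.bed'))
#   nearest = find_nearest_genes(peak_files, 'nearest_genes_out', 'refseq_exons.bed')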
def load_expr_db(db_path):
if os.path.basename(db_path) == 'RepAvgGeneTPM.csv':
with open(db_path) as lines_in:
db_headers = lines_in.readline().strip().split(',')[1:]
db_vals = numpy.loadtxt(db_path, delimiter=',', skiprows=1, dtype=object)[:,1:]
elif os.path.basename(db_path) == 'gexplore_l2_tissue_expr.jonathan_updated.txt.gz':
db_headers, db_vals = load_expr_db2(db_path)
else:
with open(db_path) as lines_in:
db_headers = lines_in.readline().strip().split('\t')
db_vals = numpy.loadtxt(db_path, delimiter='\t', skiprows=1, dtype=object)
print('Loaded DB shape: {!s}'.format(db_vals.shape))
return (db_headers, db_vals)
def load_expr_db2(db_path):
db_vals =
|
numpy.loadtxt(db_path, delimiter='\t', skiprows=1, dtype=object)
|
numpy.loadtxt
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import copy
import os
import numpy as np
from collections import Counter, OrderedDict
from tfbldr.datasets import notes_to_midi
from tfbldr.datasets import midi_to_notes
from functools import reduce
basedir = "/u/kastner/music_npz_jos"
"""
cnt = Counter()
for fnpz in sorted(os.listdir(basedir)):
print(fnpz)
d = np.load(basedir + os.sep + fnpz)
if len(d['centered']) < 1:
print(fnpz + " had zero length")
continue
for mi in range(len(d['centered'])):
measure = d['centered'][mi]
cnt.update(measure.ravel())
"""
all_filenames = []
all_measurenums = []
all_piano_rolls = []
all_pitch_duration = []
all_functional_notes = []
all_functional_notes_idx = []
all_functional_notes_kv = []
all_functional_voicings_idx = []
all_functional_voicings_kv = []
all_keyframes = []
all_chords_names = []
all_indexed = []
all_absolutes = []
all_keys = []
all_modes = []
all_scalenotes = []
for fnpz in sorted(os.listdir(basedir)):
print(fnpz)
    try:
        d = np.load(basedir + os.sep + fnpz)
    except:
        print("Unable to load {}, continuing".format(fnpz))
        continue
if len(d["centered"]) < 1 or 'keyname' not in d:
print(fnpz + " had zero length or no key")
continue
prs = copy.deepcopy(d["piano_rolls"])
pds = copy.deepcopy(d["pitch_duration"])
loaded_chords_names = copy.deepcopy(d["chords_names"])
key = d["keyname"]
mode = d["keymode"]
# last note is octave of the root, skip it
notes = d["keynotes"][:-1]
assert sorted(list(set(d["keynotes"]))) == sorted(list(notes))
scale_lu = {}
scale_lu["R"] = 0
ordered_scale = ["R"]
counter = 1
for octave in ["1", "2", "3", "4", "5"]:
for note in notes:
ordered_scale.append(note + octave)
scale_lu[note + octave] = counter
counter += 1
norm_lu = {v: k for k, v in scale_lu.items()}
notes_lu = {os: notes_to_midi([[os]])[0][0] for os in ordered_scale}
notes_lu["R"] = notes_to_midi([["R"]])[0][0]
midi_lu = {v: k for k, v in notes_lu.items()}
filename = fnpz
keyframe_lu = {v: k for k, v in enumerate(np.arange(-13, 14 + 1))}
diff_lu = {v: k for k, v in keyframe_lu.items()}
piano_rolls = []
pitch_duration = []
measurenums = []
keyframes = []
indexed = []
absolutes = []
keys = []
modes = []
chords_names = []
scalenotes = []
functional_notes = []
functional_notes_idx = []
functional_voicings_idx = []
func_notes_lu = {}
func_notes_lu["R"] = 0
# R is always in the lowest voicing -> R0
counter = 1
notes = [n for n in notes]
for n1 in notes + ["R"]:
for n2 in notes + ["R"]:
for n3 in notes + ["R"]:
for n4 in notes + ["R"]:
# hack to represent it in the form we get from midi_to_notes
# basically changing E-3 -> Eb3 , etc
if n1 != "R":
nnn1 = midi_to_notes(notes_to_midi([[n1 + octave]]))[0][0][:-1]
else:
nnn1 = n1
if n2 != "R":
nnn2 = midi_to_notes(notes_to_midi([[n2 + octave]]))[0][0][:-1]
else:
nnn2 = n2
if n3 != "R":
nnn3 = midi_to_notes(notes_to_midi([[n3 + octave]]))[0][0][:-1]
else:
nnn3 = n3
if n4 != "R":
nnn4 = midi_to_notes(notes_to_midi([[n4 + octave]]))[0][0][:-1]
else:
nnn4 = n4
func_notes_lu[tuple([nnn1, nnn2, nnn3, nnn4])] = counter
counter += 1
func_voicings_lu = {}
# hardcode for 4 voices for now
count = 0
for o1 in [0, 1, 2, 3, 4, 5]:
for o2 in [0, 1, 2, 3, 4, 5]:
for o3 in [0, 1, 2, 3, 4, 5]:
for o4 in [0, 1, 2, 3, 4, 5]:
oo = [o1, o2, o3, o4]
nz = [ooi for ooi in oo if ooi != 0]
# can only be an ordering with at least 2
if len(nz) == 0 or len(nz) == 1:
func_voicings_lu[tuple(oo)] = count
count += 1
else:
rr = range(len(nz))
ordered = True
maxv = 5
for i in rr:
if nz[i] <= maxv:
maxv = nz[i]
else:
ordered = False
# allow voice crossing in the middle 2 voices?
if ordered:
func_voicings_lu[tuple(oo)] = count
count += 1
inv_func_voicings = {v: k for k, v in func_voicings_lu.items()}
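    # Illustrative note (added for clarity): with four voices and octave codes 0-5,
    # a tuple such as (3, 2, 2, 1) is accepted because its non-rest entries never
    # increase from left to right, whereas (1, 3, 2, 2) is rejected; tuples with
    # zero or one sounding voice are always kept.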
last_non_rest = [0, 0, 0, 0]
for n in range(len(prs)):
# key and mode delta normalized repr
# 0 rest, 1:28 is [-13, 14]
pr_i = prs[n]
pd_i = pds[n]
chords_names_i = loaded_chords_names[n]
if len(set([lcn for lcn in loaded_chords_names[n]])) != 1:
print("got multiple chords")
from IPython import embed; embed(); raise ValueError()
if pr_i.shape[-1] != 4:
#print("3 voices, skip for now")
continue
if pr_i.shape[0] != 48:
new_pr_i = np.zeros((48, pr_i.shape[-1]))
if pr_i.shape[0] == 32:
# 32 into 48 is 4 into 6
ii = 0
oi = 0
while True:
nt = pr_i[ii:ii + 4]
for v in range(pr_i.shape[-1]):
if len(np.unique(nt[:, v])) != 1:
if len(np.unique(nt[:, v])) == 2:
mn = np.min(nt[:, v])
mx = np.max(nt[:, v])
if np.sum(nt[:, v] == mn) == 2:
nt[:, v] = mn
else:
nt[:, v] = mx
else:
print("note changed :|")
from IPython import embed; embed(); raise ValueError()
new_pr_i[oi:oi + 6] = nt[0][None] # ii:ii + 3 all forced the same above
oi = oi + 6
ii = ii + 4
if ii >= 32:
break
pr_i = new_pr_i
else:
#print("not length 48, needs normalization")
continue
loop_reset = False
for unote in
|
np.unique(pr_i)
|
numpy.unique
|
# -*- coding: utf-8 -*-
# Built-in
# Common
import numpy as np
import scipy.sparse as scpsp
# specific
from . import _generic_check
_LOPERATORS_INT = [
'D1',
'D2',
'D3',
'D0N1',
'D0N2',
'D1N2',
'D2N2',
'D3N2',
]
# #############################################################################
# #############################################################################
# Mesh2DRect - bsplines - operators
# #############################################################################
def _get_mesh2dRect_operators_check(
deg=None,
operator=None,
geometry=None,
sparse_fmt=None,
):
# deg
deg = _generic_check._check_var(
deg, 'deg',
types=int,
allowed=[0, 1, 2, 3],
)
# operator
operator = _generic_check._check_var(
operator, 'operator',
default='D0N1',
types=str,
allowed=_LOPERATORS_INT,
)
# geometry
geometry = _generic_check._check_var(
geometry, 'geometry',
default='toroidal',
types=str,
allowed=['toroidal', 'linear'],
)
# sparse_fmt
sparse_fmt = _generic_check._check_var(
sparse_fmt, 'sparse_fmt',
default='csc',
types=str,
allowed=['dia', 'csr', 'csc', 'lil'],
)
# dim
if operator == 'D1':
dim = 'origin / m'
elif operator == 'D2':
dim = 'origin / m2'
elif operator == 'D3':
dim = 'origin / m3'
elif operator == 'D0N1':
if geometry == 'linear':
dim = 'origin x m2'
else:
dim = 'origin x m3/rad'
elif operator == 'D0N2':
if geometry == 'linear':
dim = 'origin2 x m2'
else:
dim = 'origin2 x m3/rad'
elif operator == 'D1N2':
if geometry == 'linear':
dim = 'origin2'
else:
dim = 'origin2 x m/rad'
elif operator == 'D2N2':
if geometry == 'linear':
dim = 'origin2 / m2'
else:
dim = 'origin2 / (m2.rad)'
return operator, geometry, sparse_fmt, dim
def get_mesh2dRect_operators(
operator=None,
geometry=None,
deg=None,
knotsx_mult=None,
knotsy_mult=None,
knotsx_per_bs=None,
knotsy_per_bs=None,
overlap=None,
sparse_fmt=None,
cropbs_flat=None,
# specific to deg = 0
cropbs=None,
centered=None,
):
# ------------
# check inputs
operator, geometry, sparse_fmt, dim = _get_mesh2dRect_operators_check(
deg=deg,
operator=operator,
geometry=geometry,
sparse_fmt=sparse_fmt,
)
# ------------
# prepare
nx, ny = knotsx_per_bs.shape[1], knotsy_per_bs.shape[1]
kR = np.tile(knotsx_per_bs, ny)
kZ = np.repeat(knotsy_per_bs, nx, axis=1)
nbs = nx*ny
if cropbs_flat is None:
cropbs_flat = False
if cropbs_flat is not False:
c0 = (
isinstance(cropbs_flat, np.ndarray)
and cropbs_flat.shape == (nbs,)
and cropbs_flat.dtype == np.bool_
)
if not c0:
msg = (
f"Arg cropbs_flat must be a bool array of shape {(nbs,)}\n"
f"Provided: {cropbs_flat.shape}"
)
raise Exception(msg)
nbscrop = cropbs_flat.sum()
shape = (nbscrop, nbscrop)
indbs = -np.ones((nbs,), dtype=int)
indbs[cropbs_flat] = np.arange(0, nbscrop)
else:
shape = (nbs, nbs)
indbs = np.arange(0, nbs)
if 'N2' in operator and deg >= 1:
# get intersection indices array
if cropbs_flat is False:
nbtot = np.sum(overlap >= 0)
else:
ind = cropbs_flat[None, :] & cropbs_flat[overlap]
nbtot = np.sum(ind)
# prepare data and indices arrays
if operator == 'D0N2':
data = np.full((nbtot,), np.nan)
row = np.zeros((nbtot,), dtype=int)
column = np.zeros((nbtot,), dtype=int)
elif operator == 'D1N2':
datadR = np.full((nbtot,), np.nan)
datadZ = np.full((nbtot,), np.nan)
row = np.zeros((nbtot,), dtype=int)
column = np.zeros((nbtot,), dtype=int)
elif operator == 'D2N2':
datad2R = np.full((nbtot,), np.nan)
datad2Z = np.full((nbtot,), np.nan)
datadRZ = np.full((nbtot,), np.nan)
row = np.zeros((nbtot,), dtype=int)
column = np.zeros((nbtot,), dtype=int)
# ------------
# D0 - integral
if operator == 'D0N1':
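        # note: the 'toroidal' branches integrate an extra factor R coming from
        # the cylindrical volume element (taken per radian, cf. the '/rad' dims
        # above), which is why they involve kR**2 / kR**3 terms where the
        # linear branches use plain lengths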
if deg == 0 and geometry == 'linear':
opmat = (kR[1, :] - kR[0, :]) * (kZ[1, :] - kZ[0, :])
elif deg == 0 and geometry == 'toroidal':
opmat = 0.5 * (kR[1, :]**2 - kR[0, :]**2) * (kZ[1, :] - kZ[0, :])
elif deg == 1 and geometry == 'linear':
opmat = 0.25 * (kR[2, :] - kR[0, :]) * (kZ[2, :] - kZ[0, :])
elif deg == 1 and geometry == 'toroidal':
opmat = (
0.5
* (kR[2, :]**2 - kR[0, :]**2 + kR[1, :]*(kR[2, :]-kR[0, :]))
* (kZ[2, :] - kZ[0, :])
) / 6.
elif deg == 2:
iZ1 = (kZ[1, :] - kZ[0, :])**2 / (3.*(kZ[2, :] - kZ[0, :]))
iZ21 = (
(
kZ[2, :]**2
- 2*kZ[1, :]**2
+ kZ[1, :]*kZ[2, :]
+ 3.*kZ[0, :]*(kZ[1, :] - kZ[2, :])
)
/ (6.*(kZ[2, :]-kZ[0, :]))
)
iZ22 = (
(
-2.*kZ[2, :]**2
+ kZ[1, :]**2
+ kZ[1, :]*kZ[2, :]
+ 3.*kZ[3, :]*(kZ[2, :] - kZ[1, :])
)
/ (6.*(kZ[3, :] - kZ[1, :]))
)
iZ3 = (kZ[3, :] - kZ[2, :])**2 / (3.*(kZ[3, :] - kZ[1, :]))
if geometry == 'linear':
iR1 = (kR[1, :] - kR[0, :])**2 / (3.*(kR[2, :] - kR[0, :]))
iR21 = (
(
kR[2, :]**2
- 2. * kR[1, :]**2
+ kR[1, :] * kR[2, :]
+ 3. * kR[0, :] * (kR[1, :] - kR[2, :])
)
/ (6. * (kR[2, :] - kR[0, :]))
)
iR22 = (
(
-2. * kR[2, :]**2
+ kR[1, :]**2
+ kR[1, :] * kR[2, :]
+ 3. * kR[3, :] * (kR[2, :] - kR[1, :])
)
/ (6.*(kR[3, :] - kR[1, :]))
)
iR3 = (kR[3, :] - kR[2, :])**2 / (3.*(kR[3, :] - kR[1, :]))
else:
iR1 = (
(
3.*kR[1, :]**3
+ kR[0, :]**3
- 5.*kR[0, :] * kR[1, :]**2
+ kR[0, :]**2 * kR[1, :]
)
/ (12. * (kR[2, :] - kR[0, :]))
)
iR21 = (
(
kR[2, :]**3
- 3.*kR[1, :]**3
+ kR[1, :]**2 * kR[2, :]
+ kR[1, :] * kR[2, :]**2
- 2.*kR[0, :] * kR[2, :]**2
- 2.*kR[0, :] * kR[1, :] * kR[2, :]
+ 4.*kR[0, :] * kR[1, :]**2
)
/ (12. * (kR[2, :] - kR[0, :]))
)
iR22 = (
(
-3.*kR[2, :]**3
+ kR[1, :]**3
+ kR[1, :] * kR[2, :]**2
+ kR[1, :]**2 * kR[2, :]
+ 4.*kR[2, :]**2 * kR[3, :]
- 2.*kR[1, :]*kR[2, :]*kR[3, :]
- 2.*kR[1, :]**2 * kR[3, :]
)
/ (12. * (kR[3, :] - kR[1, :]))
)
iR3 = (
(
kR[3, :]**3
+ 3.*kR[2, :]**3
- 5.*kR[2, :]**2 * kR[3, :]
+ kR[2, :]*kR[3, :]**2
) / (12. * (kR[3, :] - kR[1, :]))
)
opmat = (iR1 + iR21 + iR22 + iR3) * (iZ1 + iZ21 + iZ22 + iZ3)
elif deg == 3:
msg = "Integral D0N1 not implemented for deg=3 yet!"
raise NotImplementedError(msg)
# crop
if cropbs_flat is not False:
opmat = opmat[cropbs_flat]
# ------------
# D1 - gradient
elif operator == 'D1':
# Treat separately discrete case
if deg == 0:
gradR, gradZ = _D1_Deg0(
knotsx_mult=knotsx_mult,
knotsy_mult=knotsy_mult,
cropbs=cropbs,
cropbs_flat=cropbs_flat,
nx=nx,
ny=ny,
nbs=nbs,
centered=centered,
)
opmat = (
scpsp.csc_matrix(gradR),
scpsp.csc_matrix(gradZ),
)
elif deg >= 1:
raise NotImplementedError()
# ------------
# D0N2
elif operator == 'D0N2' and deg == 0:
iZ = kZ[1, :] - kZ[0, :]
if geometry == 'linear':
iR = kR[1, :] - kR[0, :]
else:
iR = 0.5 * (kR[1, :]**2 - kR[0, :]**2)
if cropbs_flat is not False:
iR = iR[cropbs_flat]
iZ = iZ[cropbs_flat]
opmat = scpsp.diags(
[iR*iZ],
[0],
shape=None,
format=sparse_fmt,
dtype=float,
)
elif operator == 'D0N2':
# pre-compute integrals
if deg == 1:
iR = _D0N2_Deg1(knotsx_mult, geometry=geometry)
iZ = _D0N2_Deg1(knotsy_mult, geometry='linear')
elif deg == 2:
iR = _D0N2_Deg2(knotsx_mult, geometry=geometry)
iZ = _D0N2_Deg2(knotsy_mult, geometry='linear')
elif deg == 3:
msg = "Integral D0N2 not implemented for deg=3!"
raise NotImplementedError(msg)
# set non-diagonal elements
i0 = 0
for ir in range(nx):
for iz in range(ny):
iflat = ir + iz*nx
if cropbs_flat is not False and not cropbs_flat[iflat]:
continue
# general case
overlapi = overlap[:, iflat][overlap[:, iflat] > iflat]
# diagonal element
data[i0] = iR[0, ir] * iZ[0, iz]
row[i0] = indbs[iflat]
column[i0] = indbs[iflat]
i0 += 1
# non-diagonal elements (symmetric)
for jflat in overlapi:
if cropbs_flat is not False and not cropbs_flat[jflat]:
continue
jr = jflat % nx
jz = jflat // nx
# store (i, j) and (j, i) (symmetric matrix)
if jr >= ir:
iiR = iR[jr - ir, ir]
else:
iiR = iR[abs(jr - ir), jr]
if jz >= iz:
iiZ = iZ[jz - iz, iz]
else:
iiZ = iZ[abs(jz - iz), jz]
data[i0:i0+2] = iiR * iiZ
row[i0:i0+2] = (indbs[iflat], indbs[jflat])
column[i0:i0+2] = (indbs[jflat], indbs[iflat])
i0 += 2
assert i0 == nbtot
opmat = scpsp.csc_matrix((data, (row, column)), shape=shape)
# ------------
# D1N2
elif operator == 'D1N2':
# Treat separately discrete case
if deg == 0:
gradR, gradZ = _D1_Deg0(
knotsx_mult=knotsx_mult,
knotsy_mult=knotsy_mult,
cropbs=cropbs,
cropbs_flat=cropbs_flat,
nx=nx,
ny=ny,
nbs=nbs,
)
# surface elements
dZ = np.repeat(knotsy_mult[1:] - knotsy_mult[:-1], nx)
if geometry == 'linear':
dR = np.tile(
knotsx_mult[1:] - knotsx_mult[:-1],
ny,
)
else:
dR = np.tile(
0.5*(knotsx_mult[1:]**2 - knotsx_mult[:-1]**2),
ny,
)
if cropbs_flat is not False:
dR = dR[cropbs_flat]
dZ = dZ[cropbs_flat]
opmat = (
scpsp.csc_matrix(gradR.T.dot(gradR*(dR*dZ)[:, None])),
scpsp.csc_matrix(gradZ.T.dot(gradZ*(dR*dZ)[:, None])),
)
else:
# pre-compute integrals for exact operator deg >= 1
if deg == 1:
idR = _D1N2_Deg1(knotsx_mult, geometry=geometry)
idZ = _D1N2_Deg1(knotsy_mult, geometry='linear')
iR = _D0N2_Deg1(knotsx_mult, geometry=geometry)
iZ = _D0N2_Deg1(knotsy_mult, geometry='linear')
elif deg == 2:
idR = _D1N2_Deg2(knotsx_mult, geometry=geometry)
idZ = _D1N2_Deg2(knotsy_mult, geometry='linear')
iR = _D0N2_Deg2(knotsx_mult, geometry=geometry)
iZ = _D0N2_Deg2(knotsy_mult, geometry='linear')
elif deg == 3:
msg = "Integral D1N2 not implemented for deg=3!"
raise NotImplementedError(msg)
# set non-diagonal elements
i0 = 0
for ir in range(nx):
for iz in range(ny):
iflat = ir + iz*nx
if cropbs_flat is not False and not cropbs_flat[iflat]:
continue
# general case
overlapi = overlap[:, iflat][overlap[:, iflat] > iflat]
# diagonal element
datadR[i0] = idR[0, ir] * iZ[0, iz]
datadZ[i0] = iR[0, ir] * idZ[0, iz]
row[i0] = indbs[iflat]
column[i0] = indbs[iflat]
i0 += 1
# non-diagonal elements (symmetric)
for jflat in overlapi:
if cropbs_flat is not False and not cropbs_flat[jflat]:
continue
jr = jflat % nx
jz = jflat // nx
# store (i, j) and (j, i) (symmetric matrix)
if jr >= ir:
iidR = idR[jr - ir, ir]
iiR = iR[jr - ir, ir]
else:
iidR = idR[abs(jr - ir), jr]
iiR = iR[abs(jr - ir), jr]
if jz >= iz:
iidZ = idZ[jz - iz, iz]
iiZ = iZ[jz - iz, iz]
else:
iidZ = idZ[abs(jz - iz), jz]
iiZ = iZ[abs(jz - iz), jz]
datadR[i0:i0+2] = iidR * iiZ
datadZ[i0:i0+2] = iiR * iidZ
row[i0:i0+2] = (indbs[iflat], indbs[jflat])
column[i0:i0+2] = (indbs[jflat], indbs[iflat])
i0 += 2
assert i0 == nbtot
opmat = (
scpsp.csc_matrix((datadR, (row, column)), shape=shape),
scpsp.csc_matrix((datadZ, (row, column)), shape=shape),
)
# ------------
# D2N2
elif operator == 'D2N2':
# pre-compute integrals
if deg == 2:
id2R = _D2N2_Deg2(knotsx_mult, geometry=geometry)
id2Z = _D2N2_Deg2(knotsy_mult, geometry='linear')
idR = _D1N2_Deg2(knotsx_mult, geometry=geometry)
idZ = _D1N2_Deg2(knotsy_mult, geometry='linear')
iR = _D0N2_Deg2(knotsx_mult, geometry=geometry)
iZ = _D0N2_Deg2(knotsy_mult, geometry='linear')
elif deg == 3:
msg = "Integral D2N2 not implemented for deg=3!"
raise NotImplementedError(msg)
# set non-diagonal elements
i0 = 0
for ir in range(nx):
for iz in range(ny):
iflat = ir + iz*nx
if cropbs_flat is not False and not cropbs_flat[iflat]:
continue
# general case
overlapi = overlap[:, iflat][overlap[:, iflat] > iflat]
# diagonal element
datad2R[i0] = id2R[0, ir] * iZ[0, iz]
datad2Z[i0] = iR[0, ir] * id2Z[0, iz]
datadRZ[i0] = idR[0, ir] * idZ[0, iz]
row[i0] = indbs[iflat]
column[i0] = indbs[iflat]
i0 += 1
# non-diagonal elements (symmetric)
for jflat in overlapi:
if cropbs_flat is not False and not cropbs_flat[jflat]:
continue
jr = jflat % nx
jz = jflat // nx
# store (i, j) and (j, i) (symmetric matrix)
if jr >= ir:
iid2R = id2R[jr - ir, ir]
iidR = idR[jr - ir, ir]
iiR = iR[jr - ir, ir]
else:
iid2R = id2R[abs(jr - ir), jr]
iidR = idR[abs(jr - ir), jr]
iiR = iR[abs(jr - ir), jr]
if jz >= iz:
iid2Z = id2Z[jz - iz, iz]
iidZ = idZ[jz - iz, iz]
iiZ = iZ[jz - iz, iz]
else:
iid2Z = id2Z[abs(jz - iz), jz]
iidZ = idZ[abs(jz - iz), jz]
iiZ = iZ[abs(jz - iz), jz]
datad2R[i0:i0+2] = iid2R * iiZ
datad2Z[i0:i0+2] = iiR * iid2Z
datadRZ[i0:i0+2] = iidR * iidZ
row[i0:i0+2] = (indbs[iflat], indbs[jflat])
column[i0:i0+2] = (indbs[jflat], indbs[iflat])
i0 += 2
assert i0 == nbtot
opmat = (
scpsp.csc_matrix((datad2R, (row, column)), shape=shape),
scpsp.csc_matrix((datad2Z, (row, column)), shape=shape),
scpsp.csc_matrix((datadRZ, (row, column)), shape=shape),
)
# ------------
# D3N2
elif operator == 'D3N2' and deg == 3:
raise NotImplementedError("Integral D3N2 not implemented for deg=3!")
return opmat, operator, geometry, dim
# #############################################################################
# #############################################################################
# Operator sub-routines: D0N2
# #############################################################################
def _D0N2_Deg1_full_linear(k0, k2):
""" from 1d knots, return int_0^2 x b**2(x) dx """
return (k2 - k0) / 3.
def _D0N2_Deg1_full_toroidal(k0, k1, k2):
""" from 1d knots, return int_0^2 x b**2(x) dx """
intt = np.zeros((k0.size,))
intt[1:] += (
(3. * k1**3 - 5.*k0*k1**2 + k1*k0**2 + k0**3)[1:]
/ (12. * (k1 - k0))[1:]
)
    intt[:-1] += (
+ (3.*k1**3 - 5.*k2*k1**2 + k1*k2**2 + k2**3)[:-1]
/ (12. * (k2 - k1))[:-1]
)
return intt
def _D0N2_Deg1_2_linear(k1, k2):
""" from 1d knots, return int_0^2 x b**2(x) dx """
return (k2 - k1) / 6.
def _D0N2_Deg1_2_toroidal(k1, k2):
""" from 1d knots, return int_0^2 x b**2(x) dx """
return (k2**2 - k1**2) / 12.
def _D0N2_Deg1(knots, geometry=None):
if geometry == 'linear':
integ = np.array([
_D0N2_Deg1_full_linear(
knots[:-2],
knots[2:],
),
_D0N2_Deg1_2_linear(
knots[1:-1],
knots[2:]
),
])
else:
integ = np.array([
_D0N2_Deg1_full_toroidal(
knots[:-2],
knots[1:-1],
knots[2:],
),
_D0N2_Deg1_2_toroidal(
knots[1:-1],
knots[2:]
),
])
return integ
def _D0N2_Deg2(knots, geometry=None):
if geometry == 'linear':
ffull = _D0N2_Deg2_full_linear
f3 = _D0N2_Deg2_3_linear
f2 = _D0N2_Deg2_2_linear
else:
ffull = _D0N2_Deg2_full_toroidal
f3 = _D0N2_Deg2_3_toroidal
f2 = _D0N2_Deg2_2_toroidal
integ = np.array([
ffull(
knots[:-3],
knots[1:-2],
knots[2:-1],
knots[3:],
),
f3(
knots[:-3],
knots[1:-2],
knots[2:-1],
knots[3:],
np.r_[knots[4:], np.nan],
),
f2(
knots[1:-2],
knots[2:-1],
knots[3:],
np.r_[knots[4:], np.nan],
),
])
return integ
def _D0N2_Deg2_full_linear(k0, k1, k2, k3):
""" from 1d knots, return int_0^3 b**2(x) dx """
intt = np.zeros((k0.size,))
intt[1:] += (
(k1 - k0)[1:]**3 / (5.*(k2 - k0)[1:]**2)
+ (k2 - k1)[1:]
* (
10.*k0**2 + 6.*k1**2 + 3.*k1*k2 + k2**2 - 5.*k0*(3.*k1 + k2)
)[1:] / (30.*(k2 - k0)[1:]**2)
)
intt[1:-1] += (
(k2 - k1)[1:-1]
* (
-3.*k1**2 - 4.*k1*k2 - 3.*k2**2 + 5.*k0*(k1 + k2 - 2.*k3)
+ 5.*k3*(k1 + k2)
)[1:-1] / (60.*(k2 - k0)*(k3 - k1))[1:-1]
)
intt[:-1] += (
(k2 - k1)[:-1]
* (
10.*k3**2 + 6.*k2**2 + 3.*k1*k2 + k1**2 - 5.*k3*(3.*k2 + k1)
)[:-1] / (30.*(k3 - k1)[:-1]**2)
+ (k3 - k2)[:-1]**3 / (5.*(k3 - k1)[:-1]**2)
)
return intt
def _D0N2_Deg2_full_toroidal(k0, k1, k2, k3):
""" from 1d knots, return int_0^3 b**2(x) dx """
intt = np.zeros((k0.size,))
intt[1:] += (
(5.*k1 + k0)[1:]*(k1 - k0)[1:]**3 / (30.*(k2 - k0)[1:]**2)
+ (k2 - k1)[1:]
* (
10*k1**3 + 6.*k1**2*k2 + 3.*k1*k2**2
+ k2**3 + 5.*k0**2*(3.*k1 + k2)
- 4.*k0*(6.*k1**2 + 3.*k1*k2 + k2**2)
)[1:] / (60.*(k2 - k0)**2)[1:]
)
intt[:-1] += (
(5.*k2 + k3)[:-1]*(k3 - k2)[:-1]**3 / (30.*(k3 - k1)[:-1]**2)
        + (k2 - k1)[:-1]
* (
10*k2**3 + 6.*k2**2*k1 + 3.*k2*k1**2
+ k1**3 + 5.*k3**2*(3.*k2 + k1)
- 4.*k3*(6.*k2**2 + 3.*k2*k1 + k1**2)
)[:-1] / (60.*(k3 - k1)**2)[:-1]
)
intt[1:-1] += (
(k2 - k1)[1:-1]
* (
- 2.*k1**3 - 2.*k2**3
- 3.*k1*k2*(k1 + k2)
- 5.*k0*k3*(k1 + k2)
+ (k0 + k3)*(3.*k2**2 + 4.*k1*k2 + 3.*k1**2)
)[1:-1] / (30.*(k3 - k1)*(k2 - k0))[1:-1]
)
return intt
def _D0N2_Deg2_3_linear(k0, k1, k2, k3, k4):
""" from 1d knots, return int_0^3 b**2(x) dx """
intt = np.zeros((k0.size,))
intt[1:-1] += (
(3.*k2 + 2.*k1 - 5.*k0)[1:-1]*(k2 - k1)[1:-1]**2
/ (60.*(k3 - k1)*(k2 - k0))[1:-1]
)
intt[:-2] += (
+ (5.*k4 - 2.*k3 - 3.*k2)[:-2]*(k3 - k2)[:-2]**2
/ (60.*(k4 - k2)*(k3 - k1))[:-2]
)
intt[:-1] += (
+ (5.*k3 - 4.*k2 - k1)[:-1]*(k2 - k1)[:-1]**2
/ (20.*(k3 - k1)**2)[:-1]
+ (4.*k2 + k3 - 5.*k1)[:-1]*(k3 - k2)[:-1]**2
/ (20.*(k3 - k1)**2)[:-1]
)
return intt
def _D0N2_Deg2_3_toroidal(k0, k1, k2, k3, k4):
""" from 1d knots, return int_0^3 b**2(x) dx """
intt = np.zeros((k0.size,))
intt[:-1] = (
(k2 - k1)[:-1]**2
* (-10*k2**2 - 4*k1*k2 - k1**2 + 3*k3*(4*k2 + k1))[:-1]
/ (60.*(k3 - k1)**2)[:-1]
+ (k3 - k2)[:-1]**2
* (k3**2 + 4*k3*k2 + 10*k2**2 - 3*k1*(k3 + 4*k2))[:-1]
/ (60*(k3 - k1)**2)[:-1]
)
    intt[1:-1] += (
(k2 - k1)[1:-1]**2
* (2*k2**2 + 2*k1*k2 + k1**2 - k0*(3.*k2 + 2.*k1))[1:-1]
/ (60.*(k3 - k1)*(k2 - k0))[1:-1]
)
    intt[:-2] += (
+ (k3 - k2)[:-2]**2
* (-k3**2 - 2*k3*k2 - 2*k2**2 + k4*(2*k3 + 3*k2))[:-2]
/ (60*(k4 - k2)*(k3 - k1))[:-2]
)
return intt
def _D0N2_Deg2_2_linear(k1, k2, k3, k4):
""" from 1d knots, return int_0^3 b**2(x) dx """
intt = np.zeros((k1.size,))
intt[:-2] = (
(k3 - k2)[:-2]**3
/ (30.*(k4 - k2)*(k3 - k1))[:-2]
)
return intt
def _D0N2_Deg2_2_toroidal(k1, k2, k3, k4):
""" from 1d knots, return int_0^3 b**2(x) dx """
intt = np.zeros((k1.size,))
intt[:-2] = (
(k3 + k2)[:-2]*(k3 - k2)[:-2]**3
/ (60.*(k4 - k2)[:-2]*(k3 - k1)[:-2])
)
return intt
# #############################################################################
# #############################################################################
# Operator sub-routines: D1 - deg = 0 - discrete
# #############################################################################
def _D1_Deg0(
knotsx_mult=None,
knotsy_mult=None,
cropbs=None,
cropbs_flat=None,
nx=None,
ny=None,
nbs=None,
centered=None,
):
""" Discrete apprmixation of the gradient for pixels
Centered when possible
Non-centered otherwise
"""
# check input
centered = _generic_check._check_var(
centered, 'centered',
types=bool,
default=False,
)
# initialize output
datadR = np.zeros((nbs, nbs), dtype=float)
datadZ = np.zeros((nbs, nbs), dtype=float)
if cropbs is False:
cropbs = np.ones((nx, ny), dtype=bool)
# positions of centers
centsR = 0.5*(knotsx_mult[1:] + knotsx_mult[:-1])
centsZ = 0.5*(knotsy_mult[1:] + knotsy_mult[:-1])
if centered is True:
# Determine points that have 2 neighbours in R (centered)
n2R = np.zeros(cropbs.shape, dtype=bool)
n2R[1:-1, :] = cropbs[1:-1, :] & cropbs[2:, :] & cropbs[:-2, :]
        # points with a neighbour at higher R
npR = cropbs & (~n2R)
npR[-1, :] = False
npR[:-1, :] &= cropbs[1:, :]
        # points with a neighbour at lower R
nmR = cropbs & (~n2R) & (~npR)
nmR[0, :] = False
nmR[1:, :] &= cropbs[:-1, :]
# Determine points that have 2 neighbours in Z (centered)
n2Z =
|
np.zeros(cropbs.shape, dtype=bool)
|
numpy.zeros
|
import os
import numpy as np
from astropy.convolution import Gaussian2DKernel, convolve
import bokeh.plotting as bkp
from bokeh.models import (HoverTool, Whisker, CDSView, BooleanFilter,
ColorBar, LinearColorMapper)
from bokeh.models.tickers import SingleIntervalTicker, DatetimeTicker, FixedTicker
from bokeh.layouts import column, gridplot
from bokeh.palettes import Set1, Turbo256
from bokeh.io import export_png, curdoc
from bokeh.themes import Theme
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash.dependencies as ddep
import plotly.io as pio
import plotly.graph_objects as go
WIDTH, HEIGHT = 1000, 500
FONTSIZE, NUMFONTSIZE = '16pt', '12pt'
BOKEH_THEME_FILE = os.path.join(os.path.dirname(__file__), 'data', 'bokeh.yml')
def color_generator(pallette=Set1[9]):
i = -1
while True:
i = (i + 1) % len(pallette)
yield pallette[i]
def plot_bokeh(self, filename=None, show=True, savepng=False):
curdoc().theme = Theme(BOKEH_THEME_FILE)
if filename is not None:
if not filename.endswith('.html'):
filename += '.html'
else:
pass
bkp.output_file(filename)
else:
pass
    # Format the date correctly
self.data['utcstr'] = self.data['utc'].apply(
lambda x:x.isoformat()[0:19])
# Put the dataframe in a useable form
self.data['lower'] = self.data.radiance-self.data.sigma
self.data['upper'] = self.data.radiance+self.data.sigma
self.data['lattandeg'] = self.data.lattan*180/np.pi
mask = self.data.alttan != self.data.alttan.max()
if np.any(mask):
m = self.data[self.data.alttan != self.data.alttan.max()].alttan.max()
else:
m = 1e10
col = np.interp(self.data.alttan,
|
np.linspace(0, m, 256)
|
numpy.linspace
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pybie2d
from ipde.embedded_boundary import EmbeddedBoundary
from ipde.ebdy_collection import EmbeddedBoundaryCollection, EmbeddedFunction
from ipde.heavisides import SlepianMollifier
from ipde.derivatives import fd_x_4, fd_y_4, fourier
from ipde.solvers.multi_boundary.stokes import StokesSolver
from qfs.two_d_qfs import QFS_Evaluator, QFS_Evaluator_Pressure
from personal_utilities.arc_length_reparametrization import arc_length_parameterize
star = pybie2d.misc.curve_descriptions.star
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid
Stokes_Layer_Singular_Form = pybie2d.kernels.high_level.stokes.Stokes_Layer_Singular_Form
Stokes_Layer_Form = pybie2d.kernels.high_level.stokes.Stokes_Layer_Form
Stokes_Layer_Apply = pybie2d.kernels.high_level.stokes.Stokes_Layer_Apply
Singular_DLP = lambda src, _: Stokes_Layer_Singular_Form(src, ifdipole=True) - 0.5*np.eye(2*src.N)
Naive_SLP = lambda src, trg: Stokes_Layer_Form(src, trg, ifforce=True)
Naive_DLP = lambda src, trg: Stokes_Layer_Form(src, trg, ifdipole=True)
from pyfmmlib2d import SFMM
def Layer_Apply(src, trg, f):
s = src.get_stacked_boundary()
t = trg.get_stacked_boundary()
out = SFMM(source=s, target=t, forces=f*src.weights, compute_target_velocity=True, compute_target_stress=True)
u = out['target']['u']
v = out['target']['v']
p = out['target']['p']
return u, v, p
ns = [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000 ]
# velocity errors with no pressure correction
errs_u = [1.09e-02, 4.77e-05, 6.65e-07, 7.98e-09, 9.75e-11, 1.27e-12, 2.13e-12, 2.58e-12, 2.59e-12, 1.87e-12 ]
# velocity/pressure errors with pressure correction
errs_u = [1.09e-02, 4.77e-05, 6.65e-07, 7.99e-09, 9.74e-11, 1.91e-12, 4.02e-11, 1.86e-11, 2.58e-12, 1.29e-10 ]
errs_p = [3.17e-01, 3.15e-03, 6.55e-05, 1.26e-06, 1.83e-08, 3.25e-10, 3.45e-09, 3.62e-09, 5.55e-10, 1.58e-08 ]
nb = 400
ng = int(nb/2)
M = 4*int(nb/100)
M = max(4, M)
M = min(30, M)
pad_zone = 0
verbose = True
plot = True
reparametrize = False
slepian_r = 1.5*M
solver_type = 'spectral' # fourth or spectral
grid_upsample = 2
fix_pressure = True
# get heaviside function
MOL = SlepianMollifier(slepian_r)
# construct boundary
bdy = GSB(c=star(nb, a=0.1, f=5))
if reparametrize:
bdy = GSB(*arc_length_parameterize(bdy.x, bdy.y))
bh = bdy.dt*bdy.speed.min()
# construct a grid
grid = Grid([-np.pi/2, np.pi/2], ng, [-np.pi/2, np.pi/2], ng, x_endpoints=[True, False], y_endpoints=[True, False])
# construct embedded boundary
ebdy = EmbeddedBoundary(bdy, True, M, grid.xh*0.75, pad_zone=pad_zone, heaviside=MOL.step, qfs_tolerance=1e-14, qfs_fsuf=2)
ebdys = [ebdy,]
ebdyc = EmbeddedBoundaryCollection([ebdy,])
# register the grid
print('\nRegistering the grid')
ebdyc.register_grid(grid)
# give ebdyc a bumpy function
ebdyc.ready_bump(MOL.bump, (np.pi/2-ebdy.radial_width, np.pi/2-ebdy.radial_width), ebdyc[0].radial_width)
################################################################################
# Extract radial information from ebdy and construct annular solver
# Testing the radial Stokes solver
print(' Testing Radial Stokes Solver')
a = 3.0
b = 2.0
p_a = 2.0
p_b = 1.0
sin = np.sin
cos = np.cos
esin = lambda x: np.exp(sin(x))
psix = lambda x, y: esin(a*x)*cos(b*y)
psiy = lambda x, y: esin(a*x)*sin(b*y)
u_function = lambda x, y: psix(x, y)
v_function = lambda x, y: -a/b*cos(a*x)*psiy(x, y)
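# Note: this (u, v) pair is divergence-free by construction,
# since du/dx = a*cos(a*x)*exp(sin(a*x))*cos(b*y) = -dv/dy.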
p_function = lambda x, y: cos(p_a*x) + esin(p_b*y)
fu_function = lambda x, y: (a**2*(sin(a*x)-cos(a*x)**2) + b**2)*psix(x, y) - p_a*sin(p_a*x)
fv_function = lambda x, y: -a*b*cos(a*x)*psiy(x, y)*(1 + (a/b)**2*sin(a*x)*(3+sin(a*x))) + p_b*cos(p_b*y)*esin(p_b*y)
fu = EmbeddedFunction(ebdyc)
fv = EmbeddedFunction(ebdyc)
ua = EmbeddedFunction(ebdyc)
va = EmbeddedFunction(ebdyc)
pa = EmbeddedFunction(ebdyc)
fu.define_via_function(fu_function)
fv.define_via_function(fv_function)
ua.define_via_function(u_function)
va.define_via_function(v_function)
pa.define_via_function(p_function)
bcu = u_function(ebdyc[0].bdy.x, ebdyc[0].bdy.y)
bcv = v_function(ebdyc[0].bdy.x, ebdyc[0].bdy.y)
# setup the solver
solver = StokesSolver(ebdyc, solver_type=solver_type)
u, v, p = solver(fu, fv, tol=1e-14, verbose=verbose, maxiter=200, restart=50)
# this isn't correct yet because we haven't applied boundary conditions
def Stokes_Pressure_Fix(src, trg):
Nxx = trg.normal_x[:,None]*src.normal_x
Nxy = trg.normal_x[:,None]*src.normal_y
Nyx = trg.normal_y[:,None]*src.normal_x
Nyy = trg.normal_y[:,None]*src.normal_y
NN = np.array(np.bmat([[Nxx, Nxy], [Nyx, Nyy]]))
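    # NN[i, j] pairs the i-th target normal with the j-th source normal; adding
    # this block to the layer operator (see A below) presumably acts as the
    # usual null-space completion that makes the interior Stokes Dirichlet
    # solve well-posed.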
return NN
# SLP with pressure evaluation at 0th target point
def PSLP(src, trg):
out = np.zeros([2*trg.N+1, 2*src.N])
out[:-1,:] = Naive_SLP(src, trg)
dx = trg.x[0] - src.x
dy = trg.y[0] - src.y
r2 = dx*dx + dy*dy
sir2 = 0.5/r2/np.pi
out[-1, 0*src.N:1*src.N] = dx*sir2*src.weights
out[-1, 1*src.N:2*src.N] = dy*sir2*src.weights
return out
# DLP with pressure evaluation at 0th target point
def PDLP(src, trg):
out = np.zeros([2*trg.N+1, 2*src.N])
out[:-1,:] = Naive_DLP(src, trg)
dx = trg.x[0] - src.x
dy = trg.y[0] - src.y
r2 = dx*dx + dy*dy
rdotn = dx*src.normal_x + dy*src.normal_y
ir2 = 1.0/r2
rdotnir4 = rdotn*ir2*ir2
out[-1, 0*src.N:1*src.N] = (-src.normal_x*ir2 + 2*rdotnir4*dx)*src.weights
out[-1, 1*src.N:2*src.N] = (-src.normal_y*ir2 + 2*rdotnir4*dy)*src.weights
out[-1] /= np.pi
return out
# SLP with pressure null-space correction; fixing scale to eval at 0th target point
def Pressure_SLP(src, trg):
out = np.zeros([2*trg.N+1, 2*src.N+1])
out[:-1,:-1] = Naive_SLP(src, trg)
dx = trg.x[0] - src.x
dy = trg.y[0] - src.y
r2 = dx*dx + dy*dy
sir2 = 0.5/r2/np.pi
out[-1, 0*src.N:1*src.N] = dx*sir2*src.weights
out[-1, 1*src.N:2*src.N] = dy*sir2*src.weights
out[0*trg.N:1*trg.N, -1] = trg.normal_x*trg.weights
out[1*trg.N:2*trg.N, -1] = trg.normal_y*trg.weights
return out
def Fixed_SLP(src, trg):
return Naive_SLP(src, trg) + Stokes_Pressure_Fix(src, trg)
A = Stokes_Layer_Singular_Form(bdy, ifdipole=True) - 0.5*np.eye(2*bdy.N) + Stokes_Pressure_Fix(bdy, bdy)
bu = solver.get_boundary_values(u)
bv = solver.get_boundary_values(v)
buc = np.concatenate([bu, bv])
bc = np.concatenate([bcu, bcv])
tau = np.linalg.solve(A, bc-buc)
if fix_pressure:
qfs = QFS_Evaluator_Pressure(ebdy.bdy_qfs, True, [PDLP,], Pressure_SLP, form_b2c=False)
sigma = qfs([tau,])
nsigma2 = int(sigma.size/2)
out = Layer_Apply(ebdy.bdy_qfs.interior_source_bdy, ebdyc.grid_and_radial_pts, sigma.reshape(2, nsigma2))
else:
qfs = QFS_Evaluator(ebdy.bdy_qfs, True, [Singular_DLP,], Fixed_SLP, on_surface=True, form_b2c=False, vector=True)
sigma = qfs([tau,])
nsigma2 = int(sigma.size/2)
out = Layer_Apply(ebdy.bdy_qfs.interior_source_bdy, ebdyc.grid_and_radial_pts, sigma.reshape(2, nsigma2))
u += out[0]
v += out[1]
p += out[2]
# normalize p/pa
pd = ebdyc[0].interpolate_radial_to_boundary(p[0])
pad = ebdyc[0].interpolate_radial_to_boundary(pa[0])
p -= np.sum(pd*bdy.weights)/np.sum(bdy.weights)
pa -= np.sum(pad*bdy.weights)/np.sum(bdy.weights)
# compute the error
u_err =
|
np.abs(ua-u)
|
numpy.abs
|
#!/usr/bin/env python
import sys
import math
import numpy as np
import ast
from scipy.signal import butter, lfilter, freqz, lfilter_zi, filtfilt
#from sympy import Derivative
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from timeit import default_timer as timer
import smoother
import Vec3D
import Bezier_curves
def read_files(files):
num_files = len(files)
raw_coord_list = []
time_list = []
for file in files:
if 'x_' in file or 'y_' in file:
x,y,z,t = read_BT_file(file)
else:
x,y,z,t = read_OP_file(file)
raw_coord_list.append([x,y,z])
time_list.append(t)
return raw_coord_list,time_list
def read_OP_file(file):
#OpenPose case
tmp = open(file, "r")
fl = tmp.readlines()
x = []
y = []
z = []
time = []
for i in range(len(fl)):
if 'Wrist' in fl[i]:
tmp1 = fl[i-2].split()
tmp2 = fl[i-1].split()
t = float(tmp1[-1])+(0.000000001*float(tmp2[-1]))
time.append(t)
x.append(float(fl[i+2][4:len(fl[i])-1]))
y.append(float(fl[i+3][4:len(fl[i])-1]))
z.append(float(fl[i+4][4:len(fl[i])-1]))
time = np.array(time) - time[0]
return x,y,z,time
def read_BT_file(file):
f = open(file, "r")
fl = f.readlines()
x = []
y = []
z = []
time = []
for i in range(len(fl)):
if 'Frame' in fl[i]:
if '[]' in fl[i+1]:
continue
temp = fl[i+1]
if temp:
temp = temp[3:]
a = ast.literal_eval(temp)
cox=[]
coy=[]
coz=[]
for k in range(len(a)):
cox.append(a[k][0][0])
coy.append(a[k][0][1])
coz.append(a[k][0][2])
if cox[8] != 0.0:
time.append(float(fl[i-1][41:56]))
x.append(cox[8]/1000)
y.append(coy[8]/1000)
z.append(coz[8]/1000)
if time != []:
time = np.array(time) - time[0]
method = "BT"
return x,y,z,time
def print2D(raw_coords,smooth_coords,p=[],case=0,outliers=[],filtered=[]):
f_info = 'Smoothed trajectory'
fig,ax = plt.subplots()
ax.set_aspect('equal')
ax.grid('on')
ax.scatter(np.array(raw_coords[0]),np.array(raw_coords[1]),alpha=0.3,label="Initial Points")
if outliers != []:
ax.scatter(np.array(outliers[0]),np.array(outliers[1]),alpha=0.3,label="Points after excluding outliers")
if filtered != []:
ax.scatter(np.array(filtered[0]),np.array(filtered[1]),alpha=0.3,label="Points after Filtering")
if case == 0 or case == 1:
if case == 0:
plt.title('Single Bezier Smoothing')
else:
plt.title('Piecewise Bezier Smoothing')
ax.scatter(np.array(smooth_coords[0]),
np.array(smooth_coords[1]),linewidth=1.5,label='%s' %(f_info))
elif case == 2:
ax.plot(np.array(smooth_coords[0]),
np.array(smooth_coords[1]),linewidth=1.5,label='%s, p = %.4f' %(f_info,p))
plt.title('Cubic Spline Smoothing')
ax.set_xlabel('X - axis (m)')
ax.set_ylabel('Y - axis (m)')
ax.legend()
#manager = plt.get_current_fig_manager()
#manager.window.showMaximized()
plt.show()
def printDemo2D(raw_coord_list,smooth_coord_list,num_files,f_info,case=-1,scatter=True):
f_info = ['line','line','line','line','line','line','line','line','line','line','line','line']
p = [0,0,0,0]
fig1, ax = plt.subplots()
ax.scatter(np.array(raw_coord_list[0][0]),np.array(raw_coord_list[0][1]),alpha=0.3,label="Initial Points")
if scatter:
if num_files <= 2:
for i in range(num_files):
ax.scatter(np.array(raw_coord_list[i][0]),np.array(raw_coord_list[i][1]),alpha=0.3,label="Initial Points")
ax.set_aspect('equal')
ax.grid('on')
print(case)
if case == 0 or case == 1:
if case == 0:
plt.title('Single Bezier Smoothing')
else:
plt.title('Piecewise Bezier Smoothing')
for i in range(num_files):
ax.plot(np.array(smooth_coord_list[i][0]),
np.array(smooth_coord_list[i][1]),linewidth=1.0,label='%s' %(f_info[i]))
elif case == 2:
for i in range(num_files):
ax.plot(np.array(smooth_coord_list[i][0]),
np.array(smooth_coord_list[i][1]),linewidth=1.0,label='%s' %(f_info[i]))
plt.title('Cubic Spline Smoothing')
else:
for i in range(num_files):
ax.plot(np.array(smooth_coord_list[i][0]),
np.array(smooth_coord_list[i][1]))
plt.title('Initial Trajectories WITHOUT smoothing')
ax.set_xlabel('X - axis (m)')
ax.set_ylabel('Y - axis (m)')
ax.legend()
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
plt.show()
def print3D(raw_coord_list,smooth_coord_l):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(np.array(smooth_coord_l[0]),np.array(smooth_coord_l[1]), np.array(smooth_coord_l[2]))
ax.scatter(np.array(raw_coord_list[0]),np.array(raw_coord_list[1]), np.array(raw_coord_list[2]))
ax.plot([0,0],[-0.5,.5],color='dimgrey')
ax.plot([-.6,.6],[0,0],color='dimgrey')
ax.plot([0,0],[0,0],[-0.1,0.3],color='dimgrey')
ax.set_xlabel('X - axis')
ax.set_ylabel('Y - axis')
ax.set_zlabel('Z - axis')
#ax.set_xlim([-0.6,0.6])
#ax.set_ylim([-0.6,0.6])
#ax.set_zlim([-0.1,0.3])
plt.show()
#<--------------------- ButterWorth Filter ------------------------>#
def butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
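    # scipy.signal.butter (with analog=False and no fs argument) expects the
    # critical frequency as a fraction of the Nyquist frequency, hence the
    # normalisation by nyq = fs / 2 above.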
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
b, a = butter_lowpass(cutoff, fs, order=order)
y = filtfilt(b, a, data)
return y
#<----------------------------------------------------------------->#
def filtering(x_raw,y_raw,z_raw,time,cutoff=1.5,plot=False):
order = 6
fs = 20.0 # sample rate, Hz
# desired cutoff frequency of the filter, Hz
points = []
x = butter_lowpass_filter(x_raw, cutoff, fs, order)
y = butter_lowpass_filter(y_raw, cutoff, fs, order)
z = butter_lowpass_filter(z_raw, cutoff, fs, order)
if plot:
plotter(x_raw,x,time)
return x,y,z,time
#<----------------- Downsampling Auxiliaries ---------------------->#
def exclude_outliers(x_raw,y_raw,z_raw,time):
mean_z, std_z = np.mean(z_raw), np.std(z_raw)
mean_y, std_y = np.mean(y_raw),
|
np.std(y_raw)
|
numpy.std
|
import numpy as np
import pandas as pd
import logging
import operator
logging.basicConfig(level=logging.WARNING)
test_input = [
[1, 1],
[1, 6],
[8, 3],
[3, 4],
[5, 5],
[8, 9],
]
data = np.array(test_input)
input_file = "day06_input.txt"
data =
|
np.loadtxt(input_file, delimiter=',', dtype=int)
|
numpy.loadtxt
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 17:11:30 2017
General Oceanographic tools
NOTES: geostrophy is wrong.. need to go back and try with geopotentials?
@author: manishdevana
"""
import numpy as np
import pandas as pd
import scipy.interpolate as interp
import scipy
import matplotlib.pyplot as plt
import gsw
import cmocean
from netCDF4 import Dataset
from matplotlib.collections import LineCollection
import datetime as dt
default_params = {
'bin_size': 200,
'overlap': 100,
'm_0': 1. / 300., # 1/lamda limits
'm_c': 1. / 10., # 1/lamda limits
'order': 1,
'nfft': 256,
'plot_data': 'on',
'transect_fig_size': (6, 4),
'reference pressure': 0,
'plot_check_adiabatic': False,
'plot_spectrums': False,
}
def loadCTD(ctd):
"""
Loads standard ctd data from a dictionary of ctd values
"""
S = ctd['s']
T = ctd['t']
p = ctd['p']
lat = ctd['lat']
lon = ctd['lon']
return S, T, p, lat, lon
def loadLADCP(ladcp, full_set=False):
"""
Loads standard ctd data from a dictionary of ctd values
"""
U = ladcp['u']
V = ladcp['v']
p = ladcp['p_grid']
uup = ladcp['uup']
vup = ladcp['vup']
udo = ladcp['udo']
vdo = ladcp['vdo']
if full_set:
return U, V, p, uup, vup, udo, vdo
else:
return U, V, p
def depthTrim(data, z, maxDepth):
"""
Trims array to a max depth with corresponding depth or pressure grid
"""
idx = np.squeeze(z <= maxDepth)
dataTrimmed = data[idx,:]
return dataTrimmed, z[idx]
def rhoFromCTD(S, T, p, lon, lat):
"""
Rho from measured salinity and temperature values
"""
SA = gsw.SA_from_SP(S, p, lon, lat)
CT = gsw.CT_from_t(SA, T, p)
rho = gsw.density.rho(SA, CT, p)
return rho
def sigma0FromCTD(S, T, p, lon, lat):
"""
Rho from measured salinity and temperature values
"""
SA = gsw.SA_from_SP(S, p, lon, lat)
CT = gsw.CT_from_t(SA, T, p)
rho = gsw.density.sigma0(SA, CT)
return rho
def gswN2(S, T, p, lat, lon, axis=0):
SA = gsw.SA_from_SP(S, p, lon, lat)
CT = gsw.CT_from_t(SA, T, p)
N2, dump = gsw.stability.Nsquared(SA, CT, p, lat, axis=axis)
N2poly = []
# for cast in N2.T:
# fitrev = vert_polyFit(cast, p[:,1], 100, deg=1)
# N2poly.append(fitrev)
#
# N2poly = np.vstack(N2poly).T
bottom_row = np.full((1,21), np.nan)
N2 = np.vstack((N2, bottom_row))
return N2
def bfrq(T, S, z, lat, lon):
"""
Calculate Bouyancy frequency from practical salinity and temperature.
Converts to potential temperature and absolute salinity to calculate density
"""
SA = gsw.conversions.SA_from_SP(S, z, lon, lat)
g = gsw.grav(lat, z)
pdens = gsw.rho_t_exact(SA, T, z)
dpdens = np.gradient(gsw.rho_t_exact(SA, T, z), axis=0)
dz = np.gradient(z, axis=0)
N2 = (g/pdens)*(dpdens/dz)
return N2
def sliding_bins(data, p, bin_size, step=100):
"""
Bin data in sliding vertical bins with a specified overlap
"""
dz = np.nanmean(np.gradient(p))
bin_step = int(np.ceil(bin_size/dz))
step = int(np.ceil(step/dz))
# start point of each bin
starts = np.arange(0, data.shape[0], step)
idx = []
for start in starts:
if start+bin_step <= data.shape[0]:
idx.append(np.arange(start, start+bin_step, dtype=int))
return np.vstack(idx)
def binData(data, p, bin_size):
"""
This function takes an array of hydrographic casts (each column representing
    an individual cast) and bins the data into half overlapping bins, returning
only the indices i.e. no modifications are made to the original data
"""
# Pressure steps
dp = np.nanmean(np.gradient(p))
steps = int(np.ceil(bin_size/dp))
starts = np.arange(0, data.shape[0], steps/2)
idx = []
for i in starts:
if i+steps <= data.shape[0]:
idx.append(np.arange(i,i+steps, dtype=int))
idx = np.vstack(idx)
return idx
def depthAvgFlow(U, V, dz, depthRange=500):
"""
    Depth-averaged flow from the bottom up to depthRange.
"""
steps = int(np.ceil(depthRange/dz))
Umeans = np.empty(0)
Vmeans = np.empty(0)
for Ui, Vi in zip(U.T, V.T):
mask = np.where(np.isfinite(Ui))[0]
mask = mask[-1:np.max(mask)-steps:-1]
Umeans = np.append(Umeans, np.nanmean(Ui[mask]))
Vmeans = np.append(Vmeans, np.nanmean(Vi[mask]))
return Umeans, Vmeans
def bathyLoadNc(fname, lat, lon, add_buffer=True, buffer=.1):
"""
Loads bathymetric data from Netcdf file and extracts box around transect
"""
file = Dataset(fname)
bathyDict = {key:file.variables[key][:] for key in file.variables.keys()}
file.close()
lat2 = bathyDict['lat']
lon2 = bathyDict['lon']
if add_buffer:
# go back and actually add the buffer in
latidx = np.where(np.logical_and(lat2 < np.nanmax(lat)+buffer\
, lat2 > np.nanmin(lat)-buffer))[0]
lonidx = np.where(np.logical_and(lon2 < np.nanmax(lon)+buffer\
, lon2 > np.nanmin(lon)-buffer))[0]
else:
latidx = np.where(np.logical_and(lat2 < np.nanmax(lat)\
, lat2 > np.nanmin(lat)))[0]
lonidx = np.where(np.logical_and(lon2 < np.nanmax(lon)\
, lon2 >
|
np.nanmin(lon)
|
numpy.nanmin
|
# Copyright (c) Facebook, Inc. and its affiliates
# Copyright (c) MTRF authors
import numpy as np
MOCAP_POS_PALMDOWN = np.array([0.705, 0.185, 1.0])
MOCAP_QUAT_PALMDOWN = np.array([0, 1, 0, 0])
MOCAP_EULER_PALMDOWN = np.array([-np.pi, 0, 0])
ARM_QPOS_PALMDOWN = np.array([0.7, 0.133, -0.503, 1.067, -2.308, 0.976, 0.0973])
MOCAP_POS_PALMUP = np.array([0.705, 0.185, 1.0])
MOCAP_QUAT_PALMUP = np.array([1, 0, 0, 0])
MOCAP_EULER_PALMUP = np.array([0, 0, 0])
ARM_QPOS_PALMUP = np.array([0.758, 0.403, -0.953, 0.935, -2.019, 0.774, -2.811])
DEFAULT_MOCAP_RANGE =
|
np.array([.01, .01, .01, .25, .25, .25])
|
numpy.array
|
# This is a python port of the C++ code of qvality (https://github.com/percolator/percolator/)
# It does not include the mix-max corrections, nor the pi0 corrections
from __future__ import print_function
import subprocess
import tempfile
import csv
import os
import sys
import numpy as np
import bisect
tao = 2.0 / (1 + np.sqrt(5.0)) # inverse of golden section
scaleAlpha = 1
stepEpsilon = 1e-8
gRange = 35.0
weightSlope = 1e1
VERB = 3
# pi0 estimation parameters
numLambda = 100
maxLambda = 0.5
# this function returns PEPs in ascending order (lowest PEP first)
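# Rough outline (as implemented below): pool and sort all scores, bin them,
# fit a penalized logistic spline (IRLS + roughness penalty) to the decoy
# fraction per bin, evaluate that spline at the requested scores, scale by a
# pi0-derived factor and monotonize to obtain the PEPs.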
def getQvaluesFromScores(targetScores, decoyScores, includePEPs = False, includeDecoys = False, tdcInput = False, pi0 = 1.0, plotRegressionCurve = False, numBins = 500):
if type(targetScores) is not np.ndarray:
targetScores = np.array(targetScores)
if type(decoyScores) is not np.ndarray:
decoyScores = np.array(decoyScores)
if len(targetScores) == 0:
sys.exit("ERROR: no target hits available for PEP calculation")
if len(decoyScores) == 0:
sys.exit("ERROR: no decoy hits available for PEP calculation")
targetScores.sort()
decoyScores.sort()
allScores = np.concatenate((targetScores, decoyScores))
allScores.sort()
medians, negatives, sizes = binData(allScores, decoyScores, numBins)
medians, negatives, sizes = np.array(medians), np.array(negatives), np.array(sizes)
# sort in descending order, highest score first
if includeDecoys:
evalScores = allScores[::-1]
#evalScores = np.array([x[0] for x in combined])
else:
evalScores = targetScores[::-1]
if VERB > 3:
print(medians, negatives, sizes)
variables = roughnessPenaltyIRLS(medians, negatives, sizes)
if pi0 < 1.0:
factor = pi0 * float(len(targetScores)) / len(decoyScores)
else:
factor = 1.0
if plotRegressionCurve:
scoresForPlot = evalScores.copy()
probs = factor * np.exp(splineEval(evalScores, medians, variables))
probs = monotonize(probs)
if plotRegressionCurve:
import matplotlib.pyplot as plt
plt.plot(medians, (1.0*negatives) / sizes, '*-')
plt.plot(scoresForPlot, probs)
plt.figure()
plt.plot(medians, (1.0*negatives) / sizes, '*-')
plt.plot(scoresForPlot, probs)
plt.yscale("log")
plt.show()
return None, probs
def getQvaluesFromPvalues(pvalues, includePEPs = False):
targetScores = sorted(pvalues)
pi0 = estimatePi0(targetScores)
if VERB > 2:
print("Estimating pi0 = %f" % pi0)
step = 1.0 / 2.0 / len(pvalues)
decoyScores = np.arange(step, 1 - step + 1e-10, step*2)
targetScores = pvaluesToScores(targetScores)
decoyScores = pvaluesToScores(decoyScores)
return getQvaluesFromScores(targetScores, decoyScores, includePEPs, includeDecoys = False, pi0 = pi0)
def pvaluesToScores(pvalues):
return np.array(list(map(lambda x : -1*np.log(x / (1 - x)), pvalues)))
def monotonize(peps):
return np.minimum(1.0, np.maximum.accumulate(peps))
def binData(allScores, decoyScores, numBins):
binEdges = list(map(lambda x : int(np.floor(x)), np.linspace(0, len(allScores), numBins+1)))
bins = list()
startIdx = 0
for endIdx in binEdges[1:]:
if startIdx < endIdx:
while endIdx < len(allScores) and allScores[endIdx-1] == allScores[endIdx]:
endIdx += 1
bins.append(allScores[startIdx:endIdx])
startIdx = endIdx
results = list()
for b in bins:
m = np.median(b)
numNegs = np.searchsorted(decoyScores, b[-1], side = 'right') - np.searchsorted(decoyScores, b[0], side = 'left')
numTot = len(b)
results.append([m, numNegs, numTot])
return zip(*results)
def roughnessPenaltyIRLS(medians, negatives, sizes):
Q, R = initQR(medians)
g, w, z, gamma, p, gnew = initg(negatives, sizes)
variables = (Q, R, g, w, z, gamma, p, gnew)
p1 = 1.0 - tao
p2 = tao
cv1 = evaluateSlope(medians, negatives, sizes, variables, -scaleAlpha * np.log(p1))
cv2 = evaluateSlope(medians, negatives, sizes, variables, -scaleAlpha * np.log(p2))
alpha = alphaLinearSearchBA(0.0, 1.0, p1, p2, cv1, cv2, medians, negatives, sizes, variables)
if VERB > 3:
print("Alpha selected to be", alpha)
variables = iterativeReweightedLeastSquares(medians, negatives, sizes, variables, alpha)
return variables
def alphaLinearSearchBA(min_p, max_p, p1, p2, cv1, cv2, medians, negatives, sizes, variables):
# Minimize Slope score
# Use neg log of 0<p<1 so that we allow for searches 0<alpha<inf
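    # tao = 2 / (1 + sqrt(5)) ~ 0.618 is the inverse golden ratio, so each
    # recursive call keeps one interior point and shrinks the [min_p, max_p]
    # bracket by the golden section.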
oldCV = 0.0
if cv2 < cv1:
# keep point 2
min_p = p1
p1 = p2
p2 = min_p + tao * (max_p - min_p)
oldCV = cv1
cv1 = cv2
cv2 = evaluateSlope(medians, negatives, sizes, variables, -1*scaleAlpha*np.log(p2))
if VERB > 3:
print("New point with alpha=", -scaleAlpha*np.log(p2), ", giving slopeScore=", cv2)
else:
# keep point 1
max_p = p2
p2 = p1
p1 = min_p + (1 - tao) * (max_p - min_p)
oldCV = cv2
cv2 = cv1
cv1 = evaluateSlope(medians, negatives, sizes, variables, -1*scaleAlpha*np.log(p1))
if VERB > 3:
print("New point with alpha=", -scaleAlpha*np.log(p1), ", giving slopeScore=", cv1)
if (oldCV - min(cv1, cv2)) / oldCV < 1e-5 or abs(p2 - p1) < 1e-10:
return -scaleAlpha*np.log(p1) if cv1 < cv2 else -scaleAlpha*np.log(p2)
return alphaLinearSearchBA(min_p, max_p, p1, p2, cv1, cv2, medians, negatives, sizes, variables)
def evaluateSlope(medians, negatives, sizes, variables, alpha):
# Calculate a spline for current alpha
variables = iterativeReweightedLeastSquares(medians, negatives, sizes, variables, alpha)
_, _, g, _, _, _, _, _ = variables
# Find highest point (we only want to evaluate things to the right of that point)
n = len(medians)
mixg = 1 # Ignore 0 and n-1
maxg = g[mixg]
for ix in range(mixg, n-1):
if g[ix] >= maxg:
maxg = g[ix]
mixg = ix
maxSlope = -10e6
slopeix = -1
for ix in range(mixg+1, n-2):
slope = g[ix-1]-g[ix]
if slope > maxSlope:
maxSlope = slope
slopeix = ix
# Now score the fit based on a linear combination between
# The bump area and alpha
if VERB > 3:
print("mixg=", mixg, ", maxg=", maxg, ", maxBA=", maxSlope, " at ix=", slopeix, ", alpha=", alpha)
return maxSlope * weightSlope + alpha
def iterativeReweightedLeastSquares(medians, negatives, sizes, variables, alpha, epsilon = stepEpsilon, maxiter = 50):
Q, R, g, w, z, gamma, p, gnew = variables
for it in range(maxiter):
g = gnew
p, z, w = calcPZW(g, negatives, sizes)
aWiQ = np.multiply((alpha / w)[:,None], Q)
M = R + Q.T.dot(aWiQ)
gamma = np.linalg.solve(M, Q.T.dot(z))
gnew = z - aWiQ.dot(gamma)
gnew = np.minimum(gRange, np.maximum(-1*gRange, gnew))
difference = g - gnew
step = np.linalg.norm(difference) / len(medians)
if VERB > 3:
print("Step size:", step)
if step < epsilon:
return (Q, R, g, w, z, gamma, p, gnew)
if VERB > 1:
print("Warning: IRLS did not converge with maxIter =", maxiter)
return (Q, R, g, w, z, gamma, p, gnew)
def calcPZW(g, negatives, sizes, epsilon = 1e-15):
e = np.exp(g)
p = np.minimum(1 - epsilon, np.maximum(epsilon, e / (1+e)))
w = np.maximum(epsilon, sizes * p * (1 - p))
z = np.minimum(gRange, np.maximum(-1*gRange, g + (negatives - p * sizes) / w))
return p, z, w
def initg(negatives, sizes):
n = len(negatives)
g = np.zeros(n)
w = np.zeros(n)
z = np.ones(n) * 0.5
gamma = np.zeros(n-2)
p = (negatives + 0.05) / (sizes + 0.1)
gnew = np.log(p / (1-p))
return g, w, z, gamma, p, gnew
def initQR(medians):
n = len(medians)
dx = medians[1:] - medians[:-1]
Q = np.zeros((n, n -2))
Q[range(n-2), range(n-2)] = 1.0 / dx[:-1]
Q[range(1,n-1), range(n-2)] = - 1.0 / dx[:-1] - 1.0 / dx[1:]
Q[range(2,n), range(n-2)] = 1.0 / dx[1:]
R = np.zeros((n-2, n-2))
R[range(n-2), range(n-2)] = (dx[:-1] + dx[1:]) / 3
R[range(n-3), range(1,n-2)] = dx[1:-1] / 6
R[range(1,n-2), range(n-3)] = dx[1:-1] / 6
return Q, R
def splineEval(scores, medians, variables):
_, _, g, _, _, gamma, _, _ = variables
#score = np.exp(score)
n = len(medians)
rights = np.searchsorted(medians, scores)
derr = (g[1] - g[0]) / (medians[1] - medians[0]) - (medians[1] - medians[0]) / 6 * gamma[0]
scores[rights == 0] = g[0] - (medians[0] - scores[rights == 0]) * derr # reuse "scores" array to save memory
derl = (g[-1] - g[-2]) / (medians[-1] - medians[-2]) + (medians[-1] - medians[-2]) / 6 * gamma[-3]
scores[rights == n] = g[-1] + (scores[rights == n] - medians[-1]) * derl
idxs = np.where((rights > 0) & (rights < n))
rights = rights[idxs] # reuse "rights" array to save memory
hs = medians[rights] - medians[rights - 1]
drs = medians[rights] - scores[idxs]
dls = scores[idxs] - medians[rights - 1]
gamr = np.zeros_like(hs)
gamr[rights < (n - 1)] = gamma[rights[rights < (n - 1)] - 1]
gaml = np.zeros_like(hs)
gaml[rights > 1] = gamma[rights[rights > 1] - 2]
scores[idxs] = (dls * g[rights] + drs * g[rights - 1]) / hs - dls * drs / 6 * ((1.0 + dls / hs) * gamr + (1.0 + drs / hs) * gaml)
return scores
def estimatePi0(pvalues, numBoot = 100):
pvalues = np.array(pvalues)
lambdas, pi0s = list(), list()
numPvals = len(pvalues)
for lambdaIdx in range(numLambda + 1):
l = ((lambdaIdx + 1.0) / numLambda) * maxLambda
startIdx = np.searchsorted(pvalues, l)
Wl = numPvals - startIdx
pi0 = Wl / (1.0 - l) / numPvals
if pi0 > 0.0:
lambdas.append(l)
pi0s.append(pi0)
if len(pi0s) == 0:
print("Error in the input data: too good separation between target and decoy PSMs.\nImpossible to estimate pi0, setting pi0 = 1")
return 1.0
minPi0 = min(pi0s)
mse = [0.0] * len(pi0s)
# Examine which lambda level is most stable under bootstrap
for boot in range(numBoot):
pBoot = bootstrap(pvalues)
n = len(pBoot)
for idx, l in enumerate(lambdas):
startIdx =
|
np.searchsorted(pvalues, l)
|
numpy.searchsorted
|
from typing import Tuple
from tunepy2.interfaces import AbstractValidator, AbstractLearner
from tunepy2.internal import *
import numpy as np
from copy import deepcopy
class CrossValidator(AbstractValidator):
"""
Trains a set number of models on subsets of the provided data and returns the average fitness
of the trained models.
"""
def __init__(self, bins: int):
"""
Creates a new CrossValidator.
:param bins: number of bins (must be at least 2)
"""
self._bins = bins
self._feature_bins_train = []
self._feature_bins_test = []
self._label_bins_train = []
self._label_bins_test = []
def validate(self, x, y, model: AbstractLearner) -> float:
"""
Creates a fitness score for the provided model and data
:param x: array-like dataset features
:param y: array-like dataset labels
:param model: untrained learner
:return: a fitness score
"""
if self._bins < 2:
raise CrossValidatorBinException
if len(x) != len(y):
raise DimensionsMismatchException
self.build_test_bins(x, y)
self.build_train_bins(x, y)
total_fitness = 0.0
for index in range(self._bins):
model_copy = deepcopy(model)
model_copy.fit(self._feature_bins_train[index], self._label_bins_train[index])
model_copy.evaluate(self._label_bins_test[index], self._label_bins_train[index])
total_fitness += model_copy.fitness
return float(total_fitness / self._bins)
def build_test_bins(self, x, y):
if len(x) != len(y):
raise DimensionsMismatchException
total_rows = len(y)
extra_rows = total_rows % self._bins
bin_size = int((total_rows - extra_rows) / self._bins)
self._feature_bins_test = [None for _ in range(self._bins)]
self._label_bins_test = [None for _ in range(self._bins)]
for index in range(self._bins):
start_slice = index * bin_size
end_slice = (index + 1) * bin_size
if index == self._bins - 1:
self._feature_bins_test[index] = x[start_slice:]
self._label_bins_test[index] = y[start_slice:]
else:
self._feature_bins_test[index] = x[start_slice:end_slice]
self._label_bins_test[index] = y[start_slice:end_slice]
def build_train_bins(self, x, y):
if len(x) != len(y):
raise DimensionsMismatchException
total_rows = len(y)
extra_rows = total_rows % self._bins
bin_size = int((total_rows - extra_rows) / self._bins)
self._feature_bins_train = [None for _ in range(self._bins)]
self._label_bins_train = [None for _ in range(self._bins)]
for index in range(self._bins):
start_slice = index * bin_size
end_slice = (index + 1) * bin_size
if index == 0:
self._feature_bins_train[index] = x[end_slice:]
self._label_bins_train[index] = y[end_slice:]
elif index == self._bins - 1:
self._feature_bins_train[index] = x[:start_slice]
self._label_bins_train[index] = y[:start_slice]
else:
self._feature_bins_train[index] =
|
np.concatenate((x[:start_slice], x[end_slice:]), axis=0)
|
numpy.concatenate
|