| prompt | completion | api |
|---|---|---|
| string (length 15–655k) | string (length 3–32.4k) | string (length 8–52) |
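Each row pairs a code prefix (`prompt`) with the expression that completes it (`completion`) and the fully qualified API call it targets (`api`). In the listing below, every prompt is followed by a single line of the form `| completion | api |`. A hypothetical sketch of the first row as a Python dict, with field contents abridged:

```python
# Illustration only: the first row shown below, with the prompt abridged to its last line.
row = {
    "prompt": "import astropy.io.fits as fits\n...\nmaxFlux =",
    "completion": "np.max(img)",
    "api": "numpy.max",
}
```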
import astropy.io.fits as fits
import click
import os
from scipy import ndimage
from operator import itemgetter, attrgetter
import numpy as np
import matplotlib.pyplot as plt
@click.command()
@click.argument('path')
def stat(path):
dirs = os.listdir(path)
dirs.sort()
resultLst = []
for f in dirs:
if "OBJECT" not in f:
continue
filePath = path + '/' + f
hdulist = fits.open(filePath)
header = hdulist[0].header
dateObs = header['DATE-OBS']
img = ndimage.median_filter(hdulist[0].data, 5)
maxFlux =
| np.max(img) | numpy.max |
# Test stochastic collocation module
import unittest
import numpy as np
import chaospy as cp
from pystatreduce.stochastic_collocation import StochasticCollocation
from pystatreduce.quantity_of_interest import QuantityOfInterest
from pystatreduce.dimension_reduction import DimensionReduction
import pystatreduce.examples as examples
class StochasticCollocationTest(unittest.TestCase):
def test_normalStochasticCollocation2D(self):
systemsize = 2
x =
| np.random.rand(systemsize) | numpy.random.rand |
import torch
import numpy as np
from deep_sprl.util.torch import to_float_tensor
from deep_sprl.util.gaussian_torch_distribution import GaussianTorchDistribution
from deep_sprl.teachers.abstract_teacher import AbstractTeacher
from scipy.optimize import minimize, NonlinearConstraint, Bounds
class AbstractSelfPacedTeacher:
def __init__(self, init_mean, flat_init_chol, target_mean, flat_target_chol, alpha_function, max_kl):
self.context_dist = GaussianTorchDistribution(init_mean, flat_init_chol, use_cuda=False, dtype=torch.float64)
self.target_dist = GaussianTorchDistribution(target_mean, flat_target_chol, use_cuda=False, dtype=torch.float64)
self.alpha_function = alpha_function
self.max_kl = max_kl
self.iteration = 0
def target_context_kl(self, numpy=True):
kl_div = torch.distributions.kl.kl_divergence(self.context_dist.distribution_t,
self.target_dist.distribution_t).detach()
if numpy:
kl_div = kl_div.numpy()
return kl_div
def save(self, path):
weights = self.context_dist.get_weights()
np.save(path, weights)
def load(self, path):
self.context_dist.set_weights(np.load(path))
def _compute_context_kl(self, old_context_dist):
return torch.distributions.kl.kl_divergence(old_context_dist.distribution_t, self.context_dist.distribution_t)
def _compute_context_loss(self, dist, cons_t, old_c_log_prob_t, c_val_t, alpha_cur_t):
con_ratio_t = torch.exp(dist.log_pdf_t(cons_t) - old_c_log_prob_t)
kl_div = torch.distributions.kl.kl_divergence(dist.distribution_t, self.target_dist.distribution_t)
return torch.mean(con_ratio_t * c_val_t) - alpha_cur_t * kl_div
class SelfPacedTeacher(AbstractTeacher, AbstractSelfPacedTeacher):
def __init__(self, target_mean, target_variance, initial_mean, initial_variance, context_bounds, alpha_function,
max_kl=0.1, std_lower_bound=None, kl_threshold=None, use_avg_performance=False):
# The bounds that we show to the outside are limited to the interval [-1, 1], as this is typically better for
# neural nets to deal with
self.context_dim = target_mean.shape[0]
self.context_bounds = context_bounds
self.use_avg_performance = use_avg_performance
if std_lower_bound is not None and kl_threshold is None:
raise RuntimeError("Error! Both Lower Bound on standard deviation and kl threshold need to be set")
else:
if std_lower_bound is not None:
if isinstance(std_lower_bound, np.ndarray):
if std_lower_bound.shape[0] != self.context_dim:
raise RuntimeError("Error! Wrong dimension of the standard deviation lower bound")
elif std_lower_bound is not None:
std_lower_bound = np.ones(self.context_dim) * std_lower_bound
self.std_lower_bound = std_lower_bound
self.kl_threshold = kl_threshold
# Create the initial context distribution
if isinstance(initial_variance, np.ndarray):
flat_init_chol = GaussianTorchDistribution.flatten_matrix(initial_variance, tril=False)
else:
flat_init_chol = GaussianTorchDistribution.flatten_matrix(initial_variance * np.eye(self.context_dim),
tril=False)
# Create the target distribution
if isinstance(target_variance, np.ndarray):
flat_target_chol = GaussianTorchDistribution.flatten_matrix(target_variance, tril=False)
else:
flat_target_chol = GaussianTorchDistribution.flatten_matrix(target_variance * np.eye(self.context_dim),
tril=False)
super(SelfPacedTeacher, self).__init__(initial_mean, flat_init_chol, target_mean, flat_target_chol,
alpha_function, max_kl)
def update_distribution(self, avg_performance, contexts, values):
self.iteration += 1
old_context_dist = GaussianTorchDistribution.from_weights(self.context_dim, self.context_dist.get_weights(),
dtype=torch.float64)
contexts_t = to_float_tensor(contexts, use_cuda=False, dtype=torch.float64)
old_c_log_prob_t = old_context_dist.log_pdf_t(contexts_t).detach()
# Estimate the value of the state after the policy update
c_val_t = to_float_tensor(values, use_cuda=False, dtype=torch.float64)
# Add the penalty term
cur_kl_t = self.target_context_kl(numpy=False)
if self.use_avg_performance:
alpha_cur_t = self.alpha_function(self.iteration, avg_performance, cur_kl_t)
else:
alpha_cur_t = self.alpha_function(self.iteration, torch.mean(c_val_t).detach(), cur_kl_t)
# Define the KL-Constraint
def kl_con_fn(x):
dist = GaussianTorchDistribution.from_weights(self.context_dim, x, dtype=torch.float64)
kl_div = torch.distributions.kl.kl_divergence(old_context_dist.distribution_t, dist.distribution_t)
return kl_div.detach().numpy()
def kl_con_grad_fn(x):
dist = GaussianTorchDistribution.from_weights(self.context_dim, x, dtype=torch.float64)
kl_div = torch.distributions.kl.kl_divergence(old_context_dist.distribution_t, dist.distribution_t)
mu_grad, chol_flat_grad = torch.autograd.grad(kl_div, dist.parameters())
return np.concatenate([mu_grad.detach().numpy(), chol_flat_grad.detach().numpy()])
kl_constraint = NonlinearConstraint(kl_con_fn, -np.inf, self.max_kl, jac=kl_con_grad_fn, keep_feasible=True)
constraints = [kl_constraint]
if self.kl_threshold is not None and self.target_context_kl() > self.kl_threshold:
# Define the variance constraint as bounds
cones = np.ones_like(self.context_dist.get_weights())
lb = -np.inf * cones.copy()
lb[self.context_dim: 2 * self.context_dim] = np.log(self.std_lower_bound)
ub = np.inf * cones.copy()
bounds = Bounds(lb, ub, keep_feasible=True)
x0 = np.clip(self.context_dist.get_weights().copy(), lb, ub)
else:
bounds = None
x0 = self.context_dist.get_weights().copy()
# Define the objective plus Jacobian
def objective(x):
dist = GaussianTorchDistribution.from_weights(self.context_dim, x, dtype=torch.float64)
val = self._compute_context_loss(dist, contexts_t, old_c_log_prob_t, c_val_t, alpha_cur_t)
mu_grad, chol_flat_grad = torch.autograd.grad(val, dist.parameters())
return -val.detach().numpy(), \
-np.concatenate([mu_grad.detach().numpy(), chol_flat_grad.detach().numpy()]).astype(np.float64)
res = minimize(objective, x0, method="trust-constr", jac=True, bounds=bounds,
constraints=constraints, options={"gtol": 1e-4, "xtol": 1e-6})
if res.success:
self.context_dist.set_weights(res.x)
else:
# If it was not a success, but the objective value was improved and the bounds are still valid, we still
# use the result
old_f = objective(self.context_dist.get_weights())[0]
kl_ok = kl_con_fn(res.x) <= self.max_kl
std_ok = bounds is None or (
| np.all(bounds.lb <= res.x) | numpy.all |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import sys
import math
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense,Flatten,Dropout, BatchNormalization, Reshape
from tensorflow.python.keras.layers.convolutional import Conv2D,MaxPooling2D,Conv1D,MaxPooling1D
import tensorflow.python.keras.backend as K
from sklearn.metrics import mean_squared_error, mean_absolute_error
import util
def ivecs_read(fname):
a = np.fromfile(fname, dtype='int32')
d = a[0]
return a.reshape(-1, d + 1)[:, 1:].copy()
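# Note: ivecs_read/fvecs_read/bvecs_mmap below assume the usual .ivecs/.fvecs/.bvecs layout in which
# each vector is stored as a 4-byte dimension d followed by its d components (int32, float32 or uint8
# respectively); the slicing in these helpers strips that leading dimension field.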
def fvecs_read(fname):
return ivecs_read(fname).view('float32')
def bvecs_mmap(fname):
x = np.memmap(fname, dtype='uint8', mode='r')
d = x[:4].view('int32')[0]
return x.reshape(-1, d + 4)[:, 4:]
def mmap_fvecs(fname):
x = np.memmap(fname, dtype='int32', mode='r')
d = x[0]
return x.view('float32').reshape(-1, d + 1)[:, 1:]
def sanitize(x):
return np.ascontiguousarray(x.astype('float32'))
def lid_kx(knn_distances):
n, k = knn_distances.shape
knn_distances = np.clip(knn_distances, 10**-10, None)
log_dist_ratio = np.log(knn_distances[:, -1].reshape((n, 1)))-np.log(knn_distances)
return np.sum(log_dist_ratio[:,:], axis=1)
# Usage: pred_lid.py <dataset> <real lid or predicted lid>
# datasets: SIFT10M, DEEP10M, GIST, ImageNet
# real lid or predicted lid: 1 or 0
# example: python pred_tao.py SIFT10M 1
def main(argv):
n_classes = 1
training_epochs = 10
batch_size = 10000
n_hidden_1 = 200
n_hidden_2 = 200
k = 1000
if argv[0] == 'SIFT10M':
train_x = sanitize(bvecs_mmap('/home/wanghongya/sift1B/learn.bvecs')[:1000000,:])
train_y = (lid_kx(np.load('/home/wanghongya/tmp/sift10m_gt_D.npy')[:,:k]))
test_x = sanitize(bvecs_mmap('/home/wanghongya/sift1B/queries.bvecs'))
test_y = (lid_kx(np.load('/home/wanghongya/tmp/sift10m_query_gt_D.npy')[:,:k]))
n_input = 128
elif argv[0] == 'DEEP10M':
train_x = sanitize(mmap_fvecs('/home/wanghongya/deep1b/deep1B_learn.fvecs')[:1000000,:])
train_y = (lid_kx(np.load('/home/wanghongya/tmp/deep10m_gt_D.npy')[:,:k]))
test_x = fvecs_read('/home/wanghongya/deep1b/deep1B_queries.fvecs')
test_y = (lid_kx(np.load('/home/wanghongya/tmp/deep10m_query_gt_D.npy')[:,:k]))
n_input = 96
elif argv[0] == 'GIST':
train_x = fvecs_read('/home/wanghongya/gist/gist_learn.fvecs')
train_y = (lid_kx(np.load('/home/wanghongya/tmp/gist_gt_D.npy')[:,:k]))
test_x = fvecs_read('/home/wanghongya/gist/gist_query.fvecs')
test_y = (lid_kx(np.load('/home/wanghongya/tmp/gist_query_gt_D.npy')[:,:k]))
n_input = 960
elif argv[0] == 'ImageNet':
train_x = fvecs_read('/home/wanghongya/dataset/imageNet/imageNet_base.fvecs')[:200000]
train_y = (lid_kx(np.load('/home/wanghongya/tmp/imageNet_gt_D.npy')[:,:k]))
test_x = fvecs_read('/home/wanghongya/dataset/imageNet/imageNet_base.fvecs')[200000:210000]
test_y = (lid_kx(
| np.load('/home/wanghongya/tmp/imageNet_query_gt_D.npy') | numpy.load |
""""
The goal of this module is to implement all the visualization
tools needed to graph the data and results of the computations
for the Task 8 from the coding homeworks in the Machine Learning
course on coursera.com.
"""
import numpy as np
import matplotlib.pyplot as plt
import algorithms
def plot_data(data: tuple, title: str = "Scatter Plot Of Training Data") -> None:
"""
Scatter plot of training data.
Args:
data:
A tuple of x and y values for the points to be plotted.
title:
A string that serves as both the plot's title and the saved figure's filename.
Returns:
None
"""
x, y = data
plt.figure(figsize=(10, 6))
plt.scatter(x, y, marker="x", c="g", s=100)
plt.xlabel("Latency (ms)")
plt.ylabel("Throughput (mb/s)")
plt.title(title)
plt.savefig(title.lower().replace(" ", "_"))
def plot_gaussian_contours(data: tuple, n: int = 200) -> None:
"""
Plot the contours of the Gaussian function. The function is evaluated on an n x n grid
spanning the given axis limits, and the contours are drawn from those values.
Args:
data:
A tuple (x, x_min, x_max, y_min, y_max) containing the training data and the extreme values of the x and y axes.
n:
The number of points sampled along each axis.
Returns:
None
"""
x, x_min, x_max, y_min, y_max = data
x_range = np.array(np.linspace(x_min, x_max, n))
y_range = np.array(np.linspace(y_min, y_max, n))
u, v = np.meshgrid(x_range, y_range)
grid = np.array(list(zip(u.flatten(), v.flatten())))
mu, sigma_2 = algorithms.get_gaussian_parameters(x)
z = algorithms.compute_gauss(grid, mu, sigma_2)
z = z.reshape(u.shape)
plt.contour(
x_range,
y_range,
z,
| np.array([10.0]) | numpy.array |
import cv2
import numpy as np
import tensorflow.keras.backend as tfback
from pathlib import Path
from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
from tensorflow.keras.layers import Lambda, Flatten, Dense
tfback.set_image_data_format('channels_first')
def conv2d_bn(x, layer_name, filters, kernel_size=(1, 1), strides=(1, 1), i='', epsilon=0.00001):
'''2D Convolutional Block with Batch normalization and ReLU activation.
Args:
x (tf.Tensor): Input tensor.
layer_name (str): Name of layer.
filters (int): Number of filters in the convolution.
kernel_size (Tuple[int, int]): Kernel size of the filter to apply.
strides (Tuple[int, int]): Strides of the filter.
i (str): Index appended to the layer name, e.g. '2' for conv2.
epsilon (float): Epsilon for batch normalization.
Returns:
tensor (tf.Tensor): Tensor with graph applied.
'''
if layer_name:
conv_name = f'{layer_name}_conv{i}'
bn_name = f'{layer_name}_bn{i}'
else:
conv_name = f'conv{i}'
bn_name = f'bn{i}'
tensor = Conv2D(filters, kernel_size, strides=strides, data_format='channels_first', name=conv_name)(x)
tensor = BatchNormalization(axis=1, epsilon=epsilon, name=bn_name)(tensor)
tensor = Activation('relu')(tensor)
return tensor
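# Example: conv2d_bn(X, 'inception_4a_3x3', 96, i='1') applies a 96-filter 1x1 convolution followed by
# batch normalization and ReLU, creating layers named 'inception_4a_3x3_conv1' and 'inception_4a_3x3_bn1'.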
def inception_block_4a(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_4a_3x3', 96, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_4a_3x3', 128, kernel_size=(3, 3), i='2')
# 5x5 Block
X_5x5 = conv2d_bn(X, 'inception_4a_5x5', 16, i='1')
X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
X_5x5 = conv2d_bn(X_5x5, 'inception_4a_5x5', 32, kernel_size=(5, 5), i='2')
# Max Pooling Block
X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
X_pool = conv2d_bn(X_pool, 'inception_4a_pool', 32)
X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)
# 1x1 Block
X_1x1 = conv2d_bn(X, 'inception_4a_1x1', 64)
return concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)
def inception_block_4b(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_4b_3x3', 96, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_4b_3x3', 128, kernel_size=(3, 3), i='2')
# 5x5 Block
X_5x5 = conv2d_bn(X, 'inception_4b_5x5', 32, i='1')
X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
X_5x5 = conv2d_bn(X_5x5, 'inception_4b_5x5', 64, kernel_size=(5, 5), i='2')
# Average Pooling Block
X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)
X_pool = conv2d_bn(X_pool, 'inception_4b_pool', 64)
X_pool = ZeroPadding2D(padding=(4, 4), data_format='channels_first')(X_pool)
# 1x1 Block
X_1x1 = conv2d_bn(X, 'inception_4b_1x1', 64)
return concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)
def inception_block_4c(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_4c_3x3', 128, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_4c_3x3', 256, kernel_size=(3, 3), strides=(2, 2), i='2')
# 5x5 Block
X_5x5 = conv2d_bn(X, 'inception_4c_5x5', 32, i='1')
X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
X_5x5 = conv2d_bn(X_5x5, 'inception_4c_5x5', 64, kernel_size=(5, 5), strides=(2, 2), i='2')
# Max Pooling Block
X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)
return concatenate([X_3x3, X_5x5, X_pool], axis=1)
def inception_block_5a(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_5a_3x3', 96, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_5a_3x3', 192, kernel_size=(3, 3), i='2')
# 5x5 Block
X_5x5 = conv2d_bn(X, 'inception_5a_5x5', 32, i='1')
X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
X_5x5 = conv2d_bn(X_5x5, 'inception_5a_5x5', 64, kernel_size=(5, 5), i='2')
# Average Pooling Block
X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)
X_pool = conv2d_bn(X_pool, 'inception_5a_pool', 128)
X_pool = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_pool)
# 1x1 Block
X_1x1 = conv2d_bn(X, 'inception_5a_1x1', 256)
return concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)
def inception_block_5b(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_5b_3x3', 160, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_5b_3x3', 256, kernel_size=(3, 3), strides=(2, 2), i='2')
# 5x5 Block
X_5x5 = conv2d_bn(X, 'inception_5b_5x5', 64, i='1')
X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)
X_5x5 = conv2d_bn(X_5x5, 'inception_5b_5x5', 128, kernel_size=(5, 5), strides=(2, 2), i='2')
# Max Pooling Block
X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool)
return concatenate([X_3x3, X_5x5, X_pool], axis=1)
def inception_block_6a(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_6a_3x3', 96, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_6a_3x3', 384, kernel_size=(3, 3), i='2')
# Average Pooling Block
X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X)
X_pool = conv2d_bn(X_pool, 'inception_6a_pool', 96)
X_pool = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_pool)
# 1x1 Block
X_1x1 = conv2d_bn(X, 'inception_6a_1x1', 256)
return concatenate([X_3x3, X_pool, X_1x1], axis=1)
def inception_block_6b(X):
# 3x3 Block
X_3x3 = conv2d_bn(X, 'inception_6b_3x3', 96, i='1')
X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)
X_3x3 = conv2d_bn(X_3x3, 'inception_6b_3x3', 384, kernel_size=(3, 3), i='2')
# Max Pooling Block
X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)
X_pool = conv2d_bn(X_pool, 'inception_6b_pool', 96)
X_pool = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_pool)
# 1x1 Block
X_1x1 = conv2d_bn(X, 'inception_6b_1x1', 256)
return concatenate([X_3x3, X_pool, X_1x1], axis=1)
def facenet_model(input_shape):
'''Implementation of the Inception model used for FaceNet.
Arguments:
input_shape (Tuple[int]): Shape of the images of the dataset.
Returns:
model (keras.models.Model): FaceNet model.
'''
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_input)
# First Block
X = conv2d_bn(X, '', 64, kernel_size=(7, 7), strides=(2, 2), i='1', epsilon=0.001)
# Zero-Padding + MAXPOOL
X = ZeroPadding2D((1, 1))(X)
X = MaxPooling2D((3, 3), strides = 2)(X)
# Second Block
X = conv2d_bn(X, '', 64, i='2')
# Zero-Padding + MAXPOOL
X = ZeroPadding2D((1, 1))(X)
# Third Block
X = conv2d_bn(X, '', 192, kernel_size=(3, 3), i='3')
# Zero-Padding + MAXPOOL
X = ZeroPadding2D((1, 1))(X)
X = MaxPooling2D(pool_size = 3, strides = 2)(X)
# Fourth Block (Inception)
X = inception_block_4a(X)
X = inception_block_4b(X)
X = inception_block_4c(X)
# Fifth Block (Inception)
X = inception_block_5a(X)
X = inception_block_5b(X)
# Sixth Block (Inception)
X = inception_block_6a(X)
X = inception_block_6b(X)
# Top layer
X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), data_format='channels_first')(X)
X = Flatten()(X)
X = Dense(128, name='dense')(X)
# L2 normalization
X = Lambda(lambda x: tfback.l2_normalize(x,axis=1))(X)
# Create model instance
model = Model(inputs = X_input, outputs = X, name='FaceNetModel')
weight_fpath = Path(__file__).parent.joinpath('weights', 'facenet_weights.h5')
model.load_weights(weight_fpath)
return model
def img_to_encoding(image, model):
# Resize for model
resized = cv2.resize(image, (96, 96))
# Swap channel dimensions
input_img = resized[...,::-1]
# Switch to channels first and round to specific precision.
input_img = np.around(np.transpose(input_img, (2,0,1))/255.0, decimals=12)
x_train =
| np.array([input_img]) | numpy.array |
# isochrones.py
# <NAME> (<EMAIL>)
"""Define the Isocrhone_Model class"""
import numpy as np
import pandas as pd
import os
import glob
import sys
from warnings import warn
from pkg_resources import resource_filename
##########################
# Useful Utilities
def load_MIST_dir(dir_path, iso_append='.iso.cmd'):
df = pd.DataFrame()
for MIST_doc in glob.glob(os.path.join(dir_path, '*'+iso_append)):
try:
with open(MIST_doc, 'r') as f:
lines = [f.readline() for _ in range(13)]
colnames = lines[-1].strip('#\n').split()
assert ('EEP' in colnames)
dtypes = {c: float for c in colnames}
dtypes['EEP'] = int
new_df = pd.read_table(MIST_doc, names=colnames,
comment='#', delim_whitespace=True,
dtype=dtypes, na_values=['Infinity'])
new_df[new_df.isna()] = 100.
df = df.append([new_df], ignore_index=True)
except Exception:
warn('File not properly formatted: %s' % (MIST_doc))
sys.exit(1)
return df
def _interp_arrays(arr1, arr2, f):
"""Linearly interpolate between two (potentially unequal length) arrays
Arguments:
arr1 -- first (lower) array (len N1 or N1xD)
arr2 -- second (upper) array (len N2 or N2xD, N2 doesn't have to equal N1)
f -- linear interpolation fraction (float between 0 and 1)
Output: interpolated array (len max(N1,N2) or max(N1,N2)xD)
"""
assert (arr1.ndim == arr2.ndim), (
"The two interpolated arrays must have same dimensions")
l1, l2 = len(arr1), len(arr2)
# If arrays are unequal length, extrapolate shorter using trend of longer
if (l1 < l2):
delta = arr2[l1:] - arr2[l1-1]
added = arr1[-1] + delta
arr1 = np.append(arr1, added, axis=0)
elif (l1 > l2):
delta = arr1[l2:] - arr1[l2-1]
added = arr2[-1] + delta
arr2 = np.append(arr2, added, axis=0)
return (1-f)*arr1 + f*arr2
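# Worked example for _interp_arrays (hypothetical values): with arr1 = np.array([0., 2.]),
# arr2 = np.array([10., 20., 30.]) and f = 0.5, arr1 is first extended along arr2's trend to
# [0., 2., 12.], so the result is array([ 5., 11., 21.]).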
def _feh_from_str(feh_str):
"""Converts a metallicity value to MIST string
Example Usage:
_feh_from_str("m0.53") -> -0.53
_feh_from_str("p1.326") -> 1.326
Arguments:
feh_str -- metallicity (as a string)
Output: float value of metallicity
"""
value = float(feh_str[1:])
if feh_str[0] == 'm':
value *= -1
elif feh_str[0] != 'p':
raise ValueError('feh string not of valid format')
return value
def _feh_to_str(feh):
"""Converts a metallicity value to MIST string
Example Usage:
_feh_to_str(-0.5313) -> "m0.53"
_feh_to_str(1.326) -> "p1.33"
Arguments:
feh -- metallicity (float)
Output: string representing metallicity
"""
result = ''
if (feh < 0):
result += 'm'
else:
result += 'p'
result += '%1.2f' % (np.abs(feh))
return result
def _interp_df_by_mass(df, dm_min):
ages = np.unique(df.age.values)
fehs = np.unique(df['[Fe/H]_init'].values)
new_rows = []
for age in ages:
for feh in fehs:
iso_df = df[
| np.isclose(df.age, age) | numpy.isclose |
import copy
import git
import torch
import multiprocessing as mp
import multiagent.scenarios as scenarios
import numpy as np
from maml_rl.envs.subproc_vec_env import SubprocVecEnv
from maml_rl.episode import BatchEpisodes
from multiagent.environment import MultiAgentEnv
def make_env(args, i_worker):
def check_github(path, branch_name):
"""Checks whether the path has a correct branch name"""
repo = git.Repo(path)
branch = repo.active_branch
assert branch.name == branch_name, "Branch name does not equal the desired branch"
def _make_env():
"""Load multi-agent particle environment
This code is modified from: https://github.com/openai/maddpg/blob/master/experiments/train.py
"""
# Check github branch
check_github(
path="./thirdparty/multiagent-particle-envs",
branch_name="predator_prey")
# Load multi-agent particle env
scenario = scenarios.load(args.env_name + ".py").Scenario()
world = scenario.make_world(
n_prey=args.n_prey,
n_predator=args.n_predator)
done_callback = scenario.done_callback
env = MultiAgentEnv(
world,
reset_callback=scenario.reset_world,
reward_callback=scenario.reward,
observation_callback=scenario.observation,
done_callback=done_callback)
print("i_worker:", i_worker)
env.seed(i_worker)
assert env.discrete_action_space is False, "For cont. action, this flag must be False"
assert env.shared_reward is False, "For predator-prey, this must be False"
return env
return _make_env
class BatchSampler(object):
def __init__(self, env_name, batch_size, num_workers=mp.cpu_count() - 1, args=None):
self.env_name = env_name
self.batch_size = batch_size # NOTE # of trajectories in each env
self.num_workers = num_workers
self.args = args
self.queue = mp.Queue()
self.envs = SubprocVecEnv(
[make_env(args, i_worker) for i_worker in range(num_workers)], queue=self.queue)
self._env = make_env(args, i_worker=99)()
def sample(self, policy, params=None, prey=None, gamma=0.95, device='cpu'):
"""Sample # of trajectories defined by "self.batch_size". The size of each
trajectory is defined by the Gym env registration defined at:
./maml_rl/envs/__init__.py
"""
assert prey is not None
episodes = BatchEpisodes(batch_size=self.batch_size, gamma=gamma, device=device)
for i in range(self.batch_size):
self.queue.put(i)
for _ in range(self.num_workers):
self.queue.put(None)
observations, worker_ids = self.envs.reset() # TODO reset needs to be fixed
dones = [False]
while (not all(dones)) or (not self.queue.empty()):
with torch.no_grad():
# Get observations
predator_observations, prey_observations = self.split_observations(observations)
predator_observations_torch = torch.from_numpy(predator_observations).to(device=device)
prey_observations_torch = torch.from_numpy(prey_observations).to(device=device)
# Get actions
predator_actions = policy(predator_observations_torch, params=params).sample()
predator_actions = predator_actions.cpu().numpy()
prey_actions = prey.select_deterministic_action(prey_observations_torch)
prey_actions = prey_actions.cpu().numpy()
actions = np.concatenate([predator_actions, prey_actions], axis=1)
new_observations, rewards, dones, new_worker_ids, _ = self.envs.step(copy.deepcopy(actions))
assert np.sum(dones[:, 0]) == np.sum(dones[:, 1])
dones = dones[:, 0]
# Get new observations
new_predator_observations, _ = self.split_observations(new_observations)
# Get rewards
predator_rewards = rewards[:, 0]
episodes.append(
predator_observations,
predator_actions,
predator_rewards,
worker_ids)
observations, worker_ids = new_observations, new_worker_ids
return episodes
def reset_task(self, task):
tasks = [task for _ in range(self.num_workers)]
reset = self.envs.reset_task(tasks)
return all(reset)
def sample_tasks(self, num_tasks, test=False):
if test is False:
i_agents = np.random.randint(low=0, high=16, size=(num_tasks, ))
else:
i_agents = np.random.randint(low=16, high=21, size=(num_tasks, ))
tasks = [{"i_agent": i_agent} for i_agent in i_agents]
return tasks
def split_observations(self, observations):
predator_observations = []
prey_observations = []
for obs in observations:
assert len(obs) == 2
predator_observations.append(obs[0])
prey_observations.append(obs[1])
return \
| np.asarray(predator_observations, dtype=np.float32) | numpy.asarray |
__author__ = 'arosado'
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as scistats
import pickle
import json
import csv
class BFPData:
currentDirectory = None
currentFile = None
currentFileName = None
currentFileData = None
currentCycleData = None
currentCycleIndex = None
allData = []
def parseFile(self):
fileText = self.currentFile.read()
fileLines = fileText.split('\n')
expParaLines = fileLines[0:3]
expLines = fileLines[4:len(fileLines)]
expPara = {}
startData = True
startTime = False
expData = {}
cycleData = {}
timeStamps = []
bfpStates = []
piezoVoltages = []
peakPositions = []
count = 0
firstExpParaLine = True
secondExpParaLine = False
thirdExpParaLine = False
allFileData = []
for line in expParaLines:
lineData = line.split('\t')
if firstExpParaLine:
expPara['experimentMode'] = float(lineData[0])
expPara['edgesNumber'] = float(lineData[1])
expPara['u2ratio'] = float(lineData[2])
expPara['velocityBias'] = float(lineData[3])
firstExpParaLine = False
secondExpParaLine = True
elif secondExpParaLine:
expPara['springConstant'] = float(lineData[0])
expPara['pipetteDiameter'] = float(lineData[1])
expPara['rbcCellDiameter'] = float(lineData[2])
expPara['contactDiscDiameter'] = float(lineData[3])
expPara['beadDiameter'] = float(lineData[4])
expPara['aspiratedLength'] = float(lineData[5])
expPara['aspirationPressure'] = float(lineData[6])
expPara['temperature'] = float(lineData[7])
expPara['viscosity'] = float(lineData[8])
expPara['corticalTension'] = float(lineData[9])
secondExpParaLine = False
thirdExpParaLine = True
elif thirdExpParaLine:
expPara['impingingRate'] = float(lineData[0])
expPara['loadingRate'] = float(lineData[1])
expPara['primingRate'] = float(lineData[2])
expPara['retractingRate'] = float(lineData[3])
expPara['impingmentForce'] = float(lineData[4])
expPara['clampForce'] = float(lineData[5])
expPara['activationForce'] = float(lineData[6])
expPara['timeoutAtClamp'] = float(lineData[7])
expPara['contactTimeInSeconds'] = float(lineData[8])
expPara['cycleInterval'] = float(lineData[9])
firstExpParaLine = True
secondExpParaLine = False
thirdExpParaLine = False
for line in expLines:
lineData = line.split('\t')
if startData:
eventNumber = float(lineData[2])
startTime = True
startData = False
indexFileInFile = 0
else:
if len(lineData) == 4:
if (0.000000 == float(lineData[0])) and (0.000000 == float(lineData[1])) and (0.000000 == float(lineData[3])):
eventNumber = float(lineData[2])
startData = True
startTime = True
cycleData['timeStamps'] = timeStamps
cycleData['bfpStates'] = bfpStates
cycleData['piezoVoltages'] = piezoVoltages
cycleData['mainPeakPositions'] = peakPositions
cycleData['eventNumber'] = eventNumber
cycleData['experimentParameters'] = expPara
allFileData.append(cycleData)
cycleData = {}
timeStamps = []
bfpStates = []
piezoVoltages = []
peakPositions = []
else:
timeStamp = float(lineData[0])
timeStamps.append(timeStamp)
bfpState = float(lineData[1])
bfpStates.append(bfpState)
piezoVoltage = float(lineData[2])
piezoVoltages.append(piezoVoltage)
peakPosition = float(lineData[3])
peakPositions.append(peakPosition)
self.currentFileData = allFileData
self.allData.append(allFileData)
def analyzeAllData(self):
for fileData in self.allData:
for cycleData in fileData:
self.analyzeExperimentalData(cycleData)
def analyzeCycleInCurrentFile(self, cycleIndex):
self.currentCycleIndex = cycleIndex
for i in range(0, len(self.currentFileData)):
if cycleIndex == i:
self.analyzeExperimentalData(self.currentFileData[i])
def determineZeroForcePixelPosition(self, zeroPeakPositions):
zeroForceMean = np.mean(zeroPeakPositions)
zeroForceStd = np.std(zeroPeakPositions)
return zeroForceMean, zeroForceStd
def convertToForce(self, peakPositionArray, zeroForcePP, expPara):
springConstant = expPara['springConstant']
differenceFromZero = np.array(peakPositionArray) - zeroForcePP
timesSpringConstant = differenceFromZero * expPara['u2ratio'] * 1e3 * springConstant
return timesSpringConstant
def analyzeExperimentalData(self, cycleData):
expParameters = cycleData['experimentParameters']
timeStamps = cycleData['timeStamps']
bfpStates = cycleData['bfpStates']
piezoVoltages = cycleData['piezoVoltages']
mainPeakPositions = cycleData['mainPeakPositions']
zeroStateTimepoints = []
zeroStatePositions = []
oneStateTimepoints = []
oneStatePositions = []
twoStateTimepoints = []
twoStatePositions = []
threeStateTimepoints = []
threeStatePositions = []
fourStateTimepoints = []
fourStatePositions = []
fiveStateTimepoints = []
fiveStatePositions = []
for i in range(0, len(timeStamps)):
if bfpStates[i] == 0.000:
zeroStatePositions.append(mainPeakPositions[i])
zeroStateTimepoints.append(timeStamps[i])
if bfpStates[i] == 1.000:
oneStatePositions.append(mainPeakPositions[i])
oneStateTimepoints.append(timeStamps[i])
if bfpStates[i] == 2.000:
twoStatePositions.append(mainPeakPositions[i])
twoStateTimepoints.append(timeStamps[i])
if bfpStates[i] == 3.000:
threeStatePositions.append(mainPeakPositions[i])
threeStateTimepoints.append(timeStamps[i])
if bfpStates[i] == 4.000:
fourStatePositions.append(mainPeakPositions[i])
fourStateTimepoints.append(timeStamps[i])
if bfpStates[i] == 5.000:
fiveStatePositions.append(mainPeakPositions[i])
fiveStateTimepoints.append(timeStamps[i])
# plt.figure(1)
# plt.plot(zeroStateTimepoints, zeroStatePositions)
# plt.plot(oneStateTimepoints, oneStatePositions)
# plt.plot(twoStateTimepoints, twoStatePositions)
# plt.plot(threeStateTimepoints, threeStatePositions)
# plt.plot(fourStateTimepoints, fourStatePositions)
# plt.plot(fiveStateTimepoints, fiveStatePositions)
# plt.xlabel('Time in Seconds (s)')
# plt.ylabel('Position of edge in pixels (px)')
# plt.show()
#
# pass
#
# zeroStateAverage = np.average(zeroStatePositions)
# zeroStateStd = np.std(zeroStatePositions)
#
# oneStateAverage = np.average(oneStatePositions)
# oneStateStd = np.std(oneStatePositions)
test = self.testChangesInState(cycleData, 10)
#self.movingAverage(cycleData, 10)
pass
def testChangesInState(self, cycleData, frameHolderSize):
expParameters = np.array(cycleData['experimentParameters'])
timeStamps =
| np.array(cycleData['timeStamps']) | numpy.array |
"""
Copyright 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pytest
import numpy as np
import numpy.testing as npt
from peekingduck.pipeline.nodes.model.posenetv1.posenet_files.decode_multi import \
_calculate_keypoint_coords_on_image, _within_nms_radius_fast, _get_instance_score_fast, \
_change_dimensions, _sort_scored_parts
TEST_DIR = os.path.join(os.getcwd(), 'images')
NP_FILE = np.load(os.path.join(os.getcwd(), 'tests', 'pipeline', 'nodes',
'model', 'posenetv1', 'posenet.npz'))
@pytest.fixture
def offsets():
return NP_FILE['offsets']
@pytest.fixture
def root_image_coords():
return NP_FILE['root_image_coords']
@pytest.fixture
def dst_scores():
return NP_FILE['dst_scores']
@pytest.fixture
def dst_keypoints():
return NP_FILE['dst_keypoints']
class TestDecodeMulti:
def test_calculate_keypoint_coords_on_image(self, offsets, root_image_coords):
root_coords = _calculate_keypoint_coords_on_image(heatmap_positions=np.array([4, 6]),
output_stride=16,
offsets=offsets,
keypoint_id=6)
npt.assert_almost_equal(root_coords, root_image_coords, 2,
err_msg="Incorrect image coordinates")
def test_within_nms_radius_fast(self, root_image_coords):
squared_nms_radius = 400
pose_coords =
| np.zeros((0, 2)) | numpy.zeros |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import cPickle as pickle
import numpy as np
import cv2
import math
import numpy.random as npr
import utils.segms as segm_utils
import utils.boxes_3d as box_utils_3d
from core.config import cfg
def get_image_blob(im):
"""Convert an image into a network input.
Arguments:
im (ndarray): a gray scale image
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
processed_im, im_scale = prep_im_for_blob(im, entry = None, phase = 'test')
blob = im_list_to_blob(processed_im)
slices, height, width = blob.shape[2], blob.shape[3], blob.shape[4]
im_info = np.hstack((slices, height, width, im_scale))[np.newaxis, :]
return blob, im_scale, im_info.astype(np.float32)
def im_list_to_blob(ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent
Output is a 5D HCSHW tensor of the images concatenated along axis 0 with
shape.
"""
if not isinstance(ims, list):
ims = [ims]
max_shape = get_max_shape([im.shape[:3] for im in ims]) # np array [max_s, max_h, max_w]
num_images = len(ims)
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], max_shape[2], 1), dtype=np.float32)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], 0:im.shape[2], 0] = im
# Move channels (axis 4) to axis 1
# Axis order will become: (batch elem, channel, slices, height, width)
channel_swap = (0, 4, 1, 2, 3)
blob = blob.transpose(channel_swap)
return blob
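# im_list_to_blob example: two single-channel volumes of shape (s, h, w) are packed into a blob of
# shape (2, 1, max_s, max_h, max_w), zero-padded up to the maximum extent in each dimension.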
def get_max_shape(im_shapes):
"""Calculate max spatial size (s, h, w) for batching given a list of image shapes
"""
max_shape = np.array(im_shapes).max(axis=0)
assert max_shape.size == 3
# Pad the image so they can be divisible by a stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
max_shape[2] = int(np.ceil(max_shape[2] / stride) * stride)
return max_shape
def crop_data_3d(im, entry):
# Randomly select the cropping start index and crop with half-overlap.
# Select the cropped block containing the most positive voxels, because of the sparsity.
data_slices, data_height, data_width = map(int, im.shape[:])
boxes = entry['boxes'].copy()
segms = entry['segms'].copy()
ss = np.array(cfg.TRAIN.IN_SIZE, dtype=np.int16)
x_min = math.floor(np.min(boxes[:, 0]))
y_min = math.floor(np.min(boxes[:, 1]))
z_min = math.floor(np.min(boxes[:, 2]))
x_s_min = 0
x_s_max = min(x_min, data_width - ss[2])
y_s_min = 0
y_s_max = min(y_min, data_height - ss[1])
z_s_min = 0
z_s_max = min(z_min, data_slices - ss[0])
x_s = x_s_min if x_s_min == x_s_max else \
npr.choice(range(x_s_min, x_s_max + 1))
y_s = y_s_min if y_s_min == y_s_max else \
npr.choice(range(y_s_min, y_s_max + 1))
z_s = z_s_min if z_s_min == z_s_max else \
npr.choice(range(z_s_min, z_s_max + 1))
s_list = list(range(z_s, data_slices - ss[0], int(ss[0] / 2)))
h_list = list(range(y_s, data_height - ss[1], int(ss[1] / 2)))
w_list = list(range(x_s, data_width - ss[2], int(ss[2] / 2)))
s_list.append(data_slices - ss[0])
h_list.append(data_height - ss[1])
w_list.append(data_width - ss[2])
max_pos_num = 0
posit = []
for z in s_list:
for y in h_list:
for x in w_list:
boxes[:, 0::3] -= x
boxes[:, 1::3] -= y
boxes[:, 2::3] -= z
np.clip(boxes[:, 0::3], 0, ss[2] - 1, out=boxes[:, 0::3])
np.clip(boxes[:, 1::3], 0, ss[1] - 1, out=boxes[:, 1::3])
np.clip(boxes[:, 2::3], 0, ss[0] - 1, out=boxes[:, 2::3])
invalid = (boxes[:, 0] == boxes[:, 3]) | (boxes[:, 1] == boxes[:, 4]) | (boxes[:, 2] == boxes[:, 5])
valid_inds = np.nonzero(~ invalid)[0]
pos_box_volumes, _ = box_utils_3d.boxes_volume(boxes[valid_inds, :])
tmp_pos_num = np.sum(pos_box_volumes)
if tmp_pos_num > max_pos_num:
max_pos_num = tmp_pos_num
posit = [x, y, z]
boxes = entry['boxes'].copy()
x, y, z = posit[:]
im = im[z: z+ss[0], y: y+ss[1], x: x+ss[2]]
boxes[:, 0::3] -= x
boxes[:, 1::3] -= y
boxes[:, 2::3] -= z
segms[:, 0] -= x
segms[:, 1] -= y
segms[:, 2] -= z
np.clip(boxes[:, 0::3], 0, ss[2] - 1, out=boxes[:, 0::3])
np.clip(boxes[:, 1::3], 0, ss[1] - 1, out=boxes[:, 1::3])
np.clip(boxes[:, 2::3], 0, ss[0] - 1, out=boxes[:, 2::3])
np.clip(segms[:, 0], 0, ss[2] - 1, out=segms[:, 0])
np.clip(segms[:, 1], 0, ss[1] - 1, out=segms[:, 1])
np.clip(segms[:, 2], 0, ss[0] - 1, out=segms[:, 2])
entry['boxes'] = boxes
entry['segms'] = segms
entry['slices'] = ss[0]
entry['height'] = ss[1]
entry['width'] = ss[2]
return im
def prep_im_for_blob(im, entry, phase):
"""Prepare an image for use as a network input blob. Specially:
- Subtract per-channel pixel mean
- Convert to float32
- Rescale to each of the specified target size (capped at max_size)
- crop if need
Returns a list of transformed images, one for each target size. Also returns
the scale factors that were used to compute each returned image.
"""
im = im.astype(np.float32, copy=False)
if cfg.PP_METHOD == 'norm1':
mask = im > 0
mean_val =
| np.mean(im[mask]) | numpy.mean |
"""
Defines the penetration loss
"""
import torch
import torch.nn.functional as F
import trimesh
import numpy as np
import os
from scipy.spatial import cKDTree as KDTree
# import torch.functional as F
import json
def create_grid_points_from_bounds(minimum, maximum, res):
x = np.linspace(minimum, maximum, res)
X, Y, Z =
| np.meshgrid(x, x, x, indexing='ij') | numpy.meshgrid |
# June 2017 : <NAME> : <EMAIL>
# --------------------------------------------------
#
# Adapted Mathis Hain's original code to:
# 1. Work with Python 3.
# 2. Vectorise with numpy, for speed.
# 3. Conform to PEP8 formatting.
# 4. Condense functions into two files
# 5. Make it work with the cbsyst module
# (https://github.com/oscarbranson/cbsyst) for
# calculating seawater carbonate and B chem in seawater.
#
# Original Header
# ---------------
# MyAMI Specific Ion Interaction Model (Version 1.0):
# This is a Python script to calculate thermodynamic pK's and conditional pK's
# Author: <NAME> -- <EMAIL>
#
# Reference:
# <NAME>., <NAME>., <NAME>., and <NAME>. (2015) The effects of secular calcium and magnesium concentration changes on the thermodynamics of seawater acid/base chemistry: Implications for Eocene and Cretaceous ocean carbon chemistry and buffering, Global Biogeochemical Cycles, 29, doi:10.1002/2014GB004986
#
# For general context on the calculations see Millero, 2007 (Chemical Reviews) and Millero and Pierrot, 1998 (Aquatic Geochemistry)
import itertools
import numpy as np
from tqdm import tqdm
from scipy.optimize import curve_fit
from cbsyst.helpers import Bunch, prescorr
# Functions from K_thermo_conditional.py
# --------------------------------------
# definition of the function that takes temperature (and salinity) as input and returns the K at that temperature
def CalculateKcond(Tc, Sal):
"""
Calculate thermodynamic Ks adjusted for salinity.
Parameters
----------
Tc : float or array-like
Temperature in C
Sal : float or array-like
Salinity in PSU
P : float or array-like
Pressure in bar.
"""
sqrtSal = np.sqrt(Sal)
T = Tc + 273.15
lnT =
| np.log(T) | numpy.log |
#!/usr/bin/env python
# coding: utf-8
# # CE-40717: Machine Learning
# ## HW8-Clustering & Reinforcement Learning
#
# <NAME> - 99210259
# ### Kmeans & GMM:
#
# In this question, we implement the Kmeans & GMM algorithms. For this purpose, `DO NOT EMPLOY` ready-made python libraries. Use this implementation to solve the following questions. Kmeans should run until the centroids no longer change. GMM should run until the difference between two consecutive log-likelihoods is less than 0.1. Note that after running the Kmeans part, the initial centroids of GMM should be identical to the final Kmeans centroids.
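# A minimal sketch (illustration only, not the assignment solution) of the two stopping rules stated above,
# assuming `centroids`, `new_centroids` and the log-likelihood values are recomputed inside each loop:
#
#     while not np.allclose(new_centroids, centroids):          # Kmeans: stop once the centroids stop changing
#         ...
#     while abs(log_likelihood - prev_log_likelihood) >= 0.1:   # GMM: stop once consecutive log-likelihoods differ by < 0.1
#         ...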
# In[8]:
from sklearn.datasets import make_classification, make_moons, make_circles
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# #### Part 1:
#
# Use the cell below to create the dataset. Then run the algorithm with k=2 centroids. For Kmeans, it is recommended to run the algorithm from several different starting states and keep the best result.
# In[9]:
X,Y = make_classification(n_samples=700, n_features=10, n_informative=5,
n_redundant=0, n_clusters_per_class=2, n_classes=3)
# ## KMeans Implementation
# In[10]:
class KMeans:
def __init__(self, n_clusters = 3, tolerance = 0.01, max_iter = 100, runs = 1):
self.n_clusters = n_clusters
self.tolerance = tolerance
self.cluster_means = np.zeros(n_clusters)
self.max_iter = max_iter
self.runs = runs
def fit(self, X,Y):
row_count, col_count = X.shape
X_values = self.__get_values(X)
X_labels = np.zeros(row_count)
costs = np.zeros(self.runs)
all_clusterings = []
for i in range(self.runs):
cluster_means = self.__initialize_means(X_values, row_count)
for _ in range(self.max_iter):
previous_means =
| np.copy(cluster_means) | numpy.copy |
import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Dense
from kitt.dataloading import BatchLoader, ListDataLoader
from kitt.dataloading.preprocessing import Preprocessing, ScalePreprocessing
from kitt.dataloading.tf import KerasSequence
from kitt.model import ModelWrapper
def test_model_map_loader():
a = [np.array([v]) for v in range(5)]
b = [np.array([v]) for v in range(5, 10)]
loader = ListDataLoader(list(zip(a, b)))
class Model(ModelWrapper):
def input_preprocessing(self) -> Preprocessing:
return ScalePreprocessing(2.0)
def output_preprocessing(self) -> Preprocessing:
return ScalePreprocessing(3.0)
model = Model()
model_loader = model.map_loader(loader)
data = list(model_loader)
a_mapped = [v * 2 for v in a]
b_mapped = [v * 3 for v in b]
assert data == list(zip(a_mapped, b_mapped))
def test_model_parallel_train():
a = [
| np.array([v]) | numpy.array |
import numpy as np
from itertools import chain
from cvxopt import matrix, solvers
solvers.options['show_progress'] = False
# DATA CREATION
def n_random_datapoint(n):
'''
Returns an n x 3 array: a constant-1 column followed by two coordinates drawn uniformly from [-1, 1).
Example output for n=8:
[[ 1. 0.09762701 0.43037873]
[ 1. 0.20552675 0.08976637]
[ 1. -0.1526904 0.29178823]
[ 1. -0.12482558 0.783546 ]
[ 1. 0.92732552 -0.23311696]
[ 1. 0.58345008 0.05778984]
[ 1. 0.13608912 0.85119328]
[ 1. -0.85792788 -0.8257414 ]]
'''
return add_constant(np.random.uniform(-1,1, size=(n,2)))
def random_set(n, target_function, transform=None):
x = n_random_datapoint(n)
y = get_y(target_function, x)
if transform is None:
return DataML((x, y))
else:
return DataML((x, y), transform)
class DataML:
def __init__(self, data, transform=None):
xy_as_tuple = isinstance(data, (tuple, list)) and len(data) == 2
if xy_as_tuple:
self.x = np.copy(data[0])
self.y = np.copy(data[1])
else:
self.x = data[:,:data.shape[1]-1]
self.y = data[:,data.shape[1]-1]
if transform is None:
self.z = self.x
else:
self.z = transform(self.x)
if transform is None and not xy_as_tuple :
self.z_y = data
else:
self.z_y = np.concatenate([self.z, np.array([self.y]).T], axis=1)
def __repr__(self):
z_repr = "input : z\n" + str(self.z)
y_repr = "output : y\n" + str(self.y)
return z_repr + "\n" + y_repr
def get_y(target_function, x):
return np.apply_along_axis(target_function, 1, x)
# LINEAR FUNCTION CREATION
def rand_line():
x1, y1, x2, y2 = np.random.uniform(-1,1, size=4)
m = (y1 - y2) / (x1 - x2)
c = y1 - x1 * m
return m, c
def create_linear_function(m,c):
def linear_function(x):
return x*m + c
return linear_function
def create_linear_target_function(linear_function):
def linear_target_function(data_point):
if data_point[2] - linear_function(data_point[1]) < 0:
return -1
else:
return 1
return linear_target_function
def random_target_function():
return create_linear_target_function(create_linear_function(*rand_line()))
# TRANSFORMS
def transform(x):
"""
transform
x1 x2 ---> 1 x1 x2 x1**2 x2**2 x1x2 |x1 - x2| |x1 + x2|
"""
ones = np.ones(len(x))
x1 = x[:,0]
x2 = x[:,1]
x1_sqr = x1**2
x2_sqr = x2**2
x1x2 = x1 * x2
abs_x1_minus_x2 = abs(x1-x2)
abs_x1_plus_x2 = abs(x1+x2)
return np.stack([ones, x1, x2, x1_sqr, x2_sqr, x1x2, abs_x1_minus_x2, abs_x1_plus_x2], axis=1)
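# Example: transform(np.array([[1., 2.]])) returns array([[1., 1., 2., 1., 4., 2., 1., 3.]]),
# i.e. (1, x1, x2, x1**2, x2**2, x1*x2, |x1 - x2|, |x1 + x2|).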
def add_constant(x):
"""
transform
x1 x2 ---> 1 x1 x2
"""
return np.insert(x,0,1, axis=1)
def second_order(x):
"""
transform
1 x1 x2 ---> 1 x1 x2 x1x2 x1**2 x2**2
"""
ones = x[:, 0]
x1 = x[:, 1]
x2 = x[:, 2]
x1_sqr = x1**2
x2_sqr = x2**2
x1x2 = x1 * x2
return np.stack([ones, x1, x2, x1x2, x1_sqr, x2_sqr], axis=1)
def second_order_nic(x):
"""
transform
x1 x2 ---> 1 x1 x2 x1x2 x1**2 x2**2
nic : no initial constant
"""
ones = np.ones(len(x))
x1 = x[:, 0]
x2 = x[:, 1]
x1_sqr = x1**2
x2_sqr = x2**2
x1x2 = x1 * x2
return np.stack([ones, x1, x2, x1x2, x1_sqr, x2_sqr], axis=1)
# STOCHASTIC GRADIENT DESCENT
def stochastic_gradient_descent(z, y, derivative, initial_alphas, kwargs=dict()):
"""
optimizing for alphas
must provide a function 'derivative' that takes as an argument 'i'
and returns the derivative in that direction
"""
def gen_ith_derivatives(derivative, i, kwargs):
def ith_derivative(x, y, alphas):
return derivative(x, y, alphas, i=i, **kwargs)
return ith_derivative
gradient = [ gen_ith_derivatives(derivative, i, kwargs)
for i in range(len(initial_alphas)) ]
old_run_alphas = epoch(z, y, initial_alphas, gradient)
new_run_alphas = epoch(z, y, old_run_alphas, gradient)
i = 0
while np.linalg.norm(old_run_alphas - new_run_alphas) > 0.01:
i += 1
old_run_alphas = new_run_alphas
new_run_alphas = epoch(z, y, new_run_alphas, gradient)
return new_run_alphas, i
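# Example usage of stochastic_gradient_descent (hypothetical): for squared error (alphas . z_i - y_i)**2,
# the required per-direction derivative can be written as
#     def sq_err_derivative(z_i, y_i, alphas, i):
#         return 2 * (np.dot(alphas, z_i) - y_i) * z_i[i]
# and then: alphas, n_epochs = stochastic_gradient_descent(z, y, sq_err_derivative, np.zeros(z.shape[1]))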
def epoch(z, y, alphas, gradient):
LEARNING_RATE = 0.01
data_index_iter = np.random.permutation(len(z))
for i in data_index_iter:
alphas = alphas - LEARNING_RATE * np.array(
[ derivative(z[i], y[i], alphas) for derivative in gradient ])
return alphas
# PERCEPTON LEARNING ALGORITHM
def pla(x, y, weight=None, return_iterations=False):
"""
Perceptron Learning Algorithm (PLA)
Returns: weights
Caveat: only works for linearly separable data, otherwise it will never stop
"""
if weight is None:
weight = linear_perceptron(x,y)
iterations = 0
while True:
mis_point_index = a_misclassified_point(x, y, weight)
if mis_point_index is None:
if return_iterations:
return weight, iterations
return weight
weight = weight + x[mis_point_index] * y[mis_point_index]
iterations += 1
def a_misclassified_point(x, y, weight):
start = np.random.randint(0, len(x) - 1)
for i in chain(range(start, len(x)), range(start)):
if sign(np.dot(x[i], weight)) != y[i]:
return i
return None
# LINEAR PERCEPTON
def linear_perceptron(x,y):
xt_x = x.transpose().dot(x)
xt_y = x.transpose().dot(y)
inv_xt_x = np.linalg.inv(xt_x)
return inv_xt_x.dot(xt_y)
# CONSTRAINED LEARNING
def minimize_error_aug(z,y,a):
"""
minimize the augmented error by solving
d_Ein = Z^T (Z*w - y) + a*w = 0
=> w = (Z^T Z + a*I)^-1 * Z^T y
Returns: weights
"""
zz = z.transpose().dot(z)
zz_plus_ai = zz + a * np.identity(len(zz))
inv_zz_plus_ai = np.linalg.inv(zz_plus_ai)
zy = z.transpose().dot(y)
inv_zz_plus_ai_zy = inv_zz_plus_ai.dot(zy)
return inv_zz_plus_ai_zy
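# Example usage of minimize_error_aug (hypothetical data): with z = add_constant(np.array([[0.], [1.], [2.]]))
# and y = np.array([0., 1., 2.]), minimize_error_aug(z, y, a=0.0) reduces to ordinary least squares and returns
# approximately [0., 1.] (intercept, slope); a > 0 shrinks the weights.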
# SUPPORT VECTOR MACHINE
def svm(x, y):
"""
classification SVM
Minimize
1/2 * w^T w
subject to
y_n (w^T x_n + b) >= 1
"""
weights_total = len(x[0])
I_n = np.identity(weights_total-1)
P_int = np.vstack(([
| np.zeros(weights_total-1) | numpy.zeros |
__doc__ = """Tests for rod initialisation module"""
import numpy as np
from numpy.testing import assert_allclose
from elastica.utils import MaxDimension, Tolerance
import pytest
import sys
from elastica.rod.data_structures import _RodSymplecticStepperMixin
from elastica.rod.factory_function import allocate
class MockRodForTest(_RodSymplecticStepperMixin):
def __init__(
self,
n_elements,
_vector_states,
_matrix_states,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
):
self.n_elems = n_elements
self._vector_states = _vector_states
self._matrix_states = _matrix_states
self.radius = radius
self.mass_second_moment_of_inertia = mass_second_moment_of_inertia
self.inv_mass_second_moment_of_inertia = inv_mass_second_moment_of_inertia
self.shear_matrix = shear_matrix
self.bend_matrix = bend_matrix
self.density = density
self.volume = volume
self.mass = mass
self.dissipation_constant_for_forces = dissipation_constant_for_forces
self.dissipation_constant_for_torques = dissipation_constant_for_torques
self.internal_forces = internal_forces
self.internal_torques = internal_torques
self.external_forces = external_forces
self.external_torques = external_torques
self.lengths = lengths
self.rest_lengths = rest_lengths
self.tangents = tangents
self.dilatation = dilatation
self.dilatation_rate = dilatation_rate
self.voronoi_dilatation = voronoi_dilatation
self.rest_voronoi_lengths = rest_voronoi_lengths
self.sigma = sigma
self.kappa = kappa
self.rest_sigma = rest_sigma
self.rest_kappa = rest_kappa
self.internal_stress = internal_stress
self.internal_couple = internal_couple
self.damping_forces = damping_forces
self.damping_torques = damping_torques
_RodSymplecticStepperMixin.__init__(self)
@classmethod
def straight_rod(
cls,
n_elements,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
# poisson_ratio,
*args,
**kwargs
):
(
n_elements,
_vector_states,
_matrix_states,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
) = allocate(
n_elements,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
# poisson_ratio,
alpha_c=4.0 / 3.0,
*args,
**kwargs
)
return cls(
n_elements,
_vector_states,
_matrix_states,
radius,
mass_second_moment_of_inertia,
inv_mass_second_moment_of_inertia,
shear_matrix,
bend_matrix,
density,
volume,
mass,
dissipation_constant_for_forces,
dissipation_constant_for_torques,
internal_forces,
internal_torques,
external_forces,
external_torques,
lengths,
rest_lengths,
tangents,
dilatation,
dilatation_rate,
voronoi_dilatation,
rest_voronoi_lengths,
sigma,
kappa,
rest_sigma,
rest_kappa,
internal_stress,
internal_couple,
damping_forces,
damping_torques,
)
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_input_and_output_position_array(n_elems):
"""
This test checks that, if the input position array is
valid, allocate sets the input position as the rod position array.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
# Check if the input position vector and output position vector are valid and same
correct_position = np.zeros((3, n_elems + 1))
correct_position[0] = np.random.randn(n_elems + 1)
correct_position[1] = np.random.randn(n_elems + 1)
correct_position[..., 0] = start
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=correct_position,
)
test_position = mockrod.position_collection
assert_allclose(correct_position, test_position, atol=Tolerance.atol())
@pytest.mark.xfail(raises=AssertionError)
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_input_and_position_array_for_different_start(n_elems):
"""
This test checks the failure case in which the first element of the
input position array is not the user-defined start position.
Parameters
----------
n_elems
Returns
-------
"""
start = np.random.randn(3)
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check if the input position vector start position is different than the user defined start position
correct_position = np.random.randn(3, n_elems + 1)
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
position=correct_position,
)
test_position = mockrod.position_collection
assert_allclose(correct_position, test_position, atol=Tolerance.atol())
def test_compute_position_array_using_user_inputs():
"""
This test checks that the allocate function correctly computes the
position vector from the start, direction and base length inputs.
Returns
-------
"""
n_elems = 4
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check if without input position vector, output position vector is valid
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
)
correct_position = np.zeros((3, n_elems + 1))
correct_position[0, :] = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
test_position = mockrod.position_collection
assert_allclose(correct_position, test_position, atol=Tolerance.atol())
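# A minimal sketch (not part of the original tests) of how the expected node positions
# can be written in general, assuming the same straight-rod inputs as above:
#     expected = start.reshape(3, 1) + np.outer(direction, np.linspace(0.0, base_length, n_elems + 1))
# For n_elems = 4 this reproduces the hard-coded row [0.0, 0.25, 0.5, 0.75, 1.0] used above.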
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_compute_directors_matrix_using_user_inputs(n_elems):
"""
This test checks the director array created by the allocate function. For this
test case we use the user-defined direction and normal to compute directors.
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction = np.array([1.0, 0.0, 0.0])
normal = np.array([0.0, 0.0, 1.0])
base_length = 1.0
base_radius = 0.25
density = 1000
nu = 0.1
youngs_modulus = 1e6
poisson_ratio = 0.3
shear_modulus = youngs_modulus / (poisson_ratio + 1.0)
# Check directors; if we don't input any directors, the computed ones should be valid
correct_directors = np.zeros((MaxDimension.value(), MaxDimension.value(), n_elems))
binormal = np.cross(direction, normal)
tangent_collection = np.repeat(direction[:, np.newaxis], n_elems, axis=1)
normal_collection = np.repeat(normal[:, np.newaxis], n_elems, axis=1)
binormal_collection = np.repeat(binormal[:, np.newaxis], n_elems, axis=1)
correct_directors[0, ...] = normal_collection
correct_directors[1, ...] = binormal_collection
correct_directors[2, ...] = tangent_collection
mockrod = MockRodForTest.straight_rod(
n_elems,
start,
direction,
normal,
base_length,
base_radius,
density,
nu,
youngs_modulus,
shear_modulus=shear_modulus,
)
test_directors = mockrod.director_collection
assert_allclose(correct_directors, test_directors, atol=Tolerance.atol())
@pytest.mark.parametrize("n_elems", [5, 10, 50])
def test_directors_using_input_position_array(n_elems):
"""
This test checks the case in which directors are computed
using the input position array and a user-defined normal.
Parameters
----------
n_elems
Returns
-------
"""
start = np.array([0.0, 0.0, 0.0])
direction =
|
np.array([1.0, 0.0, 0.0])
|
numpy.array
|
# Copyright 2020 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# See https://floris.readthedocs.io for documentation
import math
import numpy as np
from scipy.stats import norm
from scipy.spatial import distance_matrix
from scipy.interpolate import interp1d
from ..utilities import cosd, sind, tand
from ..logging_manager import LoggerBase
from floris.simulation.wake_vortex.VortexCylinder import vc_tang_u, vc_longi_u, vc_root_u, vcs_tang_u, vcs_longi_u, vc_tang_u_doublet
from floris.simulation.wake_vortex.VortexDoublet import doublet_line_u
from floris.simulation.wake_vortex.SelfSimilar import ss_u
from floris.simulation.wake_vortex.VortexCylinderSkewed import svc_tang_u, svc_longi_u, svc_root_u, svcs_tang_u, svcs_longi_u
from floris.simulation.wake_vortex.Solver import Ct_const_cutoff, WakeVorticityFromCt, WakeVorticityFromGamma
class Turbine(LoggerBase):
"""
Turbine is a class containing objects pertaining to the individual
turbines.
Turbine is a model class representing a particular wind turbine. It
is largely a container of data and parameters, but also contains
methods to probe properties for output.
Args:
instance_dictionary: A dictionary that is generated from the
input_reader; it should have the following key-value pairs:
- **description** (*str*): A string containing a description of
the turbine.
- **properties** (*dict*): A dictionary containing the following
key-value pairs:
- **rotor_diameter** (*float*): The rotor diameter (m).
- **hub_height** (*float*): The hub height (m).
- **blade_count** (*int*): The number of blades.
- **pP** (*float*): The cosine exponent relating the yaw
misalignment angle to power.
- **pT** (*float*): The cosine exponent relating the rotor
tilt angle to power.
- **generator_efficiency** (*float*): The generator
efficiency factor used to scale the power production.
- **power_thrust_table** (*dict*): A dictionary containing the
following key-value pairs:
- **power** (*list(float)*): The coefficient of power at
different wind speeds.
- **thrust** (*list(float)*): The coefficient of thrust
at different wind speeds.
- **wind_speed** (*list(float)*): The wind speeds for
which the power and thrust values are provided (m/s).
- **yaw_angle** (*float*): The yaw angle of the turbine
relative to the wind direction (deg). A positive value
represents a counter-clockwise rotation relative to the
wind direction.
- **tilt_angle** (*float*): The tilt angle of the turbine
(deg). Positive values correspond to a downward rotation of
the rotor for an upstream turbine.
- **TSR** (*float*): The tip-speed ratio of the turbine. This
parameter is used in the "curl" wake model.
- **ngrid** (*int*, optional): The square root of the number
of points to use on the turbine grid. This number will be
squared so that the points can be evenly distributed.
Defaults to 5.
- **rloc** (*float, optional): A value, from 0 to 1, that determines
the width/height of the grid of points on the rotor as a ratio of
the rotor radius.
Defaults to 0.5.
Need to Update _________________________________________
- R: rotor radius
- r_hub: position of the turbine hub in global coordinate system
- e_shaft_yaw0: unit vector along the shaft (untilted for now), going downwind, when the turbine has zero yaw
- e_vert: unit vertical vector, about which positive yawing is done
- U0: Free stream velocity in global coordinates (can be changed with `update_wind`)
- Ct: Thrust coefficient (can be changed with `update_loading`)
- Ground: Include ground effect in calculations
- Model: one of ['VC','VCFF', 'VD', 'SS']
'VCFF': Vortex cylinder with far-field approximation (fastest)
'VC': Vortex cylinder
'SS': Self similar model of Troldborg et al. (not good close to rotor)
'VD': Vortex doublet line model (not good close to rotor)
Returns:
Turbine: An instantiated Turbine object.
"""
def __init__(self, instance_dictionary):
self.description = instance_dictionary["description"]
properties = instance_dictionary["properties"]
self.rotor_diameter = properties["rotor_diameter"]
self.hub_height = properties["hub_height"]
self.blade_count = properties["blade_count"]
self.pP = properties["pP"]
self.pT = properties["pT"]
self.generator_efficiency = properties["generator_efficiency"]
self.power_thrust_table = properties["power_thrust_table"]
self.yaw_angle = properties["yaw_angle"]
self.tilt_angle = properties["tilt_angle"]
self.tsr = properties["TSR"]
# Vortex turbine (for induction computation) parameters
self.R = self.rotor_diameter/2
self.r_hub = [0,0,self.hub_height]
self.e_shaft_yaw0 = [1,0,0]
self.e_vert = [0,0,1]
"""
Specifies vectors to define coordinate notations for transformation
matrices between vortex turbine cylindrical coordinates and global coordinates
"""
# Specify global coordinate system [TODO ??? need to check]
self.e_shaft_g0 = np.asarray([1,0,0]).reshape(3,1)
self.e_vert_g = np.asarray([0,0,1]).reshape(3,1)
self.e_horz_g = np.asarray([1.,0.,0.]).reshape(3,1)
# Transformation matrix from cylindrical to wind turbine coordinate system
self.T_c2wt = np.asarray([[0,0,1,1,0,0,0,1,0]]).reshape(3,3)
self.set_yaw_angle(self.yaw_angle)
self.update_position(self.r_hub)
self.U0_g = np.asarray([10,0,0]).ravel().reshape(3,1)
#self.update_wind([0,0,10])
self.name=''
self.r=None
self.gamma_t=None
self.gamma_l=None
self.Gamma_r=None
self.Lambda=np.inf
self.Ground=False# Ground effect will be included in calculation of induced velocity
self.chi=None
self.Model='VC'
# initialize to an invalid value until calculated
self.air_density = -1
self.use_turbulence_correction = False
# Initiate to False unless specifically set
# For the following parameters, use default values if not user-specified
self.ngrid = int(properties["ngrid"]) if "ngrid" in properties else 5
self.rloc = float(properties["rloc"]) if "rloc" in properties else 0.5
if "use_points_on_perimeter" in properties:
self.use_points_on_perimeter = bool(properties["use_points_on_perimeter"])
else:
self.use_points_on_perimeter = False
self._initialize_turbine()
# The indices for this Turbine instance's points from the FlowField
# are set in `FlowField._discretize_turbine_domain` and stored
# in this variable.
self.flow_field_point_indices = None
# Private methods
def _initialize_turbine(self):
# Initialize the turbine given saved parameter settings
# Precompute interps
wind_speed = self.power_thrust_table["wind_speed"]
cp = self.power_thrust_table["power"]
self.fCpInterp = interp1d(wind_speed, cp, fill_value="extrapolate")
ct = self.power_thrust_table["thrust"]
self.fCtInterp = interp1d(wind_speed, ct, fill_value="extrapolate")
# constants
self.grid_point_count = self.ngrid * self.ngrid
if np.sqrt(self.grid_point_count) % 1 != 0.0:
raise ValueError("Turbine.grid_point_count must be the square of a number")
self.reset_velocities()
# initialize derived attributes
self.grid = self._create_swept_area_grid()
# Compute list of inner powers
inner_power = np.array([self._power_inner_function(ws) for ws in wind_speed])
self.powInterp = interp1d(wind_speed, inner_power, fill_value="extrapolate")
def _create_swept_area_grid(self):
# TODO: add validity check:
# rotor points has a minimum in order to always include points inside
# the disk ... 2?
#
# the grid consists of the y,z coordinates of the discrete points which
# lie within the rotor area: [(y1,z1), (y2,z2), ... , (yN, zN)]
# update:
# using all the grid points because that is how Roald did it.
# are the points outside of the rotor disk used later?
# determine the dimensions of the square grid
num_points = int(np.round(np.sqrt(self.grid_point_count)))
pt = self.rloc * self.rotor_radius
# syntax: np.linspace(min, max, n points)
horizontal = np.linspace(-pt, pt, num_points)
vertical = np.linspace(-pt, pt, num_points)
# build the grid with all of the points
grid = [(h, vertical[i]) for i in range(num_points) for h in horizontal]
# keep only the points in the swept area
if self.use_points_on_perimeter:
grid = [
point
for point in grid
if np.hypot(point[0], point[1]) <= self.rotor_radius
]
else:
grid = [
point
for point in grid
if np.hypot(point[0], point[1]) < self.rotor_radius
]
return grid
def _power_inner_function(self, yaw_effective_velocity):
"""
This method calculates the power for an array of yaw effective wind
speeds without the air density and turbulence correction parameters.
This is used to initialize the power interpolation method used to
compute turbine power.
"""
# Now compute the power
cptmp = self._fCp(
yaw_effective_velocity
) # Note Cp is also now based on yaw effective velocity
return (
0.5
* (np.pi * self.rotor_radius ** 2)
* cptmp
* self.generator_efficiency
* yaw_effective_velocity ** 3
)
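# Descriptive note (not in the original source): the value returned above is the usual
# rotor power expression without the air density,
#     P_inner = 0.5 * (pi * R**2) * Cp(v_yaw) * generator_efficiency * v_yaw**3,
# presumably so that the full power can be recovered later by scaling with the air
# density (and any turbulence correction).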
def _fCp(self, at_wind_speed):
wind_speed = self.power_thrust_table["wind_speed"]
if at_wind_speed < min(wind_speed):
return 0.0
else:
_cp = self.fCpInterp(at_wind_speed)
if _cp.size > 1:
_cp = _cp[0]
return float(_cp)
def _fCt(self, at_wind_speed):
wind_speed = self.power_thrust_table["wind_speed"]
if at_wind_speed < min(wind_speed):
return 0.99
else:
_ct = self.fCtInterp(at_wind_speed)
if _ct.size > 1:
_ct = _ct[0]
if _ct > 1.0:
_ct = 0.9999
return float(_ct)
# Public methods
def change_turbine_parameters(self, turbine_change_dict):
"""
Change a turbine parameter and call the initialize function.
Args:
turbine_change_dict (dict): A dictionary of parameters to change.
"""
for param in turbine_change_dict:
self.logger.info(
"Setting {} to {}".format(param, turbine_change_dict[param])
)
setattr(self, param, turbine_change_dict[param])
self._initialize_turbine()
def calculate_swept_area_velocities(
self, local_wind_speed, coord, x, y, z, additional_wind_speed=None
):
"""
This method calculates and returns the wind speeds at each
rotor swept area grid point for the turbine, interpolated from
the flow field grid.
Args:
wind_direction (float): The wind farm wind direction (deg).
local_wind_speed (np.array): The wind speed at each grid point in
the flow field (m/s).
coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
x (np.array): The x-coordinates of the flow field grid.
y (np.array): The y-coordinates of the flow field grid.
z (np.array): The z-coordinates of the flow field grid.
Returns:
np.array: The wind speed at each rotor grid point
for the turbine (m/s).
"""
u_at_turbine = local_wind_speed
# TODO:
# # PREVIOUS METHOD========================
# # UNCOMMENT IF ANY ISSUE UNCOVERED WITH NEW METHOD
# x_grid = x
# y_grid = y
# z_grid = z
# yPts = np.array([point[0] for point in self.grid])
# zPts = np.array([point[1] for point in self.grid])
# # interpolate from the flow field to get the flow field at the grid
# # points
# dist = [np.sqrt((coord.x1 - x_grid)**2 \
# + (coord.x2 + yPts[i] - y_grid) **2 \
# + (self.hub_height + zPts[i] - z_grid)**2) \
# for i in range(len(yPts))]
# idx = [np.where(dist[i] == np.min(dist[i])) for i in range(len(yPts))]
# data = [np.mean(u_at_turbine[idx[i]]) for i in range(len(yPts))]
# # PREVIOUS METHOD========================
# Use this if no saved points (curl)
if self.flow_field_point_indices is None:
# # NEW METHOD========================
# Sort by distance
flow_grid_points = np.column_stack([x.flatten(), y.flatten(), z.flatten()])
# Set up a grid array
y_array = np.array(self.grid)[:, 0] + coord.x2
z_array = np.array(self.grid)[:, 1] + self.hub_height
x_array = np.ones_like(y_array) * coord.x1
grid_array = np.column_stack([x_array, y_array, z_array])
ii = np.argmin(distance_matrix(flow_grid_points, grid_array), axis=0)
else:
ii = self.flow_field_point_indices
# return np.array(data)
if additional_wind_speed is not None:
return (
np.array(u_at_turbine.flatten()[ii]),
np.array(additional_wind_speed.flatten()[ii]),
)
else:
return np.array(u_at_turbine.flatten()[ii])
def return_grid_points(self, coord):
"""
Retrieve the x, y, and z grid points on the rotor.
Args:
coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
Returns:
np.array, np.array, np.array:
- x grid points on the rotor.
- y grid points on the rotor.
- z grid points on the rotor.
"""
y_array = np.array(self.grid)[:, 0] + coord.x2
z_array = np.array(self.grid)[:, 1] + self.hub_height
x_array = np.ones_like(y_array) * coord.x1
return x_array, y_array, z_array
def update_velocities(
self, u_wake, coord, flow_field, rotated_x, rotated_y, rotated_z
):
"""
This method updates the velocities at the rotor swept area grid
points based on the flow field freestream velocities and wake
velocities.
Args:
u_wake (np.array): The wake deficit velocities at all grid points
in the flow field (m/s).
coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
flow_field (:py:class:`~.flow_field.FlowField`): The flow field.
rotated_x (np.array): The x-coordinates of the flow field grid
rotated so the new x axis is aligned with the wind direction.
rotated_y (np.array): The y-coordinates of the flow field grid
rotated so the new x axis is aligned with the wind direction.
rotated_z (np.array): The z-coordinates of the flow field grid
rotated so the new x axis is aligned with the wind direction.
"""
# reset the waked velocities
local_wind_speed = flow_field.u_initial - u_wake
self.velocities = self.calculate_swept_area_velocities(
local_wind_speed, coord, rotated_x, rotated_y, rotated_z
)
def reset_velocities(self):
"""
This method sets the velocities at the turbine's rotor swept
area grid points to zero.
"""
self.velocities = np.array([0.0] * self.grid_point_count)
def set_yaw_angle(self, yaw_angle):
"""
This method sets the turbine's yaw angle.
Args:
yaw_angle (float): The new yaw angle (deg).
Examples:
To set a turbine's yaw angle:
>>> floris.farm.turbines[0].set_yaw_angle(20.0)
"""
self._yaw_angle = yaw_angle
# Vortex wind turbine
# print('>>> turbine.py : set yaw VC_WT')
self.yaw_pos = yaw_angle * np.pi/180 # Convert from degrees to radians
# print('Yaw Angle',yaw_angle)
# print('Yaw_pos',self.yaw_pos)
# Transformation matrix for rotating vector around yaw angle
c,s=np.cos(self.yaw_pos),np.sin(self.yaw_pos)
self.T_wt2g = np.asarray([c,-s,0,s,c,0,0,0,1]).reshape(3,3)
# Rotating the shaft vector so that its coordinate follow the new yaw position
self.e_shaft_g=np.dot(self.T_wt2g , self.e_shaft_g0)
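# A quick sanity sketch of the rotation above (illustrative, not in the original code):
# with yaw_angle = 90 deg we get c = 0 and s = 1, so
#     T_wt2g = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]
# and the zero-yaw shaft vector [1, 0, 0] is rotated to [0, 1, 0], i.e. a positive yaw
# turns the shaft counter-clockwise about the vertical axis.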
def update_position(self,r_hub):
self.r_hub=np.asarray(r_hub).ravel().reshape(3,1)
def compute_induction(self, Ind_Opts, rotated_x, rotated_y, rotated_z, CT0=None):
"""
Computes induction from the turbine as a result of the blockage effect. Applied to velocity
field to simulate the induction zone of a turbine.
INPUTS:
Ind_Opts (dict): Dictionary of inputs to model the resulting
turbine induction zone as a result of the blockage effect.
rotated_x (np.array): The x-coordinates of the flow field grid
rotated so the new x axis is aligned with the wind direction.
rotated_y (np.array): The y-coordinates of the flow field grid
rotated so the new x axis is aligned with the wind direction.
rotated_z (np.array): The z-coordinates of the flow field grid
rotated so the new x axis is aligned with the wind direction.
"""
self.Ind_Opts = Ind_Opts
if Ind_Opts['induction']: # Can remove (won't be called unless induction)
if Ind_Opts['Ct_test']:
print('Ct-test')
# update vortex cylinder velocity and loading
r_bar_cut = 0.11
r_bar_tip = 0.9
if CT0 is None:
CT0 = self.Ct
print('CT0: ', CT0)
self.R = self.rotor_diameter/2*Ind_Opts['Rfact']
nCyl = 1 # For now
Lambda = np.inf
vr_bar = np.linspace(0,1.0,100)
Ct_AD = Ct_const_cutoff(CT0,r_bar_cut,vr_bar,r_bar_tip) # TODO change me to distributed
gamma_t_Ct = None
self.update_loading(r=vr_bar*self.R, VC_Ct=Ct_AD, Lambda=Lambda, nCyl=nCyl, gamma_t_Ct=gamma_t_Ct)
self.gamma_t= self.gamma_t*Ind_Opts['GammaFact']
root = False
longi = False
tang = True
# print('.',end='')
ux,uy,uz = self.compute_u(rotated_x,rotated_y,rotated_z,root=root,longi=longi,tang=tang, only_ind=True, no_wake=False, Decay=False, Model = Ind_Opts['Model'], ground=Ind_Opts['Ground'],R_far_field=Ind_Opts['R_far_field'])
else:
# update vortex cylinder velocity and loading
r_bar_cut = 0.01
# r_bar_cut = 0.11
# r_bar_tip = 0.9
# print("------Ct:", self.Ct)
if CT0 is None:
CT0 = self.Ct
# print('CT0: ', CT0)
self.R = self.rotor_diameter/2*Ind_Opts['Rfact']
nCyl = 1 # For now
Lambda = 30 # if >20 then no swirl
# Lambda = np.inf
vr_bar = np.linspace(0,1.0,100)
Ct_AD = Ct_const_cutoff(CT0,r_bar_cut,vr_bar) # TODO change me to distributed
# Ct_AD = Ct_const_cutoff(CT0,r_bar_cut,vr_bar,r_bar_tip) # TODO change me to distributed
gamma_t_Ct = None
self.update_loading(r=vr_bar*self.R, VC_Ct=Ct_AD, Lambda=Lambda, nCyl=nCyl, gamma_t_Ct=gamma_t_Ct)
self.gamma_t= self.gamma_t*Ind_Opts['GammaFact']
# print('gamma_t: ', self.gamma_t)
root = False
longi = False
tang = True
# print('.',end='')
ux,uy,uz = self.compute_u(rotated_x,rotated_y,rotated_z,root=root,longi=longi,tang=tang, only_ind=True, no_wake=True, Decay=True, Model = Ind_Opts['Model'], ground=Ind_Opts['Ground'],R_far_field=Ind_Opts['R_far_field'])
return ux,uy,uz
def update_loading(self,r=None,VC_Ct=None,Gamma=None,Lambda=None,nCyl=1,gamma_t_Ct=None):
"""
VC_Ct differs from Ct in that for a vortex cylinder VC_Ct is constant along the blade and
zero at the root and the tip
"""
"""
Computes relevant parameters when the turbine loading is updated, mainly, gamma_t,
the intensity of the tangential vorticity sheet.
The distribution will be determined based on the inputs, with one of these three approaches:
1. VC_Ct(r) distribution
2. Gamma(r) distribution
3. gamma_t(VC_Ct(r)) function
INPUTS:
r: radial coordinates at which VC_Ct or Gamma are provided
VC_Ct: local thrust coefficient (VC_Ct(r), array), or total thrust coefficient (CT, scalar)
Gamma: bound circulation (Gamma(r), array), or total rotor circulation (Gamma_tot, scalar)
Lambda: tip speed ratio (assumed infinite if None)
nCyl : number of cylindrical models used in the spanwise direction (default is 1)
The circulation (gamma_t) will be determined for each of the radial cylinders
gamma_t_Ct: function that provides gamma_t as function of VC_Ct (or gamma_t as function of CT)
"""
# Update vortex cylinder average velocity at turbine
self.U0_g = np.asarray([self.average_velocity,0,0]).ravel().reshape(3,1)
U0=np.linalg.norm(self.U0_g)
# print('Turbine Avg U:', self.average_velocity)
# --- Reinterpolating loading to number of cylinders if needed
if nCyl is not None:
if nCyl==1:
vr0= np.array([0.995*self.R])
if VC_Ct is not None:
VC_Ct =np.array([np.mean(VC_Ct)])
if Gamma is not None:
Gamma =np.array([np.mean(Gamma)])
else:
vr0= np.linspace(0.005,0.995,nCyl)*self.R
if VC_Ct is not None:
VC_Ct = np.interp(vr0,r,VC_Ct)
else:
Gamma = np.interp(vr0,r,Gamma)
r=vr0
# Updating Lambda
if Lambda is None:
Lambda=self.Lambda
if Lambda is None:
raise Exception('Provide `Lambda` for update_loading. (Note: `Lambda=np.Inf` supported) ')
Omega = Lambda*U0/self.R
#print('U0',U0)
#print('VC_Ct',VC_Ct)
# Computing and storing gamma distribution and loading
if gamma_t_Ct is not None:
if VC_Ct is None:
raise Exception('Provide `Ct` along `gamma_t_Ct`')
self.gamma_t = gamma_t_Ct(VC_Ct)
self.gamma_l=None # TODO
self.Gamma_r=None # TODO
elif VC_Ct is not None:
self.gamma_t,self.gamma_l,self.Gamma_r,misc=WakeVorticityFromCt(r,VC_Ct,self.R,U0,Omega)
elif Gamma is not None:
self.gamma_t,self.gamma_l,self.Gamma_r,misc=WakeVorticityFromGamma(r,Gamma,self.R,U0,Omega)
else:
raise Exception('Unknown loading spec')
#self.gamma_t=self.gamma_t*1.06
#print('gamma_t ',self.gamma_t)
#print('gamma_l ',self.gamma_l)
#print('Gamma_r ',self.Gamma_r)
#print('Gamma_/2piR',-self.Gamma_r/(2*np.pi*self.R))
#print(misc)
self.Lambda=Lambda
self.r=r
self.VC_Ct=VC_Ct
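# A minimal usage sketch, mirroring compute_induction above (CT0=0.8 and the other
# values are illustrative only):
#     vr_bar = np.linspace(0.0, 1.0, 100)
#     Ct_AD = Ct_const_cutoff(0.8, 0.01, vr_bar)   # constant Ct with a root cut-off
#     self.update_loading(r=vr_bar * self.R, VC_Ct=Ct_AD, Lambda=30, nCyl=1)
#     ux, uy, uz = self.compute_u(rotated_x, rotated_y, rotated_z, no_wake=True)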
def compute_u(self, Xg, Yg, Zg, only_ind=False, longi=False, tang=True, root=False, no_wake=False, ground=None, Decay=False, Model=None, R_far_field=6):
"""
INPUTS:
Xg, Yg, Zg: Control points in global coordinates where the flow is to be computed.
only_ind: if true, only induction is returned (without the free stream)
longi, tang, root: booleans specifying which component of vorticity is considered.
Default is `tang` only
no_wake: boolean, if true: the induced velocity in the wake is set to 0.
Typically set to true when combining with wake models.
Model : string in ['VC','VCFF','SS','VD']
'VCFF': Vortex cylinder with far-field approximation (fastest)
'VC': Vortex cylinder
'SS': Self similar model of Troldborg et al. (not good close to rotor)
'VD': Vortex doublet line model (not good close to rotor)
"""
# --- Optional argument overriding self
if ground is None:
ground=self.Ground
if Model is None:
Model=self.Model
# Control points in "Cylinder coordinate system" (rotation only)
T_c2g=np.dot(self.T_wt2g,self.T_c2wt)
Xc,Yc,Zc = transform_T(T_c2g, Xg,Yg,Zg)
# Detecting whether our vertical convention match, and define chi
e_vert_c = np.dot(T_c2g.T , self.e_vert_g)
# if self.chi is None:
# # TODO TODO chi needs induction effect!
# self.chi= np.sign(e_vert_c.ravel()[1])* (self.yaw_wind-self.yaw_pos)
# TODO TODO chi needs induction effect!
# self.chi= np.sign(e_vert_c.ravel()[1])* (self.yaw_wind-self.yaw_pos)
# print('Chi: ', self.chi)
if self.VC_Ct > 1:
self.VC_Ct = 1
self.chi= np.sign(e_vert_c.ravel()[1])* (self.yaw_wind-self.yaw_pos) * (1+0.3*(1-np.sqrt(1-self.VC_Ct[0])))
# print('Chi_: ', self.chi)
# self.chi = self.chi*1.5
# print('Chi: ', self.chi)
if self.gamma_t is None:
raise Exception('Please set loading with `update_loading` before calling `compute_u`')
uxc = np.zeros(Xg.shape)
uyc = np.zeros(Xg.shape)
uzc = np.zeros(Xg.shape)
m=np.tan(self.chi)
# Cylinder position in "Cylinder coordinate system) (rotation only)
Xcyl, Ycyl, Zcyl = transform_T(T_c2g,np.array([self.r_hub[0]]), np.array([self.r_hub[1]]), np.array([self.r_hub[2]]))
# Translate control points such that origin is at rotor center. NOTE: not all routines use this
Xc0,Yc0,Zc0=Xc-Xcyl[0],Yc-Ycyl[0],Zc-Zcyl[0]
if ground:
# Mirror control points are two time the hub height above the cylinder
Yc0mirror=Yc0+2*Ycyl[0]
Ylist=[Yc0,Yc0mirror]
#print('>>> Ground effect',Ycyl[0])
else:
Ylist=[Yc0]
# --- Root vortex influence
if root and (self.Gamma_r is not None) and self.Gamma_r!=0:
for Y in Ylist:
if np.abs(self.chi)>1e-7:
uxc0,uyc0,uzc0 = svc_root_u(Xc0,Y,Zc0,Gamma_r=self.Gamma_r,m=m,polar_out=False)
else:
uxc0,uyc0,uzc0 = vc_root_u(Xc0,Y,Zc0,Gamma_r=self.Gamma_r,polar_out=False)
uxc += uxc0
uyc += uyc0
uzc += uzc0
if len(self.gamma_t)==1:
# --- Tangential and longi - ONE Cylinder only
for iY,Y in enumerate(Ylist):
if tang and (self.gamma_t!=0):
if np.abs(self.chi)>1e-7:
if Model =='VC':
uxc0,uyc0,uzc0 = svc_tang_u(Xc0,Y,Zc0,gamma_t=self.gamma_t,R=self.R,m=m,polar_out=False)
# print('-----------------Vortex Cylinder Skewed Model------------------')
else:
pass
# raise NotImplementedError('Model '+Model + ', with yaw.')
else:
if Model =='VC':
uxc0,uyc0,uzc0 = vc_tang_u (Xc0,Y,Zc0, gamma_t=self.gamma_t, R=self.R, polar_out=False)
elif Model =='VCFF':
uxc0,uyc0,uzc0 = vc_tang_u_doublet(Xc0,Y,Zc0, gamma_t=self.gamma_t, R=self.R, polar_out=False,r_bar_Cut=R_far_field)
elif Model =='VD':
uxc0,uyc0,uzc0 = doublet_line_u(Xc0, Y, Zc0, dmz_dz = self.gamma_t * self.R**2 * np.pi)
elif Model =='SS':
uzc0 = ss_u (Xc0, Y, Zc0, gamma_t=self.gamma_t, R=self.R)
uxc0=uzc0*0
uyc0=uzc0*0
else:
raise NotImplementedError('Model'+Model)
uxc += uxc0
uyc += uyc0
uzc += uzc0
if longi and (self.gamma_l is not None) and self.gamma_l!=0 :
if np.abs(self.chi)>1e-7:
if Model =='VC':
uxc0,uyc0,uzc0 = svc_longi_u(Xc0,Y,Zc0,gamma_l=self.gamma_l,R=self.R,m=m,polar_out=False)
else:
raise NotImplementedError('Model '+Model + ', longi component.')
else:
if Model =='VC':
uxc0,uyc0,uzc0 = vc_longi_u (Xc0,Y,Zc0,gamma_l=self.gamma_l,R=self.R ,polar_out=False)
else:
raise NotImplementedError('Model'+Model + ', longi component.')
uxc += uxc0
uyc += uyc0
uzc += uzc0
else:
# --- Tangential and longi - MULTI Cylinders
if Model =='VC':
nr = len(self.r)
nWT = 1
# Control points are directly translated by routine
gamma_t = self.gamma_t.reshape((nWT,nr))
# print('r ',self.r)
# print('gamma_t',gamma_t)
if self.gamma_l is not None:
gamma_l = self.gamma_l.reshape((nWT,nr))
vR = self.r.reshape((nWT,nr))
vm = m* np.ones((nWT,nr))
if tang:
if np.abs(self.chi)>1e-7:
uxc0,uyc0,uzc0 = svcs_tang_u(Xc,Yc,Zc,gamma_t=gamma_t,R=vR,m=vm,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl,Ground=ground)
else:
uxc0,uyc0,uzc0 = vcs_tang_u (Xc,Yc,Zc,gamma_t=gamma_t,R=vR ,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl, Ground=ground)
uxc += uxc0
uyc += uyc0
uzc += uzc0
if longi and (self.gamma_l is not None):
if np.abs(self.chi)>1e-7:
uxc0,uyc0,uzc0 = svcs_longi_u(Xc,Yc,Zc,gamma_l=gamma_l,R=vR,m=vm,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl, Ground=ground)
else:
uxc0,uyc0,uzc0 = vcs_longi_u (Xc,Yc,Zc,gamma_l=gamma_l,R=vR ,Xcyl=Xcyl,Ycyl=Ycyl,Zcyl=Zcyl, Ground=ground)
uxc += uxc0
uyc += uyc0
uzc += uzc0
else:
raise NotImplementedError('Model'+Model, 'with multiple cylinders')
# if no_wake:
# # uxc[:]=0
# # uyc[:]=0
# # uzc[:]=1
# # Zero wake induction
# bDownStream=Zc0>=-0.20*self.R
# # bDownStream=Zc0>=0
# Rc = np.sqrt(Xc0**2 + Yc0**2)
# bRotorTube = Rc<self.R*1.001 # we give a margin since VD and VC have fields very dissimilar at R+/-eps
# bSelZero = np.logical_and(bRotorTube,bDownStream)
# uxc[bSelZero]=0
# uyc[bSelZero]=0
# uzc[bSelZero]=0
# Transform back to global
uxg = T_c2g[0,0]*uxc+T_c2g[0,1]*uyc+T_c2g[0,2]*uzc
uyg = T_c2g[1,0]*uxc+T_c2g[1,1]*uyc+T_c2g[1,2]*uzc
uzg = T_c2g[2,0]*uxc+T_c2g[2,1]*uyc+T_c2g[2,2]*uzc
# Decay
if Decay:
bDownStream=Xg>=(Yg-self.r_hub[1])*
|
np.tan(-self.yaw_pos)
|
numpy.tan
|
"""
Test functions in the space of solutions of the
Euler Lagrange equations of
\int_{-1}^{1} (2/tau) \alpha dq/ds + (2/tau)^5 (1-\alpha) d^3 q / ds^3 dt
"""
import unittest
from scipy.sparse import csc_matrix
import numpy as np
from gsplines.interpolator.gspline import cSplineCalc
from gsplines.basis.basis1010 import cBasis1010
from gsplines.basis.basis0010 import cBasis0010
class cMyTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(cMyTest, self).__init__(*args, **kwargs)
import sys
np.set_printoptions(
linewidth=5000000,
formatter={'float': '{:+10.3e}'.format},
threshold=sys.maxsize)
pass
def testInversion(self):
import time
print('Testing inversion of matrix')
dim = 6 # np.random.randint(2, 6)
N = 50 # np.random.randint(3, 120)
a = np.random.rand()
splcalc = cSplineCalc(dim, N, cBasis1010(a))
for i in range(50):
tauv = np.random.rand(N)
A1 = splcalc.eval_A(tauv)
# A0 = self.eval_A(tauv, dim, N, cBasis1010(a))
# e = np.max(np.abs(A1 - A0.todense()))
# # print(A0)
# # print('----------------------------------------')
# # print(A1)
# # print(dim, N)
# assert e < 1.0e-8
splcalc.printPerformace()
pass
def testcontinuity(self):
print('Test continuity constraints with plot')
for i in range(3):
dim = np.random.randint(2, 3)
N = np.random.randint(3, 10)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
tauv = 0.5 + np.random.rand(N) * 3.0
tis = [np.sum(tauv[0:i]) for i in range(0, N + 1)]
T = np.sum(tauv)
splcalc = cSplineCalc(dim, N, cBasis1010(a))
spln = splcalc.getSpline(tauv, wp)
from matplotlib import pyplot as plt
t = np.arange(0, T, 0.005)
q_list = [spln.deriv(i)(t) for i in range(0, 6)]
fig, axs = plt.subplots(6, dim)
for i in range(0, 6):
for j in range(0, dim):
axs[i, j].plot(t, q_list[i][:, j])
axs[i, j].grid()
for ti in tis:
axs[i, j].axvline(x=ti, color='b', linestyle='--')
plt.show()
def test_eval_b(self):
import time
print('Test evaluation of b vector')
for i in range(20):
dim = np.random.randint(1, 8)
N = np.random.randint(3, 200)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
dwp0 = np.zeros((dim, ))
ddwp0 = np.zeros((dim, ))
dwpT = np.zeros((dim, ))
ddwpT = np.zeros((dim, ))
splcalc = cSplineCalc(dim, N, cBasis1010(a))
b1 = splcalc.eval_b(wp)
b2 = self.eval_b(wp, N, dim, dwp0, ddwp0, dwpT, ddwpT)
e = np.max(np.abs(b1 - b2))
assert e < 1.0e-8
def non_zero_diagonals_A(self):
for i in range(0, 1):
dim = np.random.randint(1, 8)
N = np.random.randint(3, 200)
a = np.random.rand()
splcalc = cSplineCalc(dim, N, cBasis1010(a))
tauv = 10.0 * np.random.rand(N) + 0.5
splcalc.eval_A(tauv)
A = splcalc.Aeval
upper_diags = 0
flag = 0
for i in range(A.shape[0]):
if np.max(np.abs(np.diagonal(A, i))) > 1.0e-10:
assert flag != 1, 'Matrix is Not Banded!!!'
upper_diags += 1
else:
flag = 1
lower_diags = 0
flag = 0
for i in range(A.shape[0]):
if np.max(np.abs(np.diagonal(A, -i))) > 1.0e-10:
assert flag != 1, 'Matrix is Not Banded!!!'
lower_diags += 1
else:
flag = 1
# print('upper diagonas = {:d} lower diagonals = {:d}'.format(
# upper_diags, lower_diags))
assert 4 * dim + 4 == max(upper_diags, lower_diags)
# wp = (np.random.rand(N + 1, dim) - 0.5) * 2.0 * np.pi
# b = splcalc.eval_b(wp)
#
# wp = np.random.rand(N + 1, dim)
#
# spline = splcalc.solve(wp, tauv)
#
# tis = [np.sum(tauv[0:i]) for i in range(0, N + 1)]
#
# t = np.arange(0, tis[-1], 0.1)
#
# plt.plot(t, spline(t)[:, 0])
#
# plt.show()
def test_derivative_b(self):
'''
Here we test the correctness of the numerical output of the basis
class, comparing it with its analytical form obtained using sympy
'''
print('Test derivative of b w.r.t. waypoint components')
np.random.seed()
dim = np.random.randint(1, 8)
N = np.random.randint(2, 60)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
splcalc = cSplineCalc(dim, N, cBasis1010(a))
dwp = 0.0005
for i in range(N + 1):
for j in range(dim):
wpidx = i
i = j
wp_aux = wp.copy()
wp_aux[wpidx, i] += -dwp
b1 = splcalc.eval_b(wp_aux).copy()
wp_aux[wpidx, i] += 2 * dwp
b2 = splcalc.eval_b(wp_aux).copy()
dbdwpij_num = 0.5 * (b2 - b1) / dwp
dbdwpij = splcalc.eval_dbdwpij(wpidx, i)
e = np.max(np.abs(dbdwpij_num - dbdwpij))
if e > 1.0e-8:
print('Error in db_dwpij:')
print('implementation:')
print(dbdwpij)
print('(b1-b2)/dwp:')
print(dbdwpij_num)
print('component', i)
print('waypoint ', wpidx)
print('dimension ', dim)
print('number of intervals ', N)
raise AssertionError('Error of {:14.7e}'.format(e))
def test_derivative_y(self):
''' Compare the numerical derivative of y w.r.t. tau with the nominal one
'''
for i in range(40):
np.random.seed()
dim = np.random.randint(1, 3)
N = np.random.randint(2, 6)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
tauv = 1.0 + np.random.rand(N) * 2.0
splcalc = cSplineCalc(dim, N, cBasis1010(a))
dydtauNom, y = splcalc.eval_dydtau(tauv, wp)
y = y.copy()
dtau = 1.0e-8
err = 0.0
errp = 0.0
for iinter in range(0, N):
tauv_aux = tauv.copy()
tauv_aux[iinter] += -2 * dtau
y0 = splcalc.eval_y(tauv_aux, wp).copy() * (1.0 / 12.0)
tauv_aux[iinter] += dtau
y1 = splcalc.eval_y(tauv_aux, wp).copy() * (-2.0 / 3.0)
tauv_aux[iinter] += 2 * dtau
y2 = splcalc.eval_y(tauv_aux, wp).copy() * (2.0 / 3.0)
tauv_aux[iinter] += dtau
y3 = splcalc.eval_y(tauv_aux, wp).copy() * (-1.0 / 12.0)
dydtauiTest = (y0 + y1 + y2 + y3) / dtau
ev = np.abs(dydtauiTest - dydtauNom[:, iinter])
e = np.max(ev)
eidx = np.argmax(ev)
ep = e / abs(dydtauiTest[eidx])
if e > err:
err = e
if ep > errp:
errp = ep
assert ep < 5.0e-2, '''
error on dydtau = {:10.7e}
value of dydtau = {:10.7e}
relative error = {:10.7e}
'''.format(e, dydtauiTest[eidx], ep)
def test_derivative_wp(self):
''' Compare the numerical derivative of y w.r.t. waypoints with the nominal one
'''
for _ in range(4):
np.random.seed()
dim = np.random.randint(1, 8)
N = np.random.randint(2, 20)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
tauv = 0.5 + np.random.rand(N) * 2.0
splcalc = cSplineCalc(dim, N, cBasis1010(a))
_, y = splcalc.eval_dydtau(tauv, wp)
y = y.copy()
err = 0.0
errp = 0.0
err = 0.0
errp = 0.0
dwp = 1.0e-5
wpidx = [(i, j) for i in range(N + 1) for j in range(dim)]
dydwpNom = np.zeros((y.shape[0], len(wpidx)))
dydwpNom, _ = splcalc.eval_dydu(tauv, wp, wpidx, dydwpNom)
for k, (i, j) in enumerate(wpidx):
wp_aux = wp.copy()
wpidx = i
wpcom = j
wp_aux[wpidx, wpcom] += -3 * dwp
y0 = splcalc.eval_y(tauv, wp_aux).copy() * (-1.0 / 60.0)
wp_aux[wpidx, wpcom] += dwp
y1 = splcalc.eval_y(tauv, wp_aux).copy() * (3.0 / 20.0)
wp_aux[wpidx, wpcom] += dwp
y2 = splcalc.eval_y(tauv, wp_aux).copy() * (-3.0 / 4.0)
wp_aux[wpidx, wpcom] += 2 * dwp
y3 = splcalc.eval_y(tauv, wp_aux).copy() * (3.0 / 4.0)
wp_aux[wpidx, wpcom] += dwp
y4 = splcalc.eval_y(tauv, wp_aux).copy() * (-3.0 / 20.0)
wp_aux[wpidx, wpcom] += dwp
y5 = splcalc.eval_y(tauv, wp_aux).copy() * (1.0 / 60.0)
dydwpTest = (y0 + y1 + y2 + y3 + y4 + y5) / dwp
ev = np.abs(dydwpNom[:, k] - dydwpTest)
e = np.max(ev)
eidx = np.argmax(ev)
# print('{:14.7e} {:14.7e} {:14.7e}'.format(
# e, dydwpNom[eidx, k], dydwpTest[eidx]))
ep = e / dydwpTest[eidx]
if e > err:
err = e
if ep > errp:
errp = ep
if e > 1.0e-4:
assert ep < 1.0e-8, '''
Relative Error = {:10.3e}
Absolute Error = {:10.3e}
'''.format(ep, e)
# print('Maximum Error for dy dwp = {:14.7e}'.format(err))
# print('Maximum Relative Error for dy dwp = {:14.7e}'.format(errp))
# assert e < 5.0e-2, 'error = {:14.7e}'.format(e)
def test_derivative_y_2(self):
''' Second test for the derivative of y w.r.t. tau.
First, test the identity A*dydtau + dAdtau*y = 0.
Second, test the same identity using the basis functions.
'''
for i in range(40):
np.random.seed()
dim = np.random.randint(1, 3)
N = np.random.randint(2, 6)
a = np.random.rand()
wp = (np.random.rand(N + 1, dim) - 0.5) * 2 * np.pi
tauv = 0.5 + np.random.rand(N) * 2.0
splcalc = cSplineCalc(dim, N, cBasis0010())
basis = splcalc.basis_
dydtauNom, y = splcalc.eval_dydtau(tauv, wp)
A = splcalc.eval_A(tauv).todense()
y = y.copy()
# A*dydtau + dAdtau y = 0
for i, _ in enumerate(tauv):
v0 = A.dot(dydtauNom[:, i])
v1 = splcalc.eval_dAdtiy(tauv, i, y).todense()
res = v0 + v1.T
e = np.abs(res)
assert np.max(e) < 1.0e-10, '''
e = {:14.7e}
'''.format(e)
for i, taui in enumerate(tauv):
B = basis.evalDerivOnWindow(-1, taui, 0)
dB_dtau = basis.evalDerivWrtTauOnWindow(-1, taui, 0)
for idim in range(dim):
i0 = i * 6 * dim + 6 * idim
i1 = i0 + 6
e = dydtauNom[i0:i1, i].dot(B) + y[i0:i1].dot(dB_dtau)
assert np.abs(e) < 1.0e-10, '''
error computing dydtau^\\top B + y^\\top dB_dtau
e = {:14.7e}
index of tau = {:d}
index of q = {:d}
i0 = {:d}
i1 = {:d}
'''.format(e, i, idim, i0, i1)
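# Added explanatory note: the first identity checked above follows from differentiating
# the linear system A(tau) y(tau) = b with respect to tau_i; since b depends only on the
# waypoints (see eval_b) and not on tau,
#     dA/dtau_i * y + A * dy/dtau_i = 0,
# which is exactly what the loop over tauv verifies term by term.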
def eval_A(self, tau, _dim, _N, _basis):
"""
Alternative way to fill the Matrix A
WARNING: This work ok for _N<120
"""
A = np.zeros(2 * (6 * _dim * _N, ))
dim = _dim
nzv = np.zeros((((_N - 1) * dim + (3) * dim) * 4 * 3 + (3 - 1) *
(_N - 1) * dim * 8 * 3, ))
idxv = np.zeros(
(((_N - 1) * dim + (3) * dim) * 4 * 3 + (3 - 1) *
(_N - 1) * dim * 8 * 3, ),
dtype=np.int16)
ptrv = np.zeros((2 * 3 * _N * dim + 1, ), dtype=np.int16)
basis = _basis
Pl = [basis.evalDerivOnWindow(-1.0, tau[0], i) for i in range(0, 5)]
Pr = [basis.evalDerivOnWindow(1.0, tau[0], i) for i in range(0, 5)]
# Fill the content for the derivatives at boundaries
Cpl = -
|
np.vstack(Pl[1:5])
|
numpy.vstack
|
import numericalunits as nu
import numpy as np
import pandas as pd
import wimprates as wr
import dddm
from dddm import utils
import typing as ty
from .halo import SHM
from .halo_shielded import ShieldedSHM
export, __all__ = dddm.exporter()
@export
class GenSpectrum:
required_detector_fields = 'name material type exp_eff'.split()
def __init__(self,
dark_matter_model: ty.Union[SHM, ShieldedSHM],
experiment: dddm.Experiment,
):
"""
:param dark_matter_model: the dark matter model
:param experiment: dictionary containing detector parameters
"""
assert issubclass(experiment.__class__, dddm.Experiment)
self.detector = experiment
self.dm_model = dark_matter_model
def __str__(self):
"""
:return: sting of class info
"""
return f'{self.dm_model} at {self.detector}'
def get_data(self,
wimp_mass: ty.Union[int, float],
cross_section: ty.Union[int, float],
poisson=False,
return_counts=False,
) -> ty.Union[pd.DataFrame, np.ndarray]:
"""
:param wimp_mass: wimp mass (not log)
:param cross_section: cross-section of the wimp nucleon interaction
(not log)
:param poisson: type bool, add poisson True or False
:param return_counts: instead of a dataframe, return counts only
:return: pd.DataFrame containing events binned in energy
"""
bin_edges = self.get_bin_edges()
bin_centers = np.mean(bin_edges, axis=1)
bin_width = np.diff(bin_edges, axis=1)[:, 0]
assert len(bin_centers) == len(bin_width)
assert bin_width[0] == bin_edges[0][1] - bin_edges[0][0]
counts = self._calculate_counts(wimp_mass=wimp_mass,
cross_section=cross_section,
poisson=poisson,
bin_centers=bin_centers,
bin_width=bin_width,
bin_edges=bin_edges,
)
counts = self.set_negative_to_zero(counts)
if return_counts:
return counts
result = pd.DataFrame()
result['counts'] = counts
result['bin_centers'] = bin_centers
result['bin_left'] = bin_edges[:, 0]
result['bin_right'] = bin_edges[:, 1]
return result
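# A hedged usage sketch (names and argument values are illustrative, not from the
# original source); `my_halo_model` is an SHM instance and `my_experiment` any
# dddm.Experiment subclass instance:
#     spectrum = GenSpectrum(dark_matter_model=my_halo_model, experiment=my_experiment)
#     events = spectrum.get_data(wimp_mass=50, cross_section=1e-45)
#     # `events` is a DataFrame with 'counts', 'bin_centers', 'bin_left', 'bin_right'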
def get_counts(self,
wimp_mass: ty.Union[int, float],
cross_section: ty.Union[int, float],
poisson=False,
) -> np.array:
"""
:param wimp_mass: wimp mass (not log)
:param cross_section: cross-section of the wimp nucleon interaction
(not log)
:param poisson: type bool, add poisson True or False
:return: array of counts/bin
"""
return self.get_data(wimp_mass=wimp_mass,
cross_section=cross_section,
poisson=poisson,
return_counts=True)
def _calculate_counts(self,
wimp_mass: ty.Union[int, float],
cross_section: ty.Union[int, float],
poisson: bool,
bin_centers: np.ndarray,
bin_width: np.ndarray,
bin_edges: np.ndarray,
) -> np.ndarray:
counts = self.spectrum_simple(bin_centers,
wimp_mass=wimp_mass,
cross_section=cross_section)
if poisson:
counts = np.random.exponential(counts).astype(float)
counts *= bin_width * self.effective_exposure
return counts
def spectrum_simple(self,
energy_bins: ty.Union[list, tuple, np.ndarray],
wimp_mass: ty.Union[int, float],
cross_section: ty.Union[int, float],
):
"""
Compute the spectrum for a given mass and cross-section
:param wimp_mass: wimp mass (not log)
:param cross_section: cross-section of the wimp nucleon interaction
(not log)
:return: returns the rate
"""
material = self.target_material
exp_type = self.interaction_type
dddm.log.debug(f'Eval {wimp_mass, cross_section} for {material}-{exp_type}')
if exp_type in ['SI']:
rate = wr.rate_wimp_std(energy_bins,
wimp_mass,
cross_section,
halo_model=self.dm_model,
material=material
)
elif exp_type in ['migdal_SI']:
# This integration takes a long time, hence, we will lower the
# default precision of the scipy dblquad integration
migdal_integration_kwargs = dict(epsabs=1e-4,
epsrel=1e-4)
convert_units = (nu.keV * (1000 * nu.kg) * nu.year)
rate = convert_units * wr.rate_migdal(
energy_bins * nu.keV,
wimp_mass * nu.GeV / nu.c0 ** 2,
cross_section * nu.cm ** 2,
interaction='SI',
halo_model=self.dm_model,
material=material,
**migdal_integration_kwargs
)
else:
raise NotImplementedError(f'Unknown {exp_type}-interaction')
return rate
def get_bin_edges(self):
return utils.get_bins(self.e_min_kev, self.e_max_kev, self.n_energy_bins)
def set_negative_to_zero(self, counts: np.ndarray):
mask = counts < 0
if
|
np.any(mask)
|
numpy.any
|
# coding=utf-8
import numpy as np
import torch
class PrototypicalBatchSampler(object):
"""
PrototypicalBatchSampler: yield a batch of indexes at each iteration.
Indexes are calculated by taking into account 'classes_per_it' and 'num_samples';
at every iteration the batch indexes will refer to 'num_support' + 'num_query' samples
for 'classes_per_it' random classes.
__len__ returns the number of episodes per epoch (same as 'self.iterations').
"""
def __init__(self, labels, classes_per_it, num_samples, iterations):
"""
Initialize the PrototypicalBatchSampler object
:param labels: an iterable containing all the labels for the current dataset
samples indexes will be inferred from this iterable.
:param classes_per_it: number of random classes for each iteration
:param num_samples: number of samples for each iteration for each class (support + query)
:param iterations: number of iterations (episodes) per epoch
"""
super(PrototypicalBatchSampler, self).__init__()
self.labels = labels
self.classes_per_it = classes_per_it
self.sample_per_class = num_samples
self.iterations = iterations
self.classes, self.counts = np.unique(self.labels, return_counts=True)
self.classes = torch.LongTensor(self.classes)
# create a matrix, indexes, of dim: classes X max(elements per class)
# fill it with nans
# for every class c, fill the relative row with the indices samples belonging to c
# in numel_per_class we store the number of samples for each class/row
self.idxs = range(len(self.labels))
self.indexes = np.empty((len(self.classes), max(self.counts)), dtype=int) * np.nan
self.indexes = torch.Tensor(self.indexes)
self.numel_per_class = torch.zeros_like(self.classes)
for idx, label in enumerate(self.labels):
label_idx = np.argwhere(self.classes == label).item()
self.indexes[label_idx, np.where(
|
np.isnan(self.indexes[label_idx])
|
numpy.isnan
|
# Author: <NAME> <<EMAIL>>
#
# License: Simplified BSD
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
import mne
from mne.datasets import testing
from mne import (read_cov, read_forward_solution, read_evokeds,
convert_forward_solution)
from mne.cov import regularize
from mne.inverse_sparse import gamma_map
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles
from mne import pick_types_forward
from mne.utils import assert_stcs_equal, run_tests_if_main
from mne.dipole import Dipole
data_path = testing.data_path(download=False)
fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
subjects_dir = op.join(data_path, 'subjects')
def _check_stc(stc, evoked, idx, hemi, fwd, dist_limit=0., ratio=50.):
"""Check correctness."""
assert_array_almost_equal(stc.times, evoked.times, 5)
amps = np.sum(stc.data ** 2, axis=1)
order = np.argsort(amps)[::-1]
amps = amps[order]
verts = np.concatenate(stc.vertices)[order]
hemi_idx = int(order[0] >= len(stc.vertices[1]))
hemis = ['lh', 'rh']
assert hemis[hemi_idx] == hemi
dist = np.linalg.norm(np.diff(fwd['src'][hemi_idx]['rr'][[idx, verts[0]]],
axis=0)[0]) * 1000.
assert dist <= dist_limit
assert amps[0] > ratio * amps[1]
@pytest.mark.slowtest
@testing.requires_testing_data
def test_gamma_map():
"""Test Gamma MAP inverse."""
forward = read_forward_solution(fname_fwd)
forward = convert_forward_solution(forward, surf_ori=True)
forward = pick_types_forward(forward, meg=False, eeg=True)
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
proj=False)
evoked.resample(50, npad=100)
evoked.crop(tmin=0.1, tmax=0.14) # crop to window around peak
cov = read_cov(fname_cov)
cov = regularize(cov, evoked.info, rank=None)
alpha = 0.5
stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=True, update_mode=1)
_check_stc(stc, evoked, 68477, 'lh', fwd=forward)
vec_stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=True, update_mode=1, pick_ori='vector')
assert_stcs_equal(vec_stc.magnitude(), stc)
stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=1)
_check_stc(stc, evoked, 82010, 'lh', fwd=forward)
dips = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=1,
return_as_dipoles=True)
assert (isinstance(dips[0], Dipole))
stc_dip = make_stc_from_dipoles(dips, forward['src'])
assert_stcs_equal(stc, stc_dip)
# force fixed orientation
stc = gamma_map(evoked, forward, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=2,
loose=0, return_residual=False)
_check_stc(stc, evoked, 85739, 'lh', fwd=forward, ratio=20.)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_gamma_map_vol_sphere():
"""Gamma MAP with a sphere forward and volumic source space."""
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0),
proj=False)
evoked.resample(50, npad=100)
evoked.crop(tmin=0.1, tmax=0.16) # crop to window around peak
cov = read_cov(fname_cov)
cov = regularize(cov, evoked.info, rank=None)
info = evoked.info
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(subject=None, pos=30., mri=None,
sphere=(0.0, 0.0, 0.0, 0.08),
bem=None, mindist=5.0,
exclude=2.0, sphere_units='m')
fwd = mne.make_forward_solution(info, trans=None, src=src, bem=sphere,
eeg=False, meg=True)
alpha = 0.5
pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
loose=0, return_residual=False)
pytest.raises(ValueError, gamma_map, evoked, fwd, cov, alpha,
loose=0.2, return_residual=False)
stc = gamma_map(evoked, fwd, cov, alpha, tol=1e-4,
xyz_same_gamma=False, update_mode=2,
return_residual=False)
|
assert_array_almost_equal(stc.times, evoked.times, 5)
|
numpy.testing.assert_array_almost_equal
|
'''Simulated Annealing
###
###Code and Implementation by <NAME>
###
###
###Implemented on 08/06/2019
'''
import numpy as np
import matplotlib.pyplot as mp
class SimulatedAnnealing():
def __init__(self,f,x,lb,ub,pop=200,max_gen=50,nsize=1,normal_neighbour=True,verbose=True):
self.f=np.vectorize(f)
self.x=x
self.lb=lb
self.ub=ub
self.pop=pop
self.verbose=verbose
self.normal_neighbour=normal_neighbour
self.nsize=nsize
self.max_gen=max_gen
self.pop_mat=np.tile(self.lb,(pop,1))+np.random.rand(pop,len(x)).astype(np.longdouble)*(np.tile(self.ub,(pop,1))-np.tile(self.lb,(pop,1)))
self.plotgen=[]
self.average_fit=[]
self.history1=self.pop_mat[:,0]
self.history2=self.pop_mat[:,1]
self.best_result=[]
self.best_domain=[]
self.overall_best=[]
self.overall_bestdomain=[]
def solve(self):
self.evaluate(initial=True)
for i in range(self.max_gen+1):
self.update(generation=i)
self.evaluate()
if self.verbose:
self.log_result(generation=i)
def evaluate(self,initial=False):
#get fitness of all population
if initial:
self.pop_mat_fit=self.f(*self.pop_mat.T)
#concatenate and sort population by fitness
temp_mat=np.concatenate((np.asarray(self.pop_mat_fit).reshape(self.pop_mat_fit.shape[0],1),self.pop_mat),axis=1)
#sort new points by fitness
temp_mat=temp_mat[temp_mat[:,0].argsort()]
#return the sorted values to pop matrix
self.pop_mat_fit, self.pop_mat= np.copy(temp_mat[:,0]), np.copy(temp_mat[:,1:])
def update(self,generation):
#neighbours=np.tile(self.lb,(self.pop,1))+np.random.rand(self.pop,len(self.x)).astype(np.longdouble)*(np.tile(self.ub,(self.pop,1))-np.tile(self.lb,(self.pop,1)))
if self.normal_neighbour:
neighbours=np.clip(np.tile(self.lb,(self.pop,1))+np.random.uniform(0,1,(self.pop,len(self.x))).astype(np.longdouble)*(np.tile(self.ub,(self.pop,1))-np.tile(self.lb,(self.pop,1)))/self.nsize,a_min=self.lb,a_max=self.ub)
else:
neighbours=np.clip(np.tile(self.lb,(self.pop,1))+np.random.rand(self.pop,len(self.x)).astype(np.longdouble)*(np.tile(self.ub,(self.pop,1))-np.tile(self.lb,(self.pop,1)))/self.nsize,a_min=self.lb,a_max=self.ub)
neighbour_fit=self.f(*neighbours.T)
#print('nf=',neighbour_fit)
#print('pop_mat_fit',self.pop_mat_fit)
p=np.random.rand(*self.pop_mat_fit.shape).astype(np.longdouble)
condition=(p<=np.clip(np.exp((self.pop_mat_fit-neighbour_fit)/(self.max_gen/(generation+1))).astype(np.longdouble),a_min=0,a_max=1)).reshape(self.pop_mat_fit.shape)
self.pop_mat=np.repeat((~condition).astype(int),len(self.x)).reshape(self.pop_mat.shape)*self.pop_mat+np.repeat((condition).astype(int),len(self.x)).reshape(self.pop_mat.shape)*neighbours
self.pop_mat_fit=(~condition).astype(int)*self.pop_mat_fit+(condition).astype(int)*neighbour_fit
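# Descriptive note on the vectorised acceptance rule above (not in the original code):
# each population member keeps its neighbour when a uniform random number p satisfies
#     p <= min(1, exp((f_current - f_neighbour) / T)),  with  T = max_gen / (generation + 1),
# so better neighbours (lower fitness) are always accepted, while worse ones are accepted
# with a probability that shrinks as the temperature T decreases over the generations.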
def log_result(self,generation):
print("Generation #",generation,"Best Fitness=", self.pop_mat_fit[0], "Answer=", self.pop_mat[0])
self.plotgen.append(generation)
self.best_result.append(self.pop_mat_fit[0])
self.best_domain.append(self.pop_mat[0])
self.overall_best.append(min(self.best_result))
if self.overall_best[-1]==self.best_result[-1]:
self.overall_bestdomain.append(self.best_domain[-1])
else:
self.overall_bestdomain.append(self.overall_bestdomain[-1])
self.average_fit.append(np.average(self.pop_mat_fit))
self.history1=
|
np.concatenate((self.history1,self.pop_mat[:,0]),axis=0)
|
numpy.concatenate
|
import numpy as np
from numpy import sqrt, pi, cos, sin
from numpy.linalg import norm
from utils import rot
class Lattice:
def __init__(self, a1 : np.array, a2 : np.array):
self.a1, self.a2 = a1, a2
def unit_cell(self):
return NotImplementedError()
def get_reciprocal_vectors(self, a1, a2) -> tuple:
b1 = 2 * pi * np.dot(rot(pi / 2), a2) / np.dot(a1, np.dot(rot(pi / 2), a2))
b2 = 2 * pi * np.dot(rot(pi / 2), a1) / np.dot(a2, np.dot(rot(pi / 2), a1))
return b1, b2
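# A quick check of the construction above (sketch, not part of the original class):
# the reciprocal vectors satisfy b_i . a_j = 2*pi*delta_ij, e.g.
#     np.dot(b1, a1)   # ~ 2*pi
#     np.dot(b1, a2)   # ~ 0
#     np.dot(b2, a2)   # ~ 2*pi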
def get_lattice_vectors(self) -> tuple:
return self.a1, self.a2
@property
def size(self) -> int:
return len(self.unit_cell())
@property
def area(self) -> float:
return np.linalg.norm(np.cross(self.a1, self.a2))
class Triangular(Lattice):
def __init__(self, lattice_constant:float):
self.a0 = lattice_constant
self.R0 = lattice_constant
self.a1 = np.array([1.5 / sqrt(3), 0.5, 0]) * self.a0
self.a2 = np.array([1.5 / sqrt(3), -0.5, 0]) * self.a0
self.b1, self.b2 = self.get_reciprocal_vectors(self.a1, self.a2)
Gamma = np.array([0, 0, 0])
K = np.array([(2 * sqrt(3) * pi) / (3 * self.a0), (2 * pi) / (3 * self.a0), 0])
K_prime = np.array([(2 * sqrt(3) * pi)/(3 * self.a0), -(2 * pi)/(3 * self.a0), 0])
M = np.array([(2 * sqrt(3) * pi) / (3 * self.a0), 0, 0])
self.bz = {'Gamma': Gamma, 'K': K, 'K_prime': K_prime, 'M': M}
def __str__(self):
return f'Triangular lattice: a0 = {self.a0/1E-9:.2f} nm'
def unit_cell(self) -> np.array:
return np.array([np.array([0, 0, 0])])
def get_bz_path(self, N:int) -> np.array:
paths = np.array([norm(self.bz['M'] - self.bz['Gamma']),
norm(self.bz['Gamma'] - self.bz['K']),
norm(self.bz['K'] - self.bz['M'])])
n_paths = (paths /
|
np.sum(paths)
|
numpy.sum
|
import numpy as np
import matplotlib as mpl
import mpl_toolkits.axes_grid1 as mplax
import matplotlib.colors as mplc
import matplotlib.cm as mplcm
import numba
import warnings
import scipy.misc as scm
import scipy.optimize as spo
import scipy.ndimage as scnd
import scipy.signal as scsig
import skimage.color as skc
import stemtool as st
def move_by_phase(image_to_move, x_pixels, y_pixels):
"""
Move Images with sub-pixel precision
Parameters
----------
image_to_move: ndarray
Original Image to be moved
x_pixels: float
Pixels to shift in X direction
y_pixels: float
Pixels to Shift in Y direction
Returns
-------
moved_image: ndarray
Moved Image
Notes
-----
The underlying idea is that a shift in real space
is a phase shift in Fourier space. So we take the original
image and compute its Fourier transform. We also calculate
the phase change that corresponds to the requested shift, by
calculating the Fourier pixel dimensions. We then multiply
the FFT of the image with the phase shift value and then
take the inverse FFT.
:Authors:
<NAME> <<EMAIL>>
"""
image_size = (np.asarray(image_to_move.shape)).astype(int)
fourier_cal_y = np.linspace(
(-image_size[0] / 2), ((image_size[0] / 2) - 1), image_size[0]
)
fourier_cal_y = fourier_cal_y / (image_size[0]).astype(np.float64)
fourier_cal_x = np.linspace(
(-image_size[1] / 2), ((image_size[1] / 2) - 1), image_size[1]
)
fourier_cal_x = fourier_cal_x / (image_size[1]).astype(np.float64)
[fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
move_matrix = np.multiply(fourier_mesh_x, x_pixels) + np.multiply(
fourier_mesh_y, y_pixels
)
move_phase = np.exp((-2) * np.pi * 1j * move_matrix)
original_image_fft = np.fft.fftshift(np.fft.fft2(image_to_move))
moved_in_fourier = np.multiply(move_phase, original_image_fft)
moved_image = np.fft.ifft2(moved_in_fourier)
return moved_image
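# A hedged usage sketch (hypothetical array, assuming a 2-D real-valued image):
#     img = np.random.rand(256, 256)
#     shifted = np.abs(move_by_phase(img, 10.5, -3.2))   # shift by a non-integer number of pixels
# The returned array is complex, so np.abs (or .real) is taken to recover a real image.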
def image_normalizer(image_orig):
"""
Normalizing Image
Parameters
----------
image_orig: ndarray
'image_orig' is the original input image to be normalized
Returns
-------
image_norm: ndarray
Normalized Image
Notes
-----
We normalize a real valued image here
so that it's values lie between o and 1.
This is done by first subtracting the
minimum value of the image from the
image itself, and then subsequently
dividing the image by the maximum value
of the subtracted image.
:Authors:
<NAME> <<EMAIL>>
"""
image_norm = np.zeros_like(image_orig, dtype=np.float64)
image_norm = (image_orig - np.amin(image_orig)) / (
np.amax(image_orig) - np.amin(image_orig)
)
return image_norm
def image_logarizer(image_orig, bit_depth=64):
"""
Normalized log of image
Parameters
----------
image_orig: ndarray
Numpy array of real valued image
bit_depth: int
Bit depth of output image
Default is 64
Returns
-------
image_log: ndarray
Normalized log
Notes
-----
Normalize the image, and scale it 2^0 to 2^bit_depth.
Take log2 of the scaled image.
:Authors:
<NAME> <<EMAIL>>
"""
bit_max = 2 ** bit_depth
image_norm = image_normalizer(image_orig)
image_scale = np.zeros_like(image_norm, dtype=np.float64)
image_log = np.zeros_like(image_norm, dtype=np.float64)
image_scale = 1 + ((bit_max - 1) * image_norm)
image_log = np.log2(image_scale)
return image_log
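# Descriptive note (not in the original source): after normalisation the image lies in
# [0, 1], the scaling maps it to [1, 2**bit_depth], and log2 therefore maps it to
# [0, bit_depth]; with the default bit_depth=64 the brightest pixel becomes 64.0 and
# the darkest 0.0.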
def remove_dead_pixels(image_orig, iter_count=1, level=10000):
"""
Remove dead pixels
Parameters
----------
image_orig: ndarray
Numpy array of real valued image
iter_count: int
Number of iterations to run
the process. Default is 1
level: int,float
Ratio of minima pixels to total
pixels. Default is 10,000
Returns
-------
image_orig: ndarray
Image with dead pixels converted
Notes
-----
Subtract the minima from the image, and if the
number of pixels with minima values is less than
the 1/level of the total pixels, then those are
decided to be dead pixels. Iterate if necessary
:Authors:
<NAME> <<EMAIL>>
"""
pp, qq = np.mgrid[0 : image_orig.shape[0], 0 : image_orig.shape[1]]
no_points = np.size(pp)
for ii in range(iter_count):
original_min = np.amin(image_orig)
image_pos = image_orig - original_min
no_minima = np.size(pp[image_pos == 0])
if no_minima < (no_points / level):
new_minimum = np.amin(image_pos[image_pos > 0])
image_pos = image_pos - new_minimum
image_pos[image_pos < 0] = 0
image_orig = image_pos + new_minimum + original_min
return image_orig
def hanned_image(image):
"""
2D hanning filter for images
Parameters
----------
image: ndarray
Original Image on which the Hanning filter
is to be applied
Returns
-------
hanned_image: ndarray
Image with the hanning filter applied
Notes
-----
:Authors:
<NAME> <<EMAIL>>
"""
size_image = np.asarray(np.shape(image), dtype=int)
row_hann = np.zeros((size_image[0], 1))
row_hann[:, 0] = np.hanning(size_image[0])
col_hann = np.zeros((1, size_image[1]))
col_hann[0, :] = np.hanning(size_image[1])
hann_window = np.multiply(row_hann, col_hann)
hanned_image = np.multiply(image, hann_window)
return hanned_image
def sane_colorbar(mappable):
ax = mappable.axes
fig = ax.figure
divider = mplax.make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
return fig.colorbar(mappable, cax=cax)
def phase_color(phase_image):
size_im = np.asarray(np.shape(phase_image), dtype=int)
hsv_im = np.ones((size_im[0], size_im[1], 3))
hsv_im[:, :, 0] = (phase_image + (2 * np.pi)) / (2 * np.pi)
hsv_im[:, :, 0] = hsv_im[:, :, 0] - np.floor(hsv_im[:, :, 0])
rgb_im = mplc.hsv_to_rgb(hsv_im)
r, g, b = rgb_im[:, :, 0], rgb_im[:, :, 1], rgb_im[:, :, 2]
gray_im = (0.2989 * r) + (0.5870 * g) + (0.1140 * b)
hsv_im[:, :, 2] = np.divide(hsv_im[:, :, 2], gray_im)
hsv_im[:, :, 2] = hsv_im[:, :, 2] / np.amax(hsv_im[:, :, 2])
rgb_im = mplc.hsv_to_rgb(hsv_im)
return rgb_im
def hsv_overlay(data, image, color, climit=None, bit_depth=8):
bit_range = 2 ** bit_depth
im_map = mplcm.get_cmap(color, bit_range)
    if climit is None:
data_lim = np.amax(np.abs(data))
else:
data_lim = climit
data = 0.5 + (data / (2 * data_lim))
rgb_image = np.asarray(im_map(data)[:, :, 0:3])
hsv_image = mplc.rgb_to_hsv(rgb_image)
hsv_image[:, :, -1] = (image - np.amin(image)) / (np.amax(image) - np.amin(image))
rgb_image = mplc.hsv_to_rgb(hsv_image)
return rgb_image
def sparse_division(sparse_numer, sparse_denom, bit_depth=32):
"""
Divide two sparse matrices element wise to prevent zeros
Parameters
----------
    sparse_numer: ndarray
Numpy array of real valued numerator
sparse_denom: ndarray
Numpy array of real valued denominator
bit_depth: int
Bit depth of output image
Default is 32
Returns
-------
divided_matrix: ndarray
Quotient matrix
Notes
-----
Decide on a bit depth below which
the values in the denominator are
just noise, as they are below the
bit depth. Do the same for the
numerator. Turn those values to 1 in
the denominator and 0 in the numerator.
    Then, in the quotient matrix, set the
    positions where the denominator was below
    the threshold to 0 as well.
:Authors:
<NAME> <<EMAIL>>
"""
depth_ratio = 2 ** bit_depth
denom_abs = np.abs(sparse_denom)
numer_abs = np.abs(sparse_numer)
threshold_denom = (np.amax(denom_abs)) / depth_ratio
threshold_numer = (np.amax(numer_abs)) / depth_ratio
threshold_ind_denom = denom_abs < threshold_denom
threshold_ind_numer = numer_abs < threshold_numer
sparse_denom[threshold_ind_denom] = 1
sparse_numer[threshold_ind_numer] = 0
divided_matrix = np.divide(sparse_numer, sparse_denom)
divided_matrix[threshold_ind_denom] = 0
return divided_matrix
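# Editor's note: a small sketch of sparse_division on toy arrays that contain
# exact zeros (the arrays are assumptions for illustration); positions where the
# denominator falls below the bit-depth threshold come back as 0 rather than
# inf/nan. Copies are passed because sparse_division modifies its inputs in place.
def _sparse_division_sketch():
    numer = np.array([1.0, 2.0, 0.0, 4.0])
    denom = np.array([2.0, 0.0, 5.0, 8.0])
    result = sparse_division(numer.copy(), denom.copy(), bit_depth=32)
    # expected: [0.5, 0.0, 0.0, 0.5] -- the zero denominator maps to 0, not inf
    return result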
def cross_corr_unpadded(image_1, image_2, normal=True):
im_size = np.asarray(np.shape(image_1))
if normal:
im1_norm = image_1 / (np.sum(image_1 ** 2) ** 0.5)
im2_norm = image_2 / (np.sum(image_2 ** 2) ** 0.5)
im1_fft = np.fft.fft2(im1_norm)
im2_fft = np.conj(np.fft.fft2(im2_norm))
else:
        im1_fft = np.fft.fft2(image_1)
        im2_fft = np.conj(np.fft.fft2(image_2))
corr_fft = np.abs(np.fft.ifftshift(np.fft.ifft2(im1_fft * im2_fft)))
return corr_fft
def cross_corr(image_1, image_2, hybridizer=0, normal=True):
"""
Normalized Correlation, allowing for hybridization
with cross correlation being the default output if
no hybridization parameter is given
Parameters
----------
image_1: ndarray
First image
image_2: ndarray
Second image
hybridizer: float
Hybridization parameter between 0 and 1
0 is pure cross correlation
1 is pure phase correlation
Returns
-------
corr_hybrid: ndarray
Complex valued correlation output
Notes
-----
The cross-correlation theorem can be stated as:
.. math::
G_c = G_1 \times G_2^*
where :math:`G_c` is the Fourier transform of the cross
correlated matrix and :math:`G_1` and :math:`G_2` are
the Fourier transforms of :math:`g_1` and :math:`g_2`
respectively, which are the original matrices. This is pure
cross-correlation. Phase correlation can be expressed as:
.. math::
G_c = \frac{G_1 \times G_2^*}{\mid G_1 \times G_2^* \mid}
Thus, we can now define a hybrid cross-correlation where
.. math::
G_c = \frac{G_1 \times G_2^*}{\mid G_1 \times G_2^* \mid ^n}
If n is 0, we have cross correlation, and if n is 1 we
have phase correlation.
References
----------
    .. [1] <NAME>., <NAME>., <NAME>., Minor, A.M. and <NAME>.,
2017. Optimizing disk registration algorithms for nanobeam
electron diffraction strain mapping. Ultramicroscopy, 176,
pp.170-176.
See Also
--------
sparse_division
:Authors:
<NAME> <<EMAIL>>
"""
im_size = np.asarray(np.shape(image_1))
pad_size = (np.round(im_size / 2)).astype(int)
if normal:
im1_norm = image_1 / (np.sum(image_1 ** 2) ** 0.5)
im1_pad = np.pad(im1_norm, pad_width=pad_size, mode="median")
im2_norm = image_2 / (np.sum(image_2 ** 2) ** 0.5)
im2_pad = np.pad(im2_norm, pad_width=pad_size, mode="median")
im1_fft = np.fft.fft2(im1_pad)
im2_fft = np.conj(np.fft.fft2(im2_pad))
else:
im1_pad = np.pad(image_1, pad_width=pad_size, mode="median")
im2_pad = np.pad(image_2, pad_width=pad_size, mode="median")
im1_fft = np.fft.fft2(im1_pad)
im2_fft = np.conj(np.fft.fft2(im2_pad))
corr_fft = np.multiply(im1_fft, im2_fft)
corr_abs = (np.abs(corr_fft)) ** hybridizer
corr_hybrid_fft = sparse_division(corr_fft, corr_abs, 32)
corr_hybrid = np.fft.ifft2(corr_hybrid_fft)
corr_hybrid = np.abs(np.fft.ifftshift(corr_hybrid))
corr_unpadded = corr_hybrid[
pad_size[0] : pad_size[0] + im_size[0], pad_size[1] : pad_size[1] + im_size[1]
]
return corr_unpadded
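# Editor's note: a hedged usage sketch for cross_corr on two same-sized toy
# images related by a circular shift; hybridizer=0.5 is an arbitrary value used
# only to illustrate the hybrid between cross correlation (0) and phase
# correlation (1) described in the docstring above.
def _cross_corr_sketch():
    rng = np.random.RandomState(0)
    im_a = rng.rand(64, 64)
    im_b = np.roll(im_a, shift=(3, -2), axis=(0, 1))
    corr = cross_corr(im_a, im_b, hybridizer=0.5)
    # the offset of the peak from the array centre encodes the relative shift
    peak = np.unravel_index(np.argmax(corr), corr.shape)
    return peak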
def make_circle(size_circ, center_x, center_y, radius):
"""
Make a circle
Parameters
----------
size_circ: ndarray
2 element array giving the size of the output matrix
center_x: float
x position of circle center
center_y: float
y position of circle center
radius: float
radius of the circle
Returns
-------
circle: ndarray
        p X q sized array which is 1
inside the circle and 0 outside
:Authors:
<NAME> <<EMAIL>>
"""
p = size_circ[0]
q = size_circ[1]
yV, xV = np.mgrid[0:p, 0:q]
sub = ((((yV - center_y) ** 2) + ((xV - center_x) ** 2)) ** 0.5) < radius
circle = np.asarray(sub, dtype=np.float64)
return circle
@numba.jit
def image_tiler(dataset_4D, reducer=4, bit_depth=8):
"""
Generate a tiled pattern of the 4D-STEM dataset
"""
size_data = (np.asarray(dataset_4D.shape)).astype(int)
normalized_4D = (dataset_4D -
|
np.amin(dataset_4D)
|
numpy.amin
|
import numpy as np
from numpy import sin, cos
from scipy.optimize import leastsq, brentq
from scipy.linalg import eig, inv
#### Global Constants ####
G_SI = 6.67384e-11
C_SI = 299792458.0
PI = 3.141592653589793
MSUN_SI = 1.9885469549614615e+30
PC_SI = 3.085677581491367e+16
##########################
def symRatio(m1, m2):
"""Compute symmetric mass ratio from component masses"""
return m1*m2/(m1+m2)/(m1+m2)
def mchirp(m1, m2):
"""Compute chirp mass from component masses"""
return (m1*m2)**(3./5.)*(m1+m2)**(-1./5.)
def nextPow2(length):
"""
    Find the next power of 2 >= length
"""
return int(2**np.ceil(np.log2(length)))
def m1m2(Mc, eta):
"""Compute component masses from Mc, eta. Returns m1 >= m2"""
etaV = 1-4*eta
if etaV < 0:
etaV = 0
m1 = 0.5*Mc*eta**(-3./5.)*(1. + np.sqrt(etaV))
m2 = 0.5*Mc*eta**(-3./5.)*(1. - np.sqrt(etaV))
return m1, m2
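# Editor's note: a quick consistency sketch (not part of the original script)
# tying together mchirp, symRatio and m1m2; the component masses are arbitrary
# and in whatever consistent unit the caller prefers.
def _mass_roundtrip_sketch():
    m1, m2 = 10.0, 4.0
    Mc = mchirp(m1, m2)
    eta = symRatio(m1, m2)
    m1_back, m2_back = m1m2(Mc, eta)
    return np.allclose([m1_back, m2_back], [m1, m2])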
def estimateDeltaF(m1, m2, fmin, deltaT, LmaxEff=2):
"""
Input: m1, m2, fmin, deltaT
    Output: estimated frequency spacing deltaF (in Hz), from the Newtonian inspiral duration from fmin padded up to the next power of two samples
"""
T = estimateWaveformDuration(m1, m2, fmin, LmaxEff=2)+0.1 # buffer for merger
return 1./(deltaT*nextPow2(T/deltaT))
def estimateWaveformDuration(m1, m2, fmin, LmaxEff=2):
"""
Input: m1, m2, fmin
    Output: estimated duration (in s) based on Newtonian inspiral from fmin to infinite frequency
"""
fM = fmin*(m1+m2)*G_SI / C_SI**3
fM *= 2./LmaxEff # if we use higher modes, lower the effective frequency, so HM start in band
eta = symRatio(m1,m2)
Msec = (m1+m2)*G_SI / C_SI**3
return Msec*5./256. / eta* np.power((PI*fM),-8./3.)
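# Editor's note: an illustrative call of estimateWaveformDuration for an assumed
# 1.4+1.4 solar-mass binary starting at 20 Hz; the Newtonian estimate comes out
# at roughly a couple of hundred seconds and is only meant as a plausibility check.
def _duration_sketch():
    m1 = m2 = 1.4 * MSUN_SI
    return estimateWaveformDuration(m1, m2, 20.0)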
#
# Antenna pattern functions
#
def Fplus(theta, phi, psi):
"""
Antenna pattern as a function of polar coordinates measured from
directly overhead a right angle interferometer and polarization angle
"""
return 0.5*(1. + cos(theta)*cos(theta))*cos(2.*phi)*cos(2.*psi)\
- cos(theta)*sin(2.*phi)*sin(2.*psi)
def Fcross(theta, phi, psi):
"""
Antenna pattern as a function of polar coordinates measured from
directly overhead a right angle interferometer and polarization angle
"""
return 0.5*(1. + cos(theta)*cos(theta))*cos(2.*phi)*sin(2.*psi)\
+ cos(theta)*sin(2.*phi)*cos(2.*psi)
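# Editor's note: sketch evaluating the antenna patterns for a source directly
# overhead (theta = 0); for that geometry Fplus**2 + Fcross**2 = 1 for any phi
# and psi, a sanity property implied by the expressions above. The angles are
# arbitrary illustrative values.
def _antenna_pattern_sketch():
    theta, phi, psi = 0.0, 0.3, 1.1
    fp = Fplus(theta, phi, psi)
    fc = Fcross(theta, phi, psi)
    return np.isclose(fp ** 2 + fc ** 2, 1.0)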
def GWaveform(m1, m2):
hp, hc = np.loadtxt('effFisher_wf/Target_wf_{:.4f}_{:.4f}_hp_hc'.format(m1/MSUN_SI, m2/MSUN_SI))
return hp, hc
def hoft_norm(hp, hc, fs, deltaF, deltaT, weights, theta, phi, psi):
'''
Generate a normalized waveform in frequency domain according
to inner product (w.r.t deltaF & deltaT) by inputting a TD waveform (h+,hx),
zero-padding and then Fourier transforming.
'''
ht = Transf2ht(hp, hc, deltaF, deltaT, theta=theta, phi=phi, psi=psi)
#print('ht => %s sec' %(ht.size/fs))
hf = Transf2hf(ht, fs ,deltaF, deltaT)
h_norm = norm(hf, deltaF, weights)
hf_norm = hf / h_norm
return hf_norm
def Transf2ht(hp, hc, deltaF, deltaT, theta, phi, psi):
'''
ht = F_x * h_x + F_+ * h_+
ht <= padding ht corresponding to deltaF, deltaT.
'''
fp = Fplus(theta, phi, psi)
fc = Fcross(theta, phi, psi)
hp *= fp
hc *= fc
ht = hp+hc
if deltaF is not None:
TDlen = int(1./deltaF * 1./deltaT)
assert TDlen >= ht.size
ht = np.pad(ht, (0, TDlen-ht.size), 'constant', constant_values=0)
return ht
def Transf2hf(ht, fs, deltaF, deltaT):
'''
Generate a Freq. Data waveform
'''
# Check zero-padding was done to expected length
TDlen = int(1./deltaF * 1./deltaT)
assert TDlen == ht.size
FDlen = TDlen//2+1
hf = np.fft.rfft(ht) /fs
return hf
def norm(hf, deltaF, weights):
"""
Compute norm of a COMPLEX16Frequency Series
"""
#assert hf.size == len1side
val = 0.
val = np.sum( np.conj(hf)*hf * weights)
val = np.sqrt( 4. * deltaF * np.abs(val) )
return val
def inner_product(hf1, hf2, fs, len1side, len2side, weights):
'''
Compute inner product maximized over time and phase. inner_product(h1,h2) computes:
        max over tc of  4 Abs[ \int_{fLow}^{fNyq} h1*(f,tc) h2(f) / Sn(f) df ]
h1, h2 must be frequency series defined in [0, fNyq]
    (with the negative frequencies implicitly given by Hermitian symmetry)
return: The maximized (real-valued, > 0) overlap
'''
assert hf1.size==hf2.size==len1side
# Tabulate the SNR integrand
# Set negative freqs. of integrand to zero
intgd = np.zeros(len2side).astype(complex)
intgd[:len1side] =
|
np.zeros(len1side)
|
numpy.zeros
|
from django.core.management.base import BaseCommand#, CommandError
#from django.core.management import call_command
#from django.conf import settings
#from django.db import connection
import contactnetwork.pdb as pdb
from structure.models import Structure
from residue.models import Residue
import logging, os
import numpy as np
from sklearn.decomposition import PCA
from copy import deepcopy
from numpy.core.umath_tests import inner1d
import io
import freesasa
import pickle
TMNUM = 7
class Command(BaseCommand):
help = "Command to calculate an axis for a TM helix."
np.set_printoptions(suppress=True)
logger = logging.getLogger(__name__)
# def add_arguments(self, parser):
# parser.add_argument('pdb_code')
###########################################################################
############################ Helper Functions ############################
###########################################################################
def load_pdb(self,pdb_code, no_save=False):
if no_save:
return pdb.pdb_get_structure(pdb_code)
else:
if not os.path.exists("pdbfiles/pdb" + pdb_code.lower() + ".ent"):
pdbl = pdb.PDBList()
pdbl.retrieve_pdb_file(pdb_code, file_format="pdb", pdir="pdbfiles")
parser = pdb.PDBParser(QUIET=True)
return parser.get_structure(pdb_code, "pdbfiles/pdb" + pdb_code.lower() + ".ent")
def load_pdb_var(self,pdb_code, var):
parser = pdb.PDBParser(QUIET=True)
with io.StringIO(var) as f:
return parser.get_structure(pdb_code,f)
def save_pseudo(self, chainlist, pname):
pseudopdb = pdb.Structure.Structure(pname)
pseudopdb.add(pdb.Model.Model(0))
for hi, h in enumerate(chainlist):
pseudopdb[0].add((pdb.Chain.Chain(str(hi+1))))
for j, r in enumerate(h):
res = pdb.Residue.Residue((" ",j," "),"X",j)
res.add(pdb.Atom.Atom("CA",r,0,0,"X","PSO",0,"U"))
pseudopdb[0][str(hi+1)].add(res)
io1 = pdb.PDBIO()
io1.set_structure(pseudopdb)
io1.save("pymol_output/"+ pname + '.pdb')
def write_cgo_arrow_pml(self, pdb_code, name, pos_list):
with open("pymol_output/"+pdb_code + name + ".pml", "w") as ps:
ps.write("run cgo_arrow.py\n")
for i, p in enumerate(pos_list):
ps.write("cgo_arrow " + str(list(p[0])) +", "+ str(list(p[1])) + ", name="+pdb_code + name + str(i) +"\n")
def save_pdb(self, strct, name):
io1 = pdb.PDBIO()
io1.set_structure(strct)
io1.save("pymol_output/"+name)
def handle(self, *args, **options):
failed = []
# get preferred chain for PDB-code
references = Structure.objects.all().prefetch_related('pdb_code','pdb_data')
for reference in references:
preferred_chain = reference.preferred_chain.split(',')[0]
pdb_code = reference.pdb_code.index
try:
print(pdb_code)
if "refined" in pdb_code:
failed.append(pdb_code)
continue
#structure = self.load_pdb(pdb_code)
structure = self.load_pdb_var(pdb_code,reference.pdb_data.pdb)
# grab residues with the generic numbering for this structure
db_reslist = list(Residue.objects.exclude(generic_number__isnull=True).filter(protein_conformation__protein=reference.protein_conformation.protein).prefetch_related('generic_number'))
#######################################################################
############################# filter pdb #############################
db_tmlist = [[] for i in range(TMNUM)]
db_set = set()
for r in db_reslist:
if r.generic_number.label[:2] in ["1x","2x","3x","4x","5x","6x","7x"]: # and r.generic_number in pchain
db_tmlist[int(r.generic_number.label[0])-1].append(r.sequence_number)
db_set.add((' ',r.sequence_number,' '))
def recurse(entity,slist):
for subenty in entity.get_list():
if not subenty.id in slist[0]: entity.detach_child(subenty.id)
elif slist[1:]: recurse(subenty, slist[1:])
recurse(structure,[[0], preferred_chain])
hse_struct = deepcopy(structure)
recurse(structure, [[0], preferred_chain, db_set])
pchain = structure[0][preferred_chain]
#######################################################################
############### Calculate the axes through the helices ################
#######################################################################
N = 3
hres_list = [np.asarray([pchain[r]["CA"].get_coord() for r in sl], dtype=float) for sl in db_tmlist]
h_cb_list = [np.asarray([pchain[r]["CB"].get_coord() if "CB" in pchain[r] else np.array([None,None,None]) for r in sl], dtype=float) for sl in db_tmlist]
# fast and fancy way to take the average of N consecutive elements
hres_three = np.asarray([sum([h[i:-(len(h) % N) or None:N] for i in range(N)])/N for h in hres_list])
helices_mn = np.asarray([np.mean(h, axis=0) for h in hres_three ])
self.save_pseudo(hres_three, pdb_code+"helper")
#######################################################################
################################# PCA #################################
#######################################################################
def pca_line(pca,h, r=0):
if ((not r) if pca.fit_transform(h)[0][0] < 0 else r):
return pca.inverse_transform(np.asarray([[-20,0,0],[20,0,0]]))
else:return pca.inverse_transform(np.asarray([[20,0,0],[-20,0,0]]))
helix_pcas = [PCA() for i in range(7)]
pos_list = np.asarray([pca_line(helix_pcas[i], h,i%2) for i,h in enumerate(hres_three)])
#self.write_cgo_arrow_pml(pdb_code, "pca",pos_list)
pos_list = np.mean(pos_list,axis=0)
#self.write_cgo_arrow_pml(pdb_code, "pca_mean",[pos_list])
pca = PCA()
pos_list = pca_line(pca, np.vstack(hres_three))
#self.write_cgo_arrow_pml(pdb_code, "pca_all",[pos_list])
pos_list = np.asarray([pca_line(PCA(), h[:len(h)//2:(-(i%2) or 1)]) for i,h in enumerate(hres_three)])
pos_list = pos_list - (np.mean(pos_list,axis=1)-helices_mn).reshape(-1,1,3)
#self.write_cgo_arrow_pml(pdb_code, "pca_extra",pos_list)
#self.write_cgo_arrow_pml(pdb_code, "pca_extra_mean",[np.mean(pos_list,axis=0)])
pca_extra = PCA()
pos_list = pca_line(pca_extra, np.vstack(pos_list))
#self.write_cgo_arrow_pml(pdb_code, "pca_extra_pca",[pos_list])
#######################################################################
################################ Angles ###############################
#######################################################################
def calc_angle(b,c):
ba = -b
bc = c + ba
ba[:,0] = 0
return np.degrees(np.arccos(inner1d(ba, bc) / (
|
np.linalg.norm(ba,axis=1)
|
numpy.linalg.norm
|
#!/usr/bin/env python3
# logging.debug('foo')
# logging.info('bar')
# logging.warning('buz')
import os, datetime, sys, logging, psutil, time, signal, cv2, re
import tkinter as tk #sudo apt install python3-tk
from tkinter import ttk
from PIL import ImageTk, Image
from enum import Enum
import numpy as np
from ast import literal_eval
import matplotlib.pyplot as plt
#from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
g_main_dir = '/home/lidar/Documents/capture'
g_acquisition_cmd = 'capture_streams.py'
g_camera_cmd = 'acquire_free_cpp_debug'
g_pid_dict = {g_acquisition_cmd : None, g_camera_cmd : None}
g_user_name = 'lidar'
g_data_dir_name = 'RAWCAP'
g_changing_states_now = True
g_inbetween_duration = 20 # seconds
g_refresh_interval = 1 # second
g_tk_refresh_interval = 2000 # milliseconds
g_last_refresh_timestamp = 0
g_start_in_between_timestamp = 0
g_last_processed_image_filenames = [None, None, None]
g_current_images = [None, None, None]
g_current_hists = [None, None, None]
g_toggle_page_timer = 0
g_toggle_page_interval = 5 #seconds
g_active_tab_idx = 0
g_active_tabs = []
g_title_cam_minus_two = "Camera acquisition -2"
g_title_cam_minus_one = "Camera acquisition -1"
g_title_cam_latest = "Camera acquisition now"
g_title_acq = "Acquisition"
g_displ_cam_minus_two = None
g_displ_cam_minus_one = None
g_displ_cam_latest = None
g_displ_hist_minus_two = None
g_displ_hist_minus_one = None
g_displ_hist_latest = None
g_displ_text_start = None
g_displ_text_minus_two = None
g_displ_text_minus_one = None
g_displ_text_latest = None
g_pashr_dict = {'utc_times':[],'gnss_qs':[],'imu_qs':[]}
def sig_handler(signum, frame):
if signum == signal.SIGINT:
logging.info(f"Signal {signum} has been received. Trying not to shut down...")
return
else:
try:
logging.warning(f"Signal {signum} has been received. Shutting down.")
except:
print(f"LOGGER NOT FOUND! Signal {signum} has been received. Shutting down.")
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
signal.signal(signal.SIGTERM, sig_handler)
class App_States(Enum):
"""
Describes what is currently happening outside the program:
- No data acquisition yet (startup)
- Data acquisition in progress
- Data acquisition stopped and waiting for next one (in between)
"""
STARTUP = 0
ACQUISITION = 1
INBETWEEN = 2
g_app_state = App_States.STARTUP
def switch_tk_state(new_state):
print("should change tk state here")
print(new_state)
def update_tk_window():
time.sleep(1)
def update_pid(old_pid, cmd, username = g_user_name):
"""
If the pid is None, checks for a matching process.
Else checks if the process is still running.
Returns: pid if valid. Else: None
"""
if old_pid is None:
for proc in psutil.process_iter(['pid', 'name', 'username', 'cmdline']):
if proc.info['username'] == username and cmd in ' '.join(proc.info['cmdline']):
return proc.info['pid']
        return None # if none of the currently running processes contains the cmd string
elif psutil.pid_exists(old_pid):
return old_pid
else:
return None
def update_pid_dict(dict_of_pids):
"""
The input dict() should be something like:
{GLOBAL_ACQUISITION_CMD : None, GLOBAL_CAMERA_CMD : 12345}
Iterate over keys, update their current value and return a dict
"""
result_dict = dict_of_pids.copy()
for key in result_dict.keys():
result_dict[key] = update_pid(dict_of_pids[key], key)
return result_dict
def read_acq_log():
"""
Reads the log left by the acquisition software, to interpret the raw images correctly (12/10/8 bits/channel)
"""
global g_data_dir_name
pattern = r"{.*}"
try:
log_fname = [fn for fn in os.listdir() if ".log" in fn and g_data_dir_name in fn][0]
with open(log_fname, "r") as acq_log:
for line in acq_log.readlines():
if "Camera_parameters" in line:
x = re.findall(pattern, line)[0]
return literal_eval(x)
except: return None
def read_raw_12p(data_chunk):
data = np.frombuffer(data_chunk, dtype=np.uint8)
byte_0, byte_1, byte_2 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T
pixel_0 = byte_0 + ((byte_1 % 16) << 8)
pixel_1 = (byte_1 >> 4) + (byte_2 << 4)
return np.reshape(
|
np.concatenate((pixel_0[:, None], pixel_1[:, None]), axis=1)
|
numpy.concatenate
|
import numpy as np
arrA = np.random.rand(3)
arrB = np.random.rand(3)
arrC = np.random.rand(3,3)
arrD = np.random.rand(3,4)
arrE = np.random.rand(3,3,3)
arrF = np.random.rand(3,4,5)
arrG = np.random.rand(3,3,3,3)
arrH = np.random.rand(3,4,5,6)
# transpose
np.einsum('ij->ji', arrD)
np.einsum('ijk->ikj', arrF)
np.einsum('ijk->jik', arrF)
np.einsum('ijk->jki', arrF)
np.einsum('ijk->kij', arrF)
np.einsum('ijk->kji', arrF)
np.einsum('ijkl->ijlk', arrG)
np.einsum('ijkl->ikjl', arrG)
np.einsum('ijkl->iklj', arrG)
np.einsum('ijkl->iljk', arrG)
np.einsum('ijkl->iljk', arrG)
np.einsum('ijkl->jikl', arrG)
np.einsum('ijkl->jilk', arrG)
np.einsum('ijkl->jkil', arrG)
np.einsum('ijkl->jkli', arrG)
np.einsum('ijkl->jlik', arrG)
np.einsum('ijkl->jlki', arrG)
np.einsum('ijkl->kijl', arrG)
np.einsum('ijkl->kilj', arrG)
np.einsum('ijkl->kjil', arrG)
np.einsum('ijkl->kjli', arrG)
np.einsum('ijkl->klij', arrG)
np.einsum('ijkl->klji', arrG)
np.einsum('ijkl->lijk', arrG)
np.einsum('ijkl->likj', arrG)
np.einsum('ijkl->ljik', arrG)
np.einsum('ijkl->ljki', arrG)
np.einsum('ijkl->lkij', arrG)
np.einsum('ijkl->lkji', arrG)
# sum(including colSum, rowSum, modeSum)
np.einsum('i->', arrA)
np.einsum('ij->', arrD)
np.einsum('ij->i', arrD)
np.einsum('ij->j', arrD)
np.einsum('ijk->', arrF)
np.einsum('ijk->i', arrF)
np.einsum('ijk->j', arrF)
np.einsum('ijk->k', arrF)
np.einsum('ijk->ij', arrF)
np.einsum('ijk->jk', arrF)
np.einsum('ijk->ik', arrF)
np.einsum('ijkl->i', arrG)
np.einsum('ijkl->j', arrG)
np.einsum('ijkl->k', arrG)
np.einsum('ijkl->l', arrG)
np.einsum('ijkl->ij', arrG)
np.einsum('ijkl->ik', arrG)
np.einsum('ijkl->il', arrG)
np.einsum('ijkl->ji', arrG)
np.einsum('ijkl->jk', arrG)
np.einsum('ijkl->jl', arrG)
np.einsum('ijkl->ki', arrG)
np.einsum('ijkl->kj', arrG)
np.einsum('ijkl->kl', arrG)
np.einsum('ijkl->li', arrG)
np.einsum('ijkl->lj', arrG)
np.einsum('ijkl->lk', arrG)
np.einsum('ijkl->ijk', arrG)
np.einsum('ijkl->ijl', arrG)
np.einsum('ijkl->ikj', arrG)
np.einsum('ijkl->ikl', arrG)
np.einsum('ijkl->ilj', arrG)
np.einsum('ijkl->ilk', arrG)
np.einsum('ijkl->jik', arrG)
np.einsum('ijkl->jil', arrG)
np.einsum('ijkl->jki', arrG)
np.einsum('ijkl->jkl', arrG)
np.einsum('ijkl->jli', arrG)
np.einsum('ijkl->jlk', arrG)
np.einsum('ijkl->kij', arrG)
np.einsum('ijkl->kil', arrG)
np.einsum('ijkl->kji', arrG)
np.einsum('ijkl->kjl', arrG)
np.einsum('ijkl->kli', arrG)
np.einsum('ijkl->klj', arrG)
np.einsum('ijkl->lij', arrG)
np.einsum('ijkl->lik', arrG)
np.einsum('ijkl->lji', arrG)
np.einsum('ijkl->ljk', arrG)
np.einsum('ijkl->lki', arrG)
np.einsum('ijkl->lkj', arrG)
# multiply (diagonal elements)
np.einsum('ii->i', arrC)
np.einsum('iii->i', arrE)
np.einsum('iiii->i', arrG)
# sum + multiply(Hadamard Product)
np.einsum('i,i->i', arrA, arrA)
np.einsum('ij,ij->ij', arrD, arrD)
np.einsum('ijk,ijk->ijk', arrF, arrF)
np.einsum('ijkl,ijkl->ijkl', arrG, arrG)
# sum + multiply(Sum of squares, Frobenius norm if you take sqrt)
np.einsum('i,i->', arrA, arrA)
np.einsum('ij,ij->', arrD, arrD)
np.einsum('ijk,ijk->', arrF, arrF)
np.einsum('ijkl,ijkl->', arrG, arrG)
# multiply + sum + transpose
#(Include different ones, adamantine products, inner products,
# matrix products, and contraction products)
np.einsum('ij,jk->', arrC, arrD)
np.einsum('ij,jk->i', arrC, arrD)
np.einsum('ij,jk->j', arrC, arrD)
np.einsum('ij,jk->k', arrC, arrD)
np.einsum('ij,jk->ij', arrC, arrD)
np.einsum('ij,jk->ji', arrC, arrD)
np.einsum('ij,jk->jk', arrC, arrD)
np.einsum('ij,jk->kj', arrC, arrD)
np.einsum('ij,jk->ik', arrC, arrD)
np.einsum('ij,jk->ki', arrC, arrD)
np.einsum('ij,jk->ijk', arrC, arrD)
np.einsum('ij,jk->ikj', arrC, arrD)
np.einsum('ij,jk->jik', arrC, arrD)
np.einsum('ij,jk->jki', arrC, arrD)
np.einsum('ij,jk->kij', arrC, arrD)
np.einsum('ij,jk->kji', arrC, arrD)
np.einsum('ij,ijk->', arrC, arrE)
np.einsum('ij,ijk->i', arrC, arrE)
np.einsum('ij,ijk->j', arrC, arrE)
np.einsum('ij,ijk->k', arrC, arrE)
np.einsum('ij,ijk->ij', arrC, arrE)
np.einsum('ij,ijk->ji', arrC, arrE)
np.einsum('ij,ijk->jk', arrC, arrE)
np.einsum('ij,ijk->kj', arrC, arrE)
np.einsum('ij,ijk->ik', arrC, arrE)
np.einsum('ij,ijk->ki', arrC, arrE)
np.einsum('ij,ijk->ijk', arrC, arrE)
np.einsum('ijk,ijkl->', arrE, arrG)
np.einsum('ijk,ijkl->i', arrE, arrG)
np.einsum('ijk,ijkl->j', arrE, arrG)
np.einsum('ijk,ijkl->k', arrE, arrG)
np.einsum('ijk,ijkl->l', arrE, arrG)
np.einsum('ijk,ijkl->ij', arrE, arrG)
np.einsum('ijk,ijkl->ji', arrE, arrG)
np.einsum('ijk,ijkl->ik', arrE, arrG)
np.einsum('ijk,ijkl->ki', arrE, arrG)
np.einsum('ijk,ijkl->il', arrE, arrG)
np.einsum('ijk,ijkl->li', arrE, arrG)
np.einsum('ijk,ijkl->jk', arrE, arrG)
np.einsum('ijk,ijkl->kj', arrE, arrG)
np.einsum('ijk,ijkl->jl', arrE, arrG)
np.einsum('ijk,ijkl->lj', arrE, arrG)
np.einsum('ijk,ijkl->kl', arrE, arrG)
np.einsum('ijk,ijkl->lk', arrE, arrG)
np.einsum('ijk,ijkl->ijk', arrE, arrG)
np.einsum('ijk,ijkl->ikj', arrE, arrG)
np.einsum('ijk,ijkl->jik', arrE, arrG)
np.einsum('ijk,ijkl->jki', arrE, arrG)
np.einsum('ijk,ijkl->kij', arrE, arrG)
np.einsum('ijk,ijkl->kji', arrE, arrG)
np.einsum('ijk,ijkl->ijl', arrE, arrG)
np.einsum('ijk,ijkl->ilj', arrE, arrG)
np.einsum('ijk,ijkl->jil', arrE, arrG)
np.einsum('ijk,ijkl->jli', arrE, arrG)
np.einsum('ijk,ijkl->lij', arrE, arrG)
np.einsum('ijk,ijkl->lji', arrE, arrG)
np.einsum('ijk,ijkl->ikl', arrE, arrG)
np.einsum('ijk,ijkl->ilk', arrE, arrG)
np.einsum('ijk,ijkl->kil', arrE, arrG)
np.einsum('ijk,ijkl->kli', arrE, arrG)
np.einsum('ijk,ijkl->lik', arrE, arrG)
|
np.einsum('ijk,ijkl->lki', arrE, arrG)
|
numpy.einsum
|
def LSTM():
    print('Starting Sentiment Analysis')
import json
import os
from pprint import pprint
from konlpy.tag import Okt
import nltk
import numpy as np
import pandas as pd
import time
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, LSTM, Embedding
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
from keras.models import load_model
from datetime import timedelta,date
def read_data(filename):
with open(filename, 'r') as f:
            # split each line on tabs
data = [line.split('\t') for line in f.read().splitlines()]
            # drop the txt file header (id document label)
data = data[1:]
return data
train_data = read_data('ratings_train.txt')
test_data = read_data('ratings_test.txt')
okt = Okt()
def tokenize(doc):
        # join each token and its POS tag with '/'
        # norm normalizes the text, stem reduces each word to its stem
return ['/'.join(t) for t in okt.pos(doc, norm=True, stem=True)]
    # to avoid re-tokenizing every run, reuse the JSON files if they already exist
if os.path.isfile('train_docs.json'):
with open('train_docs.json') as f:
train_docs = json.load(f)
with open('test_docs.json') as f:
test_docs = json.load(f)
else:
        # row[1] holds the review text, row[2] the positive/negative label
train_docs = [(tokenize(row[1]), row[2]) for row in train_data]
test_docs = [(tokenize(row[1]), row[2]) for row in test_data]
        # save as JSON files
with open('train_docs.json', 'w', encoding="utf-8") as make_file:
json.dump(train_docs, make_file, ensure_ascii=False, indent="\t")
with open('test_docs.json', 'w', encoding="utf-8") as make_file:
json.dump(test_docs, make_file, ensure_ascii=False, indent="\t")
tokens = [t for d in train_docs for t in d[0]]
text = nltk.Text(tokens, name='NMSC')
    # we cannot use every token, so vectorize using only the 3000 most frequent tokens
    # with more RAM you could push this toward 10000
selected_words = [f[0] for f in text.vocab().most_common(3000)]
    # count how many times each word in selected_words appears in doc and return the counts
    # this builds a bag-of-words (BOW) encoded vector over the selected vocabulary
    # BOW (Bag of Words)
def term_frequency(doc):
return [doc.count(word) for word in selected_words]
    # the label is 0 or 1 (positive/negative), so only the token list d is used here (d, _)
    # only tokens from train_docs that also appear in selected_words are counted
    # train_docs is a list of ([review tokens], positive-or-negative) pairs
    # train_x is a 2D list: one count vector per training review, with one entry per selected word
train_x = [term_frequency(d) for d, _ in train_docs]
test_x = [term_frequency(d) for d, _ in test_docs]
train_y = [c for _, c in train_docs]
test_y = [c for _, c in test_docs]
    # the values are parsed from text, so cast them to float for the model input
    # each sample is a count vector with one entry per selected word
x_train = np.asarray(train_x).astype('float32')
x_test = np.asarray(test_x).astype('float32')
y_train = np.asarray(train_y).astype('float32')
y_test =
|
np.asarray(test_y)
|
numpy.asarray
|
import sys
import numpy as np
import networkx as nx
from itertools import combinations
import numba
EIGEN=2
N=19
EPS= 0.000001
EXPECTED_CLIQUE=57
mat=np.empty((N,N), 'uint8')
mat_inv=np.empty_like(mat)
vecs_prod=np.empty((1<<N,N))
verts=np.empty_like(vecs_prod)
vec_j=np.ones(N)
#seeds = np.asarray((
#(1,0,1)+(0,)*11+((1,)*4+(0,)*8)*3,
#(1,0,1)+(0,)*11+((0,)*4+(1,)*4+(0,)*4)*3,
#(1,0,1)+(0,)*11+((0,)*8+(1,)*4)*3#,
#(0,)*8+(1,)*4+(0,)*4+(1,)*4+(0,)*4+(1,)*4+(0,)*8,
#(0,)*4+(1,)*4+(0,)*4+(1,)*4+(0,)*16+(1,)*4,
#(1,)*4+(0,)*16+(1,)*4+(0,)*4+(1,)*4+(0,)*4
#), 'uint8')
def stringtomat(s):
I=np.identity(N)
A=nx.to_numpy_array(nx.from_graph6_bytes(s))
mat=EIGEN*I-A
return np.linalg.inv(mat)
@numba.jit(nopython=True, fastmath=True)
def checkVert(v, mat_inv):
#print(v)
prod=
|
np.dot(mat_inv, v)
|
numpy.dot
|
# -*- coding: utf-8 -*-
"""
Testing the stats and auxiliary functions
"""
import numpy as np
import pyrft as pr
def test_mvtstat():
""" Testing the mvtstat function """
for i in np.arange(2):
if i == 0:
dim = (50,)
elif i == 1:
dim = (50,50)
nsubj = 20
overall_dim = dim + (nsubj,)
noise = np.random.randn(*overall_dim)
tstat, xbar, std_dev = pr.mvtstat(noise)
assert tstat.shape == dim
assert xbar.shape == dim
assert std_dev.shape == dim
assert np.sum(std_dev > 0) == np.prod(dim)
def test_contrast_tstats():
""" Testing the contrast_tstats function """
# Note that the function always runs contrast_error_checking and
# constrast_tstats_noerrorchecking, so these functions are automatically tested
# via running it
# Need to include a 1D example
nsubj = 30
dim = (10,10)
for i in np.arange(3):
if i == 0:
categ = np.zeros(nsubj)
C = np.array(1)
elif i == 1:
categ = np.random.binomial(1, 0.4, size = nsubj)
C = np.array((1,-1))
elif i == 2:
categ = np.random.multinomial(2, [1/3,1/3,1/3], size = nsubj)[:,1]
C = np.array([[1,-1,0],[0,1,-1]]);
X = pr.group_design(categ); lat_data = pr.wfield(dim,nsubj)
tstat, residuals = pr.contrast_tstats(lat_data, X, C)
assert isinstance(tstat, pr.classes.Field)
assert tstat.D == len(dim)
assert tstat.masksize == dim
if len(C.shape) < 2:
C =
|
np.array([C])
|
numpy.array
|
from pyuvdata import UVCal
import numpy as np
import copy
from pyuvdata import utils as uvutils
def blank_uvcal_from_uvdata(uvdata):
"""initialize UVCal object with same times, antennas, and frequencies as uvdata.
Parameters
----------
uvdata: UVData object
        UVData object that you wish to generate a blank UVCal object from.
Returns
-------
uvcal: UVCal object
UVCal object with all flags set to False
and all gains set to unity with same antennas, freqs, jones, and times
as uvdata.
"""
uvcal = UVCal()
uvcal.Nfreqs = uvdata.Nfreqs
uvcal.Njones = uvdata.Npols
uvcal.Ntimes = uvdata.Ntimes
uvcal.Nspws = uvdata.Nspws
uvcal.history = ""
uvcal.Nspws = uvdata.Nspws
uvcal.telescope_name = uvdata.telescope_name
uvcal.telescope_location = uvdata.telescope_location
uvcal.Nants_data = uvdata.Nants_data
uvcal.Nants_telescope = uvdata.Nants_telescope
uvcal.ant_array = np.asarray(list(set(uvdata.ant_1_array).union(set(uvdata.ant_2_array))))
uvcal.antenna_names = uvdata.antenna_names
uvcal.antenna_numbers = uvdata.antenna_numbers
uvcal.antenna_positions = uvdata.antenna_positions
uvcal.spw_array = uvdata.spw_array
uvcal.freq_array = uvdata.freq_array
uvcal.jones_array = uvdata.polarization_array
uvcal.time_array =
|
np.unique(uvdata.time_array)
|
numpy.unique
|
import math
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as sg
from astropy.io import fits
from scipy.interpolate import CubicSpline
from scipy.ndimage.filters import percentile_filter
from scipy.signal import convolve2d
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.neighbors import KernelDensity
from collections import Counter
__author__ = 'nate'
def cantor(a, b):
"""Cantor pairing function, used to give unique int name to each observation"""
a = int(a)
b = int(b)
return (a + b) * (a + b + 1) / 2 + b
def decantor(z):
"""Inverse Cantor"""
w = math.floor(math.sqrt(8 * z + 1) / 2 - 0.5)
t = ((w + 1) * w) / 2
y = z - t
x = w - y
return int(x), int(y)
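# Editor's note: a tiny round-trip check for the Cantor pairing above, using an
# arbitrary (run, observation) pair chosen purely for illustration.
def _cantor_roundtrip_sketch():
    z = cantor(37, 42)
    return decantor(int(z)) == (37, 42)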
class APFRunString(str):
def __add__(self, x):
# If we're trying to add anything but an int, do normal string
# addition.
if type(x) is not int:
return str.__add__(self, x)
res = ''
i = len(self)-1
while x > 0:
# Get the ASCII code of the i-th letter and "normalize" it
# so that a is 0, b is 1, etc.
# If we are at the end of the string, make it -1, so that if we
# need to "add" 1, we get a.
if i >= 0:
c = ord(self[i]) - 97
else:
c = -1
# Calculate the number of positions by which the letter is to be
# "rotated".
pos = x % 26
# Calculate x for the next letter, add a "carry" if needed.
x //= 26
if c + pos >= 26:
x += 1
# Do the "rotation".
c = (c + pos) % 26
# Add the letter at the beginning of the string.
res = chr(c + 97) + res
i -= 1
# If we didn't reach the end of the string, add the rest of the string back.
if i >= 0:
res = self[:i+1] + res
return APFRunString(res)
def filterbypercentile(arr, top, bottom):
topp = np.percentile(arr, top)
bottomp = np.percentile(arr, bottom)
shortened = [bottomp < x < topp for x in arr]
return shortened
def reject_outliers(data, m=2.): # this must be modified so that it does a biased above outlier rejection
p = 50 # PERCENTILE TO USE for split
perc = np.percentile(data, p)
upper_half = data[data > perc]
d = np.abs(data - np.median(upper_half))
d2 = np.abs(upper_half - np.median(upper_half))
mdev = np.median(d2)
s = d / mdev if mdev else 0.
return s < m
def gauss(x, *p):
"""
Simple Gaussian function
:param x: ind var
:param p: coefficients A, mu, sigma
:return: numpy array gauss(x)
"""
A, mu, sigma = p
return A * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2))
def csq_red(model, data, dof=3.):
"""
    Computes a reduced chi square fit of data to model
Assumes model sums to one
:param model: expectation value of model
:param data: observed data
:param dof: number of degrees of freedom
:return: Reduced chi square (float)
"""
total = np.sum(data)
csq = np.power(np.array(model) * total - np.array(data), 2)
error = np.array(data)
error[np.where(error < 9)] = 9
csq = csq / error
csq = np.sum(csq)
csq /= len(data) - dof
return csq
def minimal_csq(coeffs, data, dof=3., n_it=20, min_thresh=0.005):
"""
Does a binary search to find a minimal chi square.
:param coeffs: gaussian fit coefficients
:param data: column to compute fit
:param dof: number of DOF for reduced chi square computation
:param n_it: number of iterations of Binary Search
:param min_thresh: difference in BS iterations deemed "sufficiently close"
:return: minimal value of chi square given n_it and min_thresh
"""
# TODO - anonymize from specific function type
indices = np.arange(len(data))
ub = coeffs[1] + 0.9
lb = coeffs[1] - 1
ctr = n_it
csq_past = 100
csq_now = 0
quick_csq = lambda x: csq_red(gauss(indices, *[coeffs[0], x, coeffs[2]]), data, dof=dof)
while ctr and (csq_past - csq_now > min_thresh or ctr == n_it - 1):
csq_past = csq_now
midpoint = (ub + lb) / 2.
l_midpoint = (lb + midpoint) / 2.
r_midpoint = (ub + midpoint) / 2.
csq_l = quick_csq(l_midpoint)
csq_r = quick_csq(r_midpoint)
if csq_r < csq_l:
lb = midpoint
csq_now = csq_r
else:
ub = midpoint
csq_now = csq_l
ctr -= 1
midpoint = (ub + lb) / 2.
return csq_red(gauss(indices, *[coeffs[0], midpoint, coeffs[2]]), data, dof=dof)
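# Editor's note: a hedged sketch of minimal_csq on a synthetic column; the
# gaussian coefficients, scale and offset below are assumptions chosen only to
# exercise the binary search described in the docstring above.
def _minimal_csq_sketch():
    indices = np.arange(30)
    coeffs = [1.0, 14.3, 2.0]  # A, mu, sigma of the model shape
    data = 200.0 * gauss(indices, *coeffs) + 5.0
    return minimal_csq(coeffs, data, dof=3.)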
def makegaussian(size, fwhm=3, center=None):
"""
Adapted from <NAME> on GitHub
Make a square gaussian kernel.
size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
"""
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
def find_nearest(array, value):
"""Finds nearest value in array"""
idx = (np.abs(array - value)).argmin()
return array[idx]
def pythag_dist_kernel(size=3):
s = size + size + 1
offsetx = np.repeat(np.reshape(range(s), (s, 1)), s, axis=1)
offsety = np.transpose(offsetx)
return np.square(offsetx - size) + np.square(offsety - size)
def get_header_info(rolist, info=['RA'], loc='/mir3/iodfitsdb'):
"""
Get header info for HIRES
:param rolist:
:param info:
:param loc:
:return:
"""
targ = 'Target'
info.insert(0, targ)
out = []
print (info)
for i in rolist:
t = fits.open(loc + '/rj' + str(i[0]) + '.' + str(i[1]) + '.fits')[0]
prt = [t.header['TARGNAME'].strip(' ')]
for j in info[1:]:
prt.append(t.header[j])
print (prt)
out.append(prt)
return out
def conv(arr, kernel=[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]):
return convolve2d(arr, kernel)
# ------------------------------------------------------------------------------
# Utilities for fitting and interpolation
# ------------------------------------------------------------------------------
def spline_interpolate(arr, nseg=20, percentile=50):
"""
Returns the cubic spline describing data, split into nseg segments.
:param arr: input array
:param nseg: number of same-size segments
:param percentile: what percentile of each segment to use as midpt y value?
:return: scipy.interpolate.CubicSpline
"""
l = len(arr)
midpts = [np.median(arr[:int(l // nseg // 2)])]
x = [0]
for i in range(nseg):
x += [int((0.5+i)*(l/nseg))]
midpts += [np.percentile(arr[i * (l // nseg):(i + 1) * (l // nseg)], percentile)]
x += [l-1]
midpts += [np.median(arr[-int(l // nseg / 2):])]
return CubicSpline(x,midpts)
def poly_interpolator(arr,degree=4, nseg=5,percentile=95):
"""
Returns the function describing a polynomial fitting the data, split into nseg segments
:param arr: input array
    :param degree: degree of polynomial fit
:param nseg: number of segments of equal length to use.
:param percentile:
:return:
"""
l = len(arr)
midpts = [np.median(arr[:int(l // nseg // 2)])]
x = [0]
for i in range(nseg):
x += [int((0.5 + i) * (l // nseg))]
midpts += [np.percentile(arr[i * (l // nseg):(i + 1) * (l // nseg)], percentile)]
x += [l - 1]
midpts += [np.median(arr[-int(l // nseg // 2):])]
return np.poly1d(np.polyfit(x, midpts, deg=degree))
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
"""
import numpy as np
from math import factorial
try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
except ValueError:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = range(order + 1)
half_window = (window_size - 1) // 2
# precompute coefficients
b = np.mat([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
m = np.linalg.pinv(b).A[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
# This is the most effective continuum fit
def continuum_fit(arr, percentile_kernel = 501,savitzky_kernel = 2001, savitzky_degree=4, perc=50):
# This fixes the singularities
# Value of 500 chosen arbitrarily - should not have too much of an effect
fixval = np.max([np.abs(np.min(arr) * 2),500.])
fix = arr + fixval
pcf = percentile_filter(fix, perc, size=percentile_kernel)
sav = savitzky_golay(pcf, savitzky_kernel, savitzky_degree)
return fix/(sav/np.max(sav)) - fixval
def deblaze(arr, method = 'savitzky', percentile_kernel = 101, savitzky_kernel=2001, savitzky_degree=4, perc=50):
if method == 'savitzky':
return continuum_fit(arr, percentile_kernel=percentile_kernel, savitzky_kernel=savitzky_kernel,
savitzky_degree=savitzky_degree, perc=perc)
elif method == 'meanshift':
median_of_array = np.median((arr) + 1000.)
bandwidth = estimate_bandwidth(arr[:, np.newaxis], quantile=0.1)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(arr[:, np.newaxis])
# xvals = np.arange(4608)
test = np.array(arr)
labels = ms.labels_
# Replace the missing values (not at maximal cluster) with median of array values in cluster in original array
test[labels != 0] = np.median(test[labels == 0])
test[test == 0] = 1
med_test = sg.medfilt(test, kernel_size=101)
return arr / med_test * median_of_array
elif method == 'percentile':
# This code has been adapted - the percentile kernel specified is used to Lower Bound only
fixval = np.max([np.abs(np.min(arr) * 2), 500.])
fix = arr + fixval
pcf = np.max(np.array([percentile_filter(fix, perc, size=percentile_kernel),
percentile_filter(fix, perc, size=101)]), axis=0)
return fix / (pcf / np.mean(pcf)) - fixval
else:
        raise KeyError('The deblaze method you have passed is not implemented. Please pick from savitzky, meanshift, and percentile')
# ------------------------------------------------------------------------------
# Utilities for laser search
# ------------------------------------------------------------------------------
def getpercentile(order, perc, method='meanshift', kernel_bandwidth=100, kernel='epanechnikov'):
"""
Returns value of 'perc'th percentile
(usually 75th) count value in 'order'
:param order: Spectral order to compute percentile on
:param perc: What(th) %ile to compute.
"""
#TODO - add support for piecewise thresholds
if method == 'percentile':
nsplits = 1 # Compute percentile piecewise - I have not been
maximum_thresh = 0
l=len(order)
        inc = l // nsplits  # integer segment length so the slices below stay integral
for i in range(nsplits):
sub = order[i * inc:(i + 1) * inc]
percentile = np.percentile(sub, perc)
if maximum_thresh < percentile:
maximum_thresh = percentile
return maximum_thresh
elif method == 'kde':
kde = KernelDensity(kernel=kernel, bandwidth=kernel_bandwidth).fit(order)
elif method == 'meanshift':
order_subset = order[::10,np.newaxis]
try:
bandwidth = estimate_bandwidth(order_subset, quantile=0.1)
except ValueError:
            # Replace the NaN with the nearest value
order_to_estimate = order_subset
ind = np.where(~np.isinf(order_to_estimate))[0]
first, last = ind[0], ind[-1]
order_to_estimate[:first] = order_to_estimate[first]
order_to_estimate[last + 1:] = order_to_estimate[last]
bandwidth = estimate_bandwidth(order_to_estimate, quantile=0.1)
# print ('Bandwidth is {0}'.format(bandwidth))
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(order_subset)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
#for k in range(n_clusters_):
# my_members = labels == k
# print "cluster {0}: {1}".format(k, order[:, np.newaxis][my_members, 0])
#return cluster_centers[0][0]
# print(cluster_centers)
        # TODO Determine why there is an index error occurring here - should there be more than 3 clusters
        # or is this normal behavior?
label_counter = Counter(labels)
        top_labels = [x for x in label_counter.most_common() if x[1] > 100]
        if len(top_labels) == 0:
            top_labels = [x for x in label_counter.most_common() if x[1] > 50]
        return np.max([cluster_centers[x[0]][0] for x in top_labels])
else:
raise KeyError
def contiguous_regions(condition):
"""
Borrowed from <NAME> on StackOverflow
Finds contiguous True regions of the boolean array 'condition'. Returns
a 2D array where the first column is the start index of the region and the
second column is the end index.
"""
# Find the indices of changes in 'condition'
d = np.diff(condition)
idx, = d.nonzero()
# We need to start things after the change in 'condition'. Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size] # Edit
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
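# Editor's note: a minimal illustration of contiguous_regions on a hand-written
# boolean array; the expected [start, stop) rows follow from the docstring above.
def _contiguous_regions_sketch():
    cond = np.array([False, True, True, False, False, True, True, True, False])
    return contiguous_regions(cond)  # -> [[1, 3], [5, 8]]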
def finddeviates(order, thresh, npix=3):
"""returns a list of deviation indices [start,stop]."""
out = []
#plt.plot(order)
#plt.show()
#print(order, thresh)
#plt.cla()
for start, stop in contiguous_regions(order > thresh):
diff = stop - start
if diff >= npix:
out.append([diff, start])
return out
def findthresh(order, npix=3.0, method='full_deviation'):
"""
Computes threshold using median absolute deviation (MAD) method
note here that 'order' passed to this function is generally:
spectral order - 75th percentile count value of spectral order
That is, the sum of 3 consecutive points in order being above 0
is the same as sum of 3 being above 75th %ile in spectral order.
Returns the Median Absolute (positive) Deviation of npix (usually 3)
pixel bins above the percentile set in findhigher.
"""
# Number of pixels to demand consecutively deviant. 3 is appropriate for HIRES.
# Convolution as a way of binning by 3 pixels to see groups that exceed
if method == '3pix':
binned_ord = np.convolve(np.ones(npix) / npix, order, 'valid')
deviations = finddeviates(binned_ord, 0, npix)
uppies = []
for j in deviations:
for i in range(j[0]):
uppies.append(binned_ord[j[1] + i])
elif method == 'full_deviation':
deviations = finddeviates(order, 0, npix)
uppies = []
for j in deviations:
uppies.append(
|
np.median(order[j[1]:j[1] + j[0]])
|
numpy.median
|
#!/usr/bin/env python
"""
# > Script for measuring quantitative performances in terms of
# - Underwater Image Quality Measure (UIQM)
# - Structural Similarity Metric (SSIM)
# - Peak Signal to Noise Ratio (PSNR)
#
# Maintainer: Jahid (email: <EMAIL>)
# Interactive Robotics and Vision Lab (http://irvlab.cs.umn.edu/)
# Any part of this repo can be used for academic and educational purposes only
"""
## python libs
import os
import ntpath
import numpy as np
from scipy import misc
## local libs
from utils.data_utils import getPaths
from utils.uqim_utils import getUIQM
from utils.ssm_psnr_utils import getSSIM, getPSNR
## data paths
REAL_im_dir = 'data/test/A/' # real/input im-dir with {f.ext}
GEN_im_dir = "data/output/" # generated im-dir with {f_gen.ext}
GTr_im_dir = 'data/test/GTr_A/' # ground truth im-dir with {f.ext}
REAL_paths, GEN_paths = getPaths(REAL_im_dir), getPaths(GEN_im_dir)
## measures UIQM for all images in a directory
def measure_UIQMs(dir_name):
paths = getPaths(dir_name)
uqims = []
for img_path in paths:
im = misc.imread(img_path)
uqims.append(getUIQM(im))
return np.array(uqims)
## compares avg ssim and psnr
def measure_SSIM_PSNRs(GT_dir, Gen_dir):
"""
Assumes:
* GT_dir contain ground-truths {filename.ext}
* Gen_dir contain generated images {filename_gen.png}
* Images are of same-size
"""
GT_paths, Gen_paths = getPaths(GT_dir), getPaths(Gen_dir)
ssims, psnrs = [], []
for img_path in GT_paths:
name_split = ntpath.basename(img_path).split('.')
gen_path = os.path.join(Gen_dir, name_split[0]+'_gen.png') #+name_split[1])
if (gen_path in Gen_paths):
r_im = misc.imread(img_path)
g_im = misc.imread(gen_path)
assert (r_im.shape==g_im.shape), "The images should be of same-size"
ssim = getSSIM(r_im, g_im)
psnr = getPSNR(r_im, g_im)
#print ("{0}, {1}: {2}".format(img_path,gen_path, ssim))
#print ("{0}, {1}: {2}".format(img_path,gen_path, psnr))
ssims.append(ssim)
psnrs.append(psnr)
return np.array(ssims), np.array(psnrs)
### compute SSIM and PSNR
SSIM_measures, PSNR_measures = measure_SSIM_PSNRs(GTr_im_dir, GEN_im_dir)
print ("SSIM >> Mean: {0} std: {1}".format(np.mean(SSIM_measures),
|
np.std(SSIM_measures)
|
numpy.std
|
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
import torch
from openmmforcefields.generators import SystemGenerator
from simtk import openmm, unit
from simtk.openmm.app import Simulation
from simtk.unit import Quantity
from espaloma.units import *
import espaloma as esp
# =============================================================================
# CONSTANTS
# =============================================================================
# simulation specs
TEMPERATURE = 350 * unit.kelvin
STEP_SIZE = 1.0 * unit.femtosecond
COLLISION_RATE = 1.0 / unit.picosecond
EPSILON_MIN = 0.05 * unit.kilojoules_per_mole
# =============================================================================
# MODULE FUNCTIONS
# =============================================================================
def add_nonbonded_force(
g,
forcefield="gaff-1.81",
subtract_charges=False,
):
# parameterize topology
topology = g.mol.to_topology().to_openmm()
generator = SystemGenerator(
small_molecule_forcefield=forcefield,
molecules=[g.mol],
forcefield_kwargs={"constraints": None, "removeCMMotion": False},
)
# create openmm system
system = generator.create_system(
topology,
)
# use langevin integrator, although it's not super useful here
integrator = openmm.LangevinIntegrator(
TEMPERATURE, COLLISION_RATE, STEP_SIZE
)
# create simulation
simulation = Simulation(
topology=topology, system=system, integrator=integrator
)
# get forces
forces = list(system.getForces())
# loop through forces
for force in forces:
name = force.__class__.__name__
# turn off angle
if "Angle" in name:
for idx in range(force.getNumAngles()):
id1, id2, id3, angle, k = force.getAngleParameters(idx)
force.setAngleParameters(idx, id1, id2, id3, angle, 0.0)
force.updateParametersInContext(simulation.context)
elif "Bond" in name:
for idx in range(force.getNumBonds()):
id1, id2, length, k = force.getBondParameters(idx)
force.setBondParameters(
idx,
id1,
id2,
length,
0.0,
)
force.updateParametersInContext(simulation.context)
elif "Torsion" in name:
for idx in range(force.getNumTorsions()):
(
id1,
id2,
id3,
id4,
periodicity,
phase,
k,
) = force.getTorsionParameters(idx)
force.setTorsionParameters(
idx,
id1,
id2,
id3,
id4,
periodicity,
phase,
0.0,
)
force.updateParametersInContext(simulation.context)
elif "Nonbonded" in name:
if subtract_charges:
for idx in range(force.getNumParticles()):
q, sigma, epsilon = force.getParticleParameters(idx)
force.setParticleParameters(idx, 0.0, sigma, epsilon)
for idx in range(force.getNumExceptions()):
idx0, idx1, q, sigma, epsilon = force.getExceptionParameters(idx)
force.setExceptionParameters(idx, idx0, idx1, 0.0, sigma, epsilon)
force.updateParametersInContext(simulation.context)
# the snapshots
xs = (
Quantity(
g.nodes["n1"].data["xyz"].detach().numpy(),
esp.units.DISTANCE_UNIT,
)
.value_in_unit(unit.nanometer)
.transpose((1, 0, 2))
)
# loop through the snapshots
energies = []
derivatives = []
for x in xs:
simulation.context.setPositions(x)
state = simulation.context.getState(
getEnergy=True,
getParameters=True,
getForces=True,
)
energy = state.getPotentialEnergy().value_in_unit(
esp.units.ENERGY_UNIT,
)
derivative = state.getForces(asNumpy=True).value_in_unit(
esp.units.FORCE_UNIT,
)
energies.append(energy)
derivatives.append(derivative)
# put energies to a tensor
energies = torch.tensor(
energies,
dtype=torch.get_default_dtype(),
).flatten()[None, :]
derivatives = torch.tensor(
np.stack(derivatives, axis=1),
dtype=torch.get_default_dtype(),
)
# add the energies
g.heterograph.apply_nodes(
lambda node: {"u": node.data["u"] + energies},
ntype="g",
)
return g
def get_coulomb_force(
g,
forcefield="gaff-1.81",
):
# parameterize topology
topology = g.mol.to_topology().to_openmm()
generator = SystemGenerator(
small_molecule_forcefield=forcefield,
molecules=[g.mol],
forcefield_kwargs={"constraints": None, "removeCMMotion": False},
)
# create openmm system
system = generator.create_system(
topology,
)
# get forces
forces = list(system.getForces())
for force in forces:
name = force.__class__.__name__
if "Nonbonded" in name:
force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
    # use a zero-step Verlet integrator; it is only needed to construct the Simulation
integrator = openmm.VerletIntegrator(0.0)
# create simulation
simulation = Simulation(
topology=topology, system=system, integrator=integrator
)
# the snapshots
xs = (
Quantity(
g.nodes["n1"].data["xyz"].detach().numpy(),
esp.units.DISTANCE_UNIT,
)
.value_in_unit(unit.nanometer)
.transpose((1, 0, 2))
)
# loop through the snapshots
energies = []
derivatives = []
for x in xs:
simulation.context.setPositions(x)
state = simulation.context.getState(
getEnergy=True,
getParameters=True,
getForces=True,
)
energy = state.getPotentialEnergy().value_in_unit(
esp.units.ENERGY_UNIT,
)
derivative = state.getForces(asNumpy=True).value_in_unit(
esp.units.FORCE_UNIT,
)
energies.append(energy)
derivatives.append(derivative)
# put energies to a tensor
energies = torch.tensor(
energies,
dtype=torch.get_default_dtype(),
).flatten()[None, :]
derivatives = torch.tensor(
np.stack(derivatives, axis=1),
dtype=torch.get_default_dtype(),
)
# loop through forces
forces = list(system.getForces())
for force in forces:
name = force.__class__.__name__
if "Nonbonded" in name:
force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
for idx in range(force.getNumParticles()):
q, sigma, epsilon = force.getParticleParameters(idx)
force.setParticleParameters(idx, 0.0, sigma, epsilon)
for idx in range(force.getNumExceptions()):
idx0, idx1, q, sigma, epsilon = force.getExceptionParameters(idx)
force.setExceptionParameters(idx, idx0, idx1, 0.0, sigma, epsilon)
force.updateParametersInContext(simulation.context)
# the snapshots
xs = (
Quantity(
g.nodes["n1"].data["xyz"].detach().numpy(),
esp.units.DISTANCE_UNIT,
)
.value_in_unit(unit.nanometer)
.transpose((1, 0, 2))
)
# loop through the snapshots
new_energies = []
new_derivatives = []
for x in xs:
simulation.context.setPositions(x)
state = simulation.context.getState(
getEnergy=True,
getParameters=True,
getForces=True,
)
energy = state.getPotentialEnergy().value_in_unit(
esp.units.ENERGY_UNIT,
)
derivative = state.getForces(asNumpy=True).value_in_unit(
esp.units.FORCE_UNIT,
)
new_energies.append(energy)
new_derivatives.append(derivative)
# put energies to a tensor
new_energies = torch.tensor(
new_energies,
dtype=torch.get_default_dtype(),
).flatten()[None, :]
new_derivatives = torch.tensor(
np.stack(new_derivatives, axis=1),
dtype=torch.get_default_dtype(),
)
return energies - new_energies, derivatives - new_derivatives
def subtract_coulomb_force(
g,
forcefield="gaff-1.81",
):
delta_energies, delta_derivatives = get_coulomb_force(g, forcefield=forcefield)
# subtract the energies
g.heterograph.apply_nodes(
lambda node: {"u_ref": node.data["u_ref"] - delta_energies},
ntype="g",
)
if "u_ref_prime" in g.nodes["n1"]:
g.heterograph.apply_nodes(
lambda node: {
"u_ref_prime": node.data["u_ref_prime"] - delta_derivatives
},
ntype="n1",
)
return g
def subtract_nonbonded_force(
g,
forcefield="gaff-1.81",
subtract_charges=False,
):
# parameterize topology
topology = g.mol.to_topology().to_openmm()
generator = SystemGenerator(
small_molecule_forcefield=forcefield,
molecules=[g.mol],
forcefield_kwargs={"constraints": None, "removeCMMotion": False},
)
# create openmm system
system = generator.create_system(
topology,
)
# use langevin integrator, although it's not super useful here
integrator = openmm.LangevinIntegrator(
TEMPERATURE, COLLISION_RATE, STEP_SIZE
)
# create simulation
simulation = Simulation(
topology=topology, system=system, integrator=integrator
)
# get forces
forces = list(system.getForces())
# loop through forces
for force in forces:
name = force.__class__.__name__
# turn off angle
if "Angle" in name:
for idx in range(force.getNumAngles()):
id1, id2, id3, angle, k = force.getAngleParameters(idx)
force.setAngleParameters(idx, id1, id2, id3, angle, 0.0)
force.updateParametersInContext(simulation.context)
elif "Bond" in name:
for idx in range(force.getNumBonds()):
id1, id2, length, k = force.getBondParameters(idx)
force.setBondParameters(
idx,
id1,
id2,
length,
0.0,
)
force.updateParametersInContext(simulation.context)
elif "Torsion" in name:
for idx in range(force.getNumTorsions()):
(
id1,
id2,
id3,
id4,
periodicity,
phase,
k,
) = force.getTorsionParameters(idx)
force.setTorsionParameters(
idx,
id1,
id2,
id3,
id4,
periodicity,
phase,
0.0,
)
force.updateParametersInContext(simulation.context)
elif "Nonbonded" in name:
if subtract_charges:
for idx in range(force.getNumParticles()):
q, sigma, epsilon = force.getParticleParameters(idx)
force.setParticleParameters(idx, 0.0, sigma, epsilon)
for idx in range(force.getNumExceptions()):
idx0, idx1, q, sigma, epsilon = force.getExceptionParameters(idx)
force.setExceptionParameters(idx, idx0, idx1, 0.0, sigma, epsilon)
force.updateParametersInContext(simulation.context)
# the snapshots
xs = (
Quantity(
g.nodes["n1"].data["xyz"].detach().numpy(),
esp.units.DISTANCE_UNIT,
)
.value_in_unit(unit.nanometer)
.transpose((1, 0, 2))
)
# loop through the snapshots
energies = []
derivatives = []
for x in xs:
simulation.context.setPositions(x)
state = simulation.context.getState(
getEnergy=True,
getParameters=True,
getForces=True,
)
energy = state.getPotentialEnergy().value_in_unit(
esp.units.ENERGY_UNIT,
)
derivative = state.getForces(asNumpy=True).value_in_unit(
esp.units.FORCE_UNIT,
)
energies.append(energy)
derivatives.append(derivative)
# put energies to a tensor
energies = torch.tensor(
energies,
dtype=torch.get_default_dtype(),
).flatten()[None, :]
derivatives = torch.tensor(
np.stack(derivatives, axis=1),
dtype=torch.get_default_dtype(),
)
# subtract the energies
g.heterograph.apply_nodes(
lambda node: {"u_ref": node.data["u_ref"] - energies},
ntype="g",
)
if "u_ref_prime" in g.nodes["n1"]:
g.heterograph.apply_nodes(
lambda node: {
"u_ref_prime": node.data["u_ref_prime"] - derivatives
},
ntype="n1",
)
if subtract_charges:
g = subtract_coulomb_force(g)
return g
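# Illustrative usage sketch (assumption, not from the original source): before
# fitting valence terms, the MM nonbonded (and optionally Coulomb) contribution
# is typically subtracted from the reference energies stored on the graph.
def _example_strip_nonbonded(g):
    # `g` is assumed to carry reference energies in g.nodes["g"].data["u_ref"]
    # and, optionally, gradients in g.nodes["n1"].data["u_ref_prime"].
    g = subtract_nonbonded_force(g, forcefield="gaff-1.81", subtract_charges=True)
    return g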
def subtract_nonbonded_force_except_14(
g,
forcefield="gaff-1.81",
):
# parameterize topology
topology = g.mol.to_topology().to_openmm()
generator = SystemGenerator(
small_molecule_forcefield=forcefield,
molecules=[g.mol],
)
# create openmm system
system = generator.create_system(
topology,
)
# use langevin integrator, although it's not super useful here
integrator = openmm.LangevinIntegrator(
TEMPERATURE, COLLISION_RATE, STEP_SIZE
)
# create simulation
simulation = Simulation(
topology=topology, system=system, integrator=integrator
)
# get forces
forces = list(system.getForces())
# loop through forces
for force in forces:
name = force.__class__.__name__
# turn off angle
if "Angle" in name:
for idx in range(force.getNumAngles()):
id1, id2, id3, angle, k = force.getAngleParameters(idx)
force.setAngleParameters(idx, id1, id2, id3, angle, 0.0)
force.updateParametersInContext(simulation.context)
elif "Bond" in name:
for idx in range(force.getNumBonds()):
id1, id2, length, k = force.getBondParameters(idx)
force.setBondParameters(
idx,
id1,
id2,
length,
0.0,
)
force.updateParametersInContext(simulation.context)
elif "Torsion" in name:
for idx in range(force.getNumTorsions()):
(
id1,
id2,
id3,
id4,
periodicity,
phase,
k,
) = force.getTorsionParameters(idx)
force.setTorsionParameters(
idx,
id1,
id2,
id3,
id4,
periodicity,
phase,
0.0,
)
force.updateParametersInContext(simulation.context)
elif "Nonbonded" in name:
for exception_index in range(force.getNumExceptions()):
(
p1,
p2,
chargeprod,
sigma,
epsilon,
) = force.getExceptionParameters(exception_index)
force.setExceptionParameters(
exception_index, p1, p2, chargeprod, sigma, 1e-8 * epsilon
)
force.updateParametersInContext(simulation.context)
# the snapshots
xs = (
Quantity(
g.nodes["n1"].data["xyz"].detach().numpy(),
esp.units.DISTANCE_UNIT,
)
.value_in_unit(unit.nanometer)
.transpose((1, 0, 2))
)
# loop through the snapshots
energies = []
derivatives = []
for x in xs:
simulation.context.setPositions(x)
state = simulation.context.getState(
getEnergy=True,
getParameters=True,
getForces=True,
)
energy = state.getPotentialEnergy().value_in_unit(
esp.units.ENERGY_UNIT,
)
derivative = state.getForces(asNumpy=True).value_in_unit(
esp.units.FORCE_UNIT,
)
energies.append(energy)
derivatives.append(derivative)
# put energies to a tensor
energies = torch.tensor(
energies,
dtype=torch.get_default_dtype(),
).flatten()[None, :]
derivatives = torch.tensor(
np.stack(derivatives, axis=1),
dtype=torch.get_default_dtype(),
)
# subtract the energies
g.heterograph.apply_nodes(
lambda node: {"u_ref": node.data["u_ref"] - energies},
ntype="g",
)
if "u_ref_prime" in g.nodes["n1"].data:
g.heterograph.apply_nodes(
lambda node: {
"u_ref_prime": node.data["u_ref_prime"] - derivatives
},
ntype="n1",
)
return g
# =============================================================================
# MODULE CLASSES
# =============================================================================
class MoleculeVacuumSimulation(object):
"""Simluate a single molecule system in vaccum.
Parameters
----------
g : `espaloma.Graph`
Input molecular graph.
n_samples : `int`
Number of samples to collect.
n_steps_per_sample : `int`
Number of steps between each sample.
temperature : `float * unit.kelvin`
        Temperature for the simulation.
collision_rate : `float / unit.picosecond`
Collision rate.
timestep : `float * unit.femtosecond`
Time step.
Methods
-------
    simulation_from_graph : Create simulation from molecule.
    run : Run the simulation.
"""
def __init__(
self,
forcefield="gaff-1.81",
n_samples=100,
n_conformers=10,
n_steps_per_sample=1000,
temperature=TEMPERATURE,
collision_rate=COLLISION_RATE,
step_size=STEP_SIZE,
charge_method=None,
):
self.n_samples = n_samples
self.n_steps_per_sample = n_steps_per_sample
self.temperature = temperature
self.collision_rate = collision_rate
self.step_size = step_size
self.forcefield = forcefield
self.n_conformers = n_conformers
self.charge_method = charge_method
def simulation_from_graph(self, g):
""" Create simulation from moleucle """
# assign partial charge
if self.charge_method is not None:
g.mol.assign_partial_charges(self.charge_method)
# parameterize topology
topology = g.mol.to_topology().to_openmm()
generator = SystemGenerator(
small_molecule_forcefield=self.forcefield,
molecules=[g.mol],
)
# create openmm system
system = generator.create_system(
topology,
)
# set epsilon minimum to 0.05 kJ/mol
for force in system.getForces():
if "Nonbonded" in force.__class__.__name__:
force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
for particle_index in range(force.getNumParticles()):
charge, sigma, epsilon = force.getParticleParameters(
particle_index
)
if epsilon < EPSILON_MIN:
force.setParticleParameters(
particle_index, charge, sigma, EPSILON_MIN
)
# use langevin integrator
integrator = openmm.LangevinIntegrator(
self.temperature, self.collision_rate, self.step_size
)
# initialize simulation
simulation = Simulation(
topology=topology,
system=system,
integrator=integrator,
platform=openmm.Platform.getPlatformByName("Reference"),
)
return simulation
def run(self, g, in_place=True):
"""Collect samples from simulation.
Parameters
----------
g : `esp.Graph`
Input graph.
        in_place : `bool`
            If true, attach the samples to the graph and return the modified graph.
        Returns
        -------
        samples : `torch.Tensor`, `shape=(n_samples, n_nodes, 3)`
            Samples (returned if `in_place=False`).
        graph : `esp.Graph`
            Modified graph (returned if `in_place=True`).
"""
# build simulation
simulation = self.simulation_from_graph(g)
import openff.toolkit
# get conformer
g.mol.generate_conformers(
toolkit_registry=openff.toolkit.utils.RDKitToolkitWrapper(),
n_conformers=self.n_conformers,
)
# get number of actual conformers
true_n_conformers = len(g.mol.conformers)
samples = []
for idx in range(true_n_conformers):
# put conformer in simulation
simulation.context.setPositions(g.mol.conformers[idx])
# set velocities
simulation.context.setVelocitiesToTemperature(self.temperature)
# minimize
simulation.minimizeEnergy()
# loop through number of samples
for _ in range(self.n_samples // self.n_conformers):
# run MD for `self.n_steps_per_sample` steps
simulation.step(self.n_steps_per_sample)
# append samples to `samples`
samples.append(
simulation.context.getState(getPositions=True)
.getPositions(asNumpy=True)
.value_in_unit(DISTANCE_UNIT)
)
# if the `samples` array is not filled,
# pick a random conformer to do it again
if len(samples) < self.n_samples:
len_samples = len(samples)
import random
idx = random.choice(list(range(true_n_conformers)))
simulation.context.setPositions(g.mol.conformers[idx])
# set velocities
simulation.context.setVelocitiesToTemperature(self.temperature)
# minimize
simulation.minimizeEnergy()
# loop through number of samples
for _ in range(self.n_samples - len_samples):
# run MD for `self.n_steps_per_sample` steps
simulation.step(self.n_steps_per_sample)
# append samples to `samples`
samples.append(
simulation.context.getState(getPositions=True)
.getPositions(asNumpy=True)
.value_in_unit(DISTANCE_UNIT)
)
assert len(samples) == self.n_samples
# put samples into an array
        samples = np.array(samples)
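# Illustrative usage sketch (assumption; the original method is truncated above):
# MoleculeVacuumSimulation drives short vacuum MD runs to collect conformational
# snapshots for a molecular graph.
def _example_vacuum_sampling(g):
    # `g` is assumed to be an espaloma graph wrapping an OpenFF molecule.
    sampler = MoleculeVacuumSimulation(
        forcefield="gaff-1.81", n_samples=10, n_conformers=2, n_steps_per_sample=100
    )
    samples = sampler.run(g, in_place=False)  # expected shape: (n_samples, n_atoms, 3)
    return samples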
#!/usr/bin/env python
import rospy
import numpy as np
from math import *  # sin, cos, acos, asin, radians
from geometry_msgs.msg import Pose
import transform
class viewpoint_creator:
# initial vp_generator with camera information
def __init__(self,camera):
self.camera = camera
    # compute a viewpoint given the pixel offset (dx, dy) on the image
    # and the transform from the point-cloud coordinate frame to the world frame
def compute_vp(self,dx,dy,dist,mat):
depth = self.camera.depth_image()
cx,cy = depth.center()
u = cx+dx
v = cy+dy
vp = self._viewpoint(depth,u,v,dist,mat)
return vp
    # create a viewpoint at pixel (u, v), at distance d from the surface,
    # given the transform of the camera
def _viewpoint(self,img,u,v,d,mat_c):
# target point in camera frame at pixel u, v
# print("current", mat)
pc = self._position3d(img,u,v)
if pc[2] < 0: # depth
print("invalid depth image")
return None
#print("position at uv", pc)
# target point normal in camera frame
nc = self._normal3d_1(img,u,v)
print("normal at uv", nc)
pv = pc-d*nc
#print("target vp", pv)
# rotate normal in camera frame
a = asin(-nc[1]) # about x
b = atan2(nc[0],nc[2]) # about y
c = 0 # about z
        mat_v = transform.rotation_translation(np.array([a,b,c])
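# Quick illustrative check (assumption, not part of the original file): for the
# normal-to-rotation mapping used in _viewpoint above, a surface normal aligned
# with the camera z-axis should produce zero rotation about x and y.
from math import asin, atan2
nc_check = [0.0, 0.0, 1.0]
a_check = asin(-nc_check[1])               # rotation about x -> 0.0
b_check = atan2(nc_check[0], nc_check[2])  # rotation about y -> 0.0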
import sys,os
sys.path.append('../..')
from lenser import *
import numpy as np
from astropy.io import fits
import time
from scipy.special import gamma
"""
Module: lenser_sim_cat
.. synopsis: Simulates a catalogue of galaxy images
.. module author: <NAME> <<EMAIL>>
.. Create a catalogue of simulated galaxy postage stamps using Lenser.
.. A number of desired galaxies Ngal is specified, galactic and lensing
parameters are randomly generated for each galaxy, an image is
generated for each galaxy, and the resulting Ngal postage stamps are
exported to the path
'Lenser/examples/Catalogues/Simulated_(insert time)/'
"""
# Create path to catalogue and name the catalogue with the time it was created
tm = time.localtime()
tm_str = str(tm[2])+'_'+str(tm[1])+'_'+str(tm[0])+'_'+str(tm[3])+':'+str(tm[4])+':'+str(tm[5])
path_to_cat = '../Catalogues/'
cat_name = 'Simulated_'+tm_str
cat_folder = cat_name+'/'
image_folder = 'ima/'
os.mkdir(path_to_cat+cat_folder)
os.mkdir(path_to_cat+cat_folder+image_folder)
# Number of galaxies in simulated catalogue
Ngal = 1000
# Generate the seeds for np.random.
# .. We need seeds for the random generation of the model parameters, except for the centroid,
# .. plus I0, which is 13-2+1 seeds, as well as for the random noise generation in each postage stamp.
Npars_seed = 12
seed_list = np.arange(0, Npars_seed+Ngal)
# Choose which galaxy catalogue to mimic: COSMOS, EFIGI, or other. If other is chosen,
# you will need to specify the following parameters yourself:
# .. Postage stamp size
# .. Galaxy size in pixels
# .. Range of galaxy brightness,
# .. Stamp noise
# .. Sky background
catalogue_type = 'EFIGI'
# COSMOS type catalogue
if catalogue_type == 'COSMOS':
# Generate non-fit parameters.
# .. These values should be motivated to reflect actual data
# .. Postage stamp size
Nx = 150
Ny = 150
# .. Standard galaxy size (in pixels)
a = 10.
# .. I0
np.random.seed(seed_list[0])
I0_low, I0_up = 0.1, 10.
I0_list = np.random.uniform(low=I0_low, high=I0_up, size=Ngal)
# .. noise1 and noise2
noise1 = 1.3e-3
noise2 = 0.
# .. Background
background = 0.
# Comments
comments = """This catalogue was generated for the purpose of testing the ability of Lenser to reconstruct known parameters.
The postage stamp size, peak brightness I0, and the noisemaps are all chosen to mimic the COSMOS dataset."""
# EFIGI type catalogue
elif catalogue_type == 'EFIGI':
# Generate non-fit parameters.
# .. These values should be motivated to reflect actual data
# .. Postage stamp size
Nx = 255
Ny = 255
# .. Standard galaxy size (in pixels)
a = 25.
# .. I0
np.random.seed(seed_list[0])
I0_low, I0_up = 1.e3, 1.e6
I0_list = np.random.uniform(low=I0_low, high=I0_up, size=Ngal)
# .. noise1 and noise2
noise1 = 2.
gain = 4.75
noise2 = 1/np.sqrt(gain)
# .. Background
background = 0.
# Comments
comments = """This catalogue was generated for the purpose of testing the ability of Lenser to reconstruct known parameters.
The postage stamp size, peak brightness I0, and the noisemaps (and gains) are all chosen to mimic the EFIGI dataset."""
else:
print('Other was chosen for catalogue type. User will specify catalogue-type parameters themselves.')
# Randomly generate galaxy parameters
# .. Set lower and upper bounds for np.random.uniform
# .. Set mean and 1sigma for np.random.normal
# .. Set a seed before each np.random call
# Lensing parameters
# .. First, we have the option whether or not to include shear. By default, shear is included.
# The consequence of including shear is that q and phi will not properly be reconstructed by a Lenser fit,
# due to the shear-ellipticity degeneracy. It is more realistic, however, to include shear fields if one wishes
# to use this script in order to test Lenser's ability to fit to itself.
include_shear = True
if include_shear == True:
# .. We will generate gamma1 and gamma2 from Gaussians and then get psi,ij.
# We will set kappa = 0 (we can arbitrarily make this choice due to the
# mass-sheet degeneracy)
# .. kappa
kappa = 0.
# .. shear standard deviation
sigma_gamma = 0.05
# .. gamma1
np.random.seed(seed_list[1])
gamma1_mean, gamma1_stdv = 0., sigma_gamma/np.sqrt(2)
gamma1_list = np.random.normal(gamma1_mean, gamma1_stdv, size=Ngal)
# .. gamma2
np.random.seed(seed_list[2])
gamma2_mean, gamma2_stdv = 0., sigma_gamma/np.sqrt(2)
gamma2_list = np.random.normal(gamma2_mean, gamma2_stdv, size=Ngal)
# .. psi,11
psi11_list = kappa + gamma1_list
# .. psi,12
psi12_list = gamma2_list
# .. psi,22
psi22_list = kappa - gamma1_list
elif include_shear == False:
# .. psi,11
psi11 = 0.
psi11_list = psi11*np.ones(Ngal)
# .. psi,12
psi12 = 0.
psi12_list = psi12*np.ones(Ngal)
# .. psi,22
psi22 = 0.
psi22_list = psi22*np.ones(Ngal)
# .. We have to be careful when generating the flexion, because not all of psi,ijj
# are independent from one another. We do the following:
# (i). Generate F1 and F2 from Gaussian distributions
# (ii). Use F1 and F2 to calculate the angle of flexion, phi_F
# (iii). Assume a particular analytic lens model, which in this case is a
# singular isothermal sphere (SIS). This allows us to relate first and
# second flexion in an analytic way. We then use F1, F2, and phi_F to
# get G1 and G2
# (iv). Use F1, F2, G1, and G2 to get psi,ijk
# .. Set sigma_F, the standard deviation for first flexion, equal to sigma_F1 and
# sigma_F2 added in quadrature
sigma_F = 1.e-3
# .. F1
np.random.seed(seed_list[3])
F1_mean, F1_stdv = 0., sigma_F/np.sqrt(2)
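# Illustrative sketch (assumption; the original script is truncated above): step (ii)
# of the flexion recipe computes the angle of flexion from the two first-flexion
# components. A minimal stand-alone version with hypothetical draws:
import numpy as np
_rng = np.random.default_rng(0)
_F1_sketch = _rng.normal(0.0, 1e-3 / np.sqrt(2), size=10)
_F2_sketch = _rng.normal(0.0, 1e-3 / np.sqrt(2), size=10)
_phi_F_sketch = np.arctan2(_F2_sketch, _F1_sketch)  # angle of flexion per galaxy
# The SIS-based conversion to G1, G2 (step iii) and to psi,ijk (step iv) is not shown here.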
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that application of operations works correctly in the plugin devices"""
import numpy as np
import pennylane as qml
import pytest
from scipy.linalg import block_diag
from conftest import A, U
np.random.seed(42)
# ==========================================================
# Some useful global variables
# non-parametrized qubit gates
I = np.identity(2)
X = np.array([[0, 1], [1, 0]])
Y = np.array([[0, -1j], [1j, 0]])
Z = np.array([[1, 0], [0, -1]])
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
S = np.diag([1, 1j])
T = np.diag([1, np.exp(1j * np.pi / 4)])
SWAP = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
CZ = np.diag([1, 1, 1, -1])
toffoli = np.diag([1 for i in range(8)])
toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])
CSWAP = block_diag(I, I, SWAP)
# parametrized qubit gates
phase_shift = lambda phi: np.array([[1, 0], [0, np.exp(1j * phi)]])
rx = lambda theta: np.cos(theta / 2)
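# Illustrative completion sketch (assumption; the `rx` line above is truncated by the
# source): the conventional single-qubit rotations are
# RX(theta) = cos(theta/2) I - 1j sin(theta/2) X, and analogously for RY and RZ, e.g.:
ry = lambda theta: np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y
rz = lambda theta: np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z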
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import mindspore._c_dataengine as cde
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
from mindspore.dataset.text import to_str, to_bytes
def test_basic():
x = np.array([["ab", "cde", "121"], ["x", "km", "789"]], dtype='S')
n = cde.Tensor(x)
arr = n.as_array()
np.testing.assert_array_equal(x, arr)
def compare(strings, dtype='S'):
arr = np.array(strings, dtype=dtype)
def gen():
(yield arr,)
data = ds.GeneratorDataset(gen, column_names=["col"])
for d in data.create_tuple_iterator(output_numpy=True):
np.testing.assert_array_equal(d[0], arr.astype('S'))
def test_generator():
compare(["ab"])
compare(["", ""])
compare([""])
compare(["ab", ""])
compare(["ab", "cde", "121"])
compare([["ab", "cde", "121"], ["x", "km", "789"]])
compare([["ab", "", "121"], ["", "km", "789"]])
compare(["ab"], dtype='U')
compare(["", ""], dtype='U')
compare([""], dtype='U')
compare(["ab", ""], dtype='U')
compare(["", ""], dtype='U')
compare(["", "ab"], dtype='U')
compare(["ab", "cde", "121"], dtype='U')
compare([["ab", "cde", "121"], ["x", "km", "789"]], dtype='U')
compare([["ab", "", "121"], ["", "km", "789"]], dtype='U')
line = np.array(["This is a text file.",
"Be happy every day.",
"Good luck to everyone."])
words = np.array([["This", "text", "file", "a"],
["Be", "happy", "day", "b"],
["女", "", "everyone", "c"]])
chinese = np.array(["今天天气太好了我们一起去外面玩吧",
"男默女泪",
"江州市长江大桥参加了长江大桥的通车仪式"])
def test_batching_strings():
def gen():
for row in chinese:
yield (np.array(row),)
data = ds.GeneratorDataset(gen, column_names=["col"])
data = data.batch(2, drop_remainder=True)
for d in data.create_tuple_iterator(output_numpy=True):
np.testing.assert_array_equal(d[0], to_bytes(chinese[0:2]))
def test_map():
def gen():
        yield (np.array(["ab cde 121"], dtype='S'),)
from PIL import Image
import numpy as np
import os
import random
import argparse
def random_crop(img, mask, height, width, num_of_crops,name,stride=1,dir_name='data'):
Image_dir = dir_name + '/Images'
Mask_dir = dir_name + '/Masks'
directories = [dir_name,Image_dir,Mask_dir]
for directory in directories:
if not os.path.exists(directory):
os.makedirs(directory)
max_x = int(((img.shape[0]-height)/stride)+1)
max_y = int(((img.shape[1]-width)/stride)+1)
max_crops = (max_x)*(max_y)
crop_seq = [i for i in range(0,max_crops)]
for i in range(num_of_crops):
crop = random.choice(crop_seq)
#print("crop_value for",i,":",crop)
if crop ==0:
x = 0
y = 0
else:
x = int((crop+1)/max_y)
#print(x)
y = int((crop+1)%max_y)
#print(y)
crop_img_arr = img[x:x+width,y:y+height]
#print(crop_img_arr.shape)
crop_mask_arr = mask[x:x+width,y:y+height]
crop_img = Image.fromarray(crop_img_arr)
crop_mask = Image.fromarray(crop_mask_arr)
img_name = directories[1] + "/" + name + "_" + str(i+1)+".png"
mask_name = directories[2] + "/" + name + "_mask_" + str(i+1)+".png"
crop_img.save(img_name)
crop_mask.save(mask_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_dim', type=int, default=512)
parser.add_argument('--n_crops', type=int, default=50)
parser.add_argument('--datadir', type=str, required=True, help='path/to/data_directory',default='Dataset')
parser.add_argument('--output_dir', type=str, default='data')
args = parser.parse_args()
# Abnormal Fundus/Angio Image pairs
ab = [4,5,6,8,18,21,28]
for i in range(7):
img_name = args.datadir+"/ABNORMAL/"+str(ab[i])+".jpg"
im = Image.open(img_name)
        img_arr = np.asarray(im)
"""
Library Features:
Name: drv_data_io_h03b
Author(s): <NAME> (<EMAIL>)
Date: '20190401'
Version: '1.0.2'
"""
#################################################################################
# Library
import logging
import progressbar
from sys import version_info
from datetime import timedelta
from datetime import datetime
from os import remove
from os.path import exists, isfile
from copy import deepcopy
from numpy import reshape, full, empty, nan, zeros, isnan, asarray, where, delete, unique, concatenate
import numpy.ma as ma
from src.common.analysis.lib_analysis_interpolation_point import interpPointData
from src.common.utils.lib_utils_op_string import defineString, convertUnicode2ASCII
from src.common.utils.lib_utils_op_list import mergeList
from src.common.utils.lib_utils_apps_data import updateDictStructure
from src.common.utils.lib_utils_apps_file import handleFileData, selectFileDriver, zipFileData
from src.common.utils.lib_utils_apps_time import getTimeFrom, getTimeTo, getTimeSteps, checkTimeRef, roundTimeStep, \
findTimeDiff
from src.common.default.lib_default_args import sZipExt as sZipExt_Default
from src.common.default.lib_default_args import sTimeFormat as sTimeFormat_Default
from src.common.default.lib_default_args import sTimeCalendar as sTimeCalendar_Default
from src.common.default.lib_default_args import sTimeUnits as sTimeUnits_Default
from src.common.default.lib_default_conventions import oVarConventions as oVarConventions_Default
from src.common.default.lib_default_conventions import oFileConventions as oFileConventions_Default
from src.common.default.lib_default_args import sLoggerName
from src.hyde.driver.configuration.generic.drv_configuration_debug import Exc
# Logging
oLogStream = logging.getLogger(sLoggerName)
# Debug
#################################################################################
# -------------------------------------------------------------------------------------
# Class data object
class DataObj(dict):
pass
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class to compute time product
class DataProductTime:
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, **kwargs):
self.sVarTimeStep = kwargs['timestep']
self.sVarTimeRun = kwargs['timerun']
self.oVarTime = kwargs['settings']['data']['dynamic']['time']
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to round time to closest time to closest defined time interval
def __roundTimeStep(self, sTimeStep):
oVarTime = self.oVarTime
if 'time_reference_type' in oVarTime:
sTimeUnits = oVarTime['time_reference_type']['units']
iTimeRound = oVarTime['time_reference_type']['rounding']
oTimeSteps = oVarTime['time_reference_type']['steps']
bTimeMM = checkTimeRef(sTimeStep, oTimeMins=oTimeSteps)
if bTimeMM is False:
sTimeRound = roundTimeStep(sTimeStep, sDeltaUnits=sTimeUnits, iDeltaValue=iTimeRound)
return sTimeRound
else:
return sTimeStep
else:
return sTimeStep
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
    # Method to compute reference time (to control file mounting and avoid memory failures)
def __selectRefTime(self, sVarTimeStep, sVarTimeRun):
oVarTimeStep = datetime.strptime(sVarTimeStep, sTimeFormat_Default)
oVarTimeRun = datetime.strptime(sVarTimeRun, sTimeFormat_Default)
oVarTimeDelta = timedelta(seconds=self.oVarTime['time_observed_step'] * self.oVarTime['time_observed_delta'])
oVarTimeCheck = oVarTimeStep + oVarTimeDelta
if oVarTimeCheck > oVarTimeRun:
sVarTimeRef = oVarTimeRun.strftime(sTimeFormat_Default)
elif oVarTimeCheck < oVarTimeRun:
sVarTimeRef = oVarTimeStep.strftime(sTimeFormat_Default)
else:
sVarTimeRef = oVarTimeRun.strftime(sTimeFormat_Default)
iVarTimeDiff = findTimeDiff(sVarTimeRef, sVarTimeStep)
return sVarTimeRef, iVarTimeDiff
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute data time
def computeDataTime(self):
sVarTimeStep = self.sVarTimeStep
sVarTimeRun = self.sVarTimeRun
oVarTime = self.oVarTime
sVarTimeRun = self.__roundTimeStep(sVarTimeRun)
sVarTimeRef, iVarTimeDiff = self.__selectRefTime(sVarTimeStep, sVarTimeRun)
if 'time_observed_step' in oVarTime and 'time_observed_delta' in oVarTime:
iVarTimeObsStep = oVarTime['time_observed_step']
iVarTimeObsDelta = oVarTime['time_observed_delta']
else:
iVarTimeObsStep = 0
iVarTimeObsDelta = 0
if 'time_forecast_step' in oVarTime and 'time_forecast_delta' in oVarTime:
iVarTimeForStep = oVarTime['time_forecast_step']
iVarTimeForDelta = oVarTime['time_forecast_delta']
else:
iVarTimeForStep = 0
iVarTimeForDelta = 0
iVarTimeRefStep = int(iVarTimeDiff / iVarTimeObsDelta)
iVarTimeObsStep = iVarTimeObsStep + iVarTimeRefStep
sVarTimeFrom = getTimeFrom(sVarTimeRef, iVarTimeObsDelta, iVarTimeObsStep)[0]
sVarTimeTo = getTimeTo(sVarTimeRef, iVarTimeForDelta, iVarTimeForStep)[0]
a1oVarTimeObs = getTimeSteps(sVarTimeFrom, sVarTimeRef, iVarTimeObsDelta)
a1oVarTimeFor = getTimeSteps(sVarTimeRef, sVarTimeTo, iVarTimeForDelta)
a1oVarTime = mergeList(a1oVarTimeObs, a1oVarTimeFor)
a1oVarTime.sort()
return a1oVarTime
# -------------------------------------------------------------------------------------
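# Illustrative usage sketch (assumption, not part of the original driver): the time
# product needs a run time, a step time and a minimal settings block whose keys
# mirror those read in computeDataTime. The values below are hypothetical.
def _example_compute_data_time(sTimeStep, sTimeRun):
    oSettings = {'data': {'dynamic': {'time': {
        'time_observed_step': 3, 'time_observed_delta': 3600,
        'time_forecast_step': 0, 'time_forecast_delta': 3600}}}}
    oDriverTime = DataProductTime(timestep=sTimeStep, timerun=sTimeRun, settings=oSettings)
    return oDriverTime.computeDataTime()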
# -------------------------------------------------------------------------------------
# Class to clean data product
class DataProductCleaner:
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, **kwargs):
self.a1oFile = kwargs['file']
self.a1bFlag = kwargs['flag']
try:
self.a1oTime = kwargs['time']
except BaseException:
self.a1oTime = None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to clean selected file(s)
def cleanDataProduct(self):
if isinstance(self.a1bFlag, bool):
self.a1bFlag = [self.a1bFlag]
if isinstance(self.a1oFile, str):
self.a1oFile = [self.a1oFile]
if self.a1bFlag.__len__() < self.a1oFile.__len__():
self.a1bFlag = full(self.a1oFile.__len__(), self.a1bFlag[0], dtype=bool)
for bFlag, oFile in zip(self.a1bFlag, self.a1oFile):
if version_info < (3, 0):
oFile = convertUnicode2ASCII(oFile)
if isinstance(oFile, str):
oFile = [oFile]
if self.a1oTime is None:
for sFile in oFile:
if exists(sFile):
if bFlag:
remove(sFile)
else:
if isinstance(self.a1oTime, str):
self.a1oTime = [self.a1oTime]
for sTime in self.a1oTime:
oTimeTags = {'$yyyy': sTime[0:4],
'$mm': sTime[4:6], '$dd': sTime[6:8], '$HH': sTime[8:10],
'$MM': sTime[10:12]}
for sFile in oFile:
sFile = defineString(deepcopy(deepcopy(sFile)), oTimeTags)
if exists(sFile):
if bFlag:
remove(sFile)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class to analyze data product
class DataProductAnalyzer:
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, **kwargs):
self.sVarTime = kwargs['time']
self.oVarIn = kwargs['settings']['variables']['input']
self.oVarOut = kwargs['settings']['variables']['outcome']
self.oVarData = kwargs['data']
self.sVarFolderTMP = kwargs['temp_data_file']
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute data
def computeDataProduct(self, oDataGeo=None):
# -------------------------------------------------------------------------------------
# Iterate over file variable(s)
oVarWS = self.oVarData
oVarIn = self.oVarIn
oVarOut = self.oVarOut
sVarFolderTMP = self.sVarFolderTMP
oVarSel = DataObj()
# Set progress bar widget(s)
oVarPBarWidgets = [
' ===== Computing data progress: ', progressbar.Percentage(),
' ', progressbar.Bar(marker=progressbar.RotatingMarker()),
' ', progressbar.ETA(),
]
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Iterate over variable(s)
for (sVarKey_IN, oVarDef_IN), (sVarKey_OUT, oVarDef_OUT) in zip(oVarIn.items(), oVarOut.items()):
# -------------------------------------------------------------------------------------
# Get input variable information
oVarType_IN = oVarDef_IN['id']['var_type']
oVarName_IN = oVarDef_IN['id']['var_name']
oVarFile_IN = oVarDef_IN['id']['var_file']
oVarMethod_IN = oVarDef_IN['id']['var_method_compute']
oVarAttrs_IN = oVarDef_IN['attributes']
oVarType_OUT = oVarDef_OUT['id']['var_type']
oVarName_OUT = oVarDef_OUT['id']['var_name']
oVarFile_OUT = oVarDef_OUT['id']['var_file']
oVarMethod_OUT = oVarDef_OUT['id']['var_method_save']
oVarAttrs_OUT = oVarDef_OUT['attributes']
assert sVarKey_IN == sVarKey_OUT
sVarKey = list(set([sVarKey_IN, sVarKey_OUT]))[0]
# Info start about computing variable
oLogStream.info(' ---> Compute algorithm variable: ' + sVarKey + ' ... ')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check variable workspace
if oVarWS:
# -------------------------------------------------------------------------------------
# Check data field(s) availability to compute results
if sVarKey in oVarWS:
# -------------------------------------------------------------------------------------
# Iterate over declared variable(s)
oVarSel[sVarKey] = {}
for iVarID, (sVarType_IN, sVarName_IN, sVarFile_IN, sVarMethod_IN,
sVarType_OUT, sVarName_OUT, sVarFile_OUT, sVarMethod_OUT) in enumerate(zip(
oVarType_IN, oVarName_IN, oVarFile_IN, oVarMethod_IN,
oVarType_OUT, oVarName_OUT, oVarFile_OUT, oVarMethod_OUT)):
# -------------------------------------------------------------------------------------
# Info variable starting
oLogStream.info(' ----> Select product variable ' +
sVarName_IN + ' to ' + sVarName_OUT + ' ... ')
# Check variable field availability to compute results
if sVarName_IN in oVarWS[sVarKey]:
# -------------------------------------------------------------------------------------
# Get data
oVarData = oVarWS[sVarKey][sVarName_IN]
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check fields definition in variable workspace
if ('values' in oVarData) and ('attributes' in oVarData) and (
'times' in oVarData):
# -------------------------------------------------------------------------------------
# Get data and attributes
a2dVarValue = deepcopy(oVarData['values']).astype(float)
a2dVarGeoX = deepcopy(oVarData['longitude']).astype(float)
a2dVarGeoY = deepcopy(oVarData['latitude']).astype(float)
oVarTime = deepcopy(oVarData['times'])
oVarAttrs_IN_SEL = deepcopy(oVarData['attributes'])
# Get attributes using outcome definition(s)
oVarAttrs_OUT_SEL = selectVarAttributes(oVarAttrs_OUT, iVarID)
# Get missing value
if 'Missing_value' in oVarAttrs_OUT_SEL:
dVarMissValue = oVarAttrs_OUT_SEL['Missing_value']
else:
dVarMissValue = -9999.0
# Get scale factor
if 'ScaleFactor' in oVarAttrs_OUT_SEL:
dScaleFactor = oVarAttrs_OUT_SEL['ScaleFactor']
else:
dScaleFactor = 1
# Get fill value
if '_FillValue' in oVarAttrs_OUT_SEL:
dVarFillValue = oVarAttrs_OUT_SEL['_FillValue']
else:
dVarFillValue = -9999.0
# Get units
if 'units' in oVarAttrs_OUT_SEL:
sVarUnits = oVarAttrs_OUT_SEL['units']
else:
sVarUnits = None
# Get valid range
if 'Valid_range' in oVarAttrs_OUT_SEL:
oVarValidRange = asarray(oVarAttrs_OUT_SEL['Valid_range'])
else:
oVarValidRange = None
# Get interpolation radius x
if 'interp_radius_x' in oVarAttrs_OUT_SEL:
dVarRadiusX = float(oVarAttrs_OUT_SEL['interp_radius_x'])
else:
Exc.getExc(' ---> Interpolation radius x not defined! Default value is 0.066', 2, 1)
dVarRadiusX = 0.066
# Get interpolation radius y
if 'interp_radius_y' in oVarAttrs_OUT_SEL:
dVarRadiusY = asarray(oVarAttrs_OUT_SEL['interp_radius_y'])
else:
Exc.getExc(' ---> Interpolation radius y not defined! Default value is 0.066', 2, 1)
dVarRadiusY = 0.066
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check data attributes to control units conversion
if 'units' in oVarAttrs_IN_SEL:
assert oVarAttrs_IN_SEL['units'] == 'kg.m-2.s-1' or oVarAttrs_IN_SEL['units'] == '%'
assert oVarAttrs_IN_SEL['ScaleFactor'] == 1
else:
Exc.getExc(' ---> Variable units are not defined! Mismatch in input data!', 2, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check data attributes to control units conversion
if 'units' in oVarAttrs_OUT_SEL:
assert oVarAttrs_OUT_SEL['units'] == 'mm/h' or \
oVarAttrs_OUT_SEL['units'] == '%'
assert oVarAttrs_OUT_SEL['ScaleFactor'] == 3600 or \
oVarAttrs_OUT_SEL['ScaleFactor'] == 1
else:
Exc.getExc(' ---> Variable units are not defined! Mismatch in outcome data!', 2, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Initialize variable to store results
a3dVarValue_FILTER = zeros(
[oDataGeo.a2dGeoX.shape[0], oDataGeo.a2dGeoY.shape[1], oVarTime.__len__()])
a3dVarValue_FILTER[:, :, :] = nan
# Info start interpolating variable
oLogStream.info(' ----> Interpolate product variable ' +
sVarName_IN + ' to ' + sVarName_OUT + ' ... ')
# Iterate over time step(s)
oPBar = progressbar.ProgressBar(widgets=oVarPBarWidgets, redirect_stdout=True)
for iTimeStep, sTimeStep in enumerate(oPBar(oVarTime)):
# -------------------------------------------------------------------------------------
# Info time step
oLogStream.info(' -----> TimeStep: ' + sTimeStep)
# Get variable data, longitude and latitude
a1dVarValue = a2dVarValue[:, iTimeStep]
a1dVarGeoX = a2dVarGeoX[:, iTimeStep]
a1dVarGeoY = a2dVarGeoY[:, iTimeStep]
# Select defined value(s)
a1iVarIdx = where(isnan(a1dVarValue))[0]
a1dVarValue_SEL = delete(a1dVarValue, a1iVarIdx)
a1dVarGeoY_SEL = delete(a1dVarGeoY, a1iVarIdx)
a1dVarGeoX_SEL = delete(a1dVarGeoX, a1iVarIdx)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Set nan for missing and outbounds values
a1dVarValue_SEL[a1dVarValue_SEL == dVarMissValue] = nan
if oVarValidRange[0] is not None:
a1dVarValue_SEL[a1dVarValue_SEL < oVarValidRange[0]] = nan
if oVarValidRange[1] is not None:
a1dVarValue_SEL[a1dVarValue_SEL > oVarValidRange[1]] = nan
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Interpolate variable over grid reference using nearest method (gdal library)
a2dVarValue_INTERP = interpPointData(
a1dVarValue_SEL, a1dVarGeoX_SEL, a1dVarGeoY_SEL,
oDataGeo.a2dGeoX, oDataGeo.a2dGeoY,
iCodeEPSG=4326, dInterpNoData=-9999.0,
dInterpRadiusX=dVarRadiusX, dInterpRadiusY=dVarRadiusY,
sInterpMethod='nearest', sInterpOption=None,
sFolderTemp=sVarFolderTMP)
# Apply scale factor (from kg m^-2 to mm (kg/m^2 == mm)
a2dVarValue_INTERP = a2dVarValue_INTERP * dScaleFactor
# Set fill value for undefined data (nan)
a1dVarValue_INTERP = deepcopy(a2dVarValue_INTERP.ravel())
a1dVarValue_INTERP[oDataGeo.a1iGeoIndexNaN] = dVarFillValue
a1dVarValue_INTERP[isnan(a1dVarValue_INTERP)] = dVarFillValue
# Check data valid range
if oVarValidRange[0] is not None:
a1iVarNoData_INTERP = where(a1dVarValue_INTERP < oVarValidRange[0])[0]
a1dVarValue_INTERP[a1iVarNoData_INTERP] = dVarMissValue
if oVarValidRange[1] is not None:
a1iVarNoData_INTERP = where(a1dVarValue_INTERP > oVarValidRange[1])[0]
a1dVarValue_INTERP[a1iVarNoData_INTERP] = dVarMissValue
# Reshape data with selected domain
a2dVarValue_DEF = reshape(a1dVarValue_INTERP,
[oDataGeo.a2dGeoX.shape[0],
oDataGeo.a2dGeoY.shape[1]])
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Debug
# import numpy as np
# from mpl_toolkits.basemap import Basemap
# plt.figure(figsize=(8, 8))
# m = Basemap(llcrnrlat=np.min(oDataGeo.a2dGeoY), urcrnrlat=np.max(oDataGeo.a2dGeoY),
# llcrnrlon=np.min(oDataGeo.a2dGeoX), urcrnrlon=np.max(oDataGeo.a2dGeoX),
# resolution='l')
# m.drawcoastlines(color='gray')
# m.drawcountries(color='gray')
# plt.pcolormesh(oDataGeo.a2dGeoX, oDataGeo.a2dGeoY, a2dVarValue_DEF)
# plt.colorbar()
# plt.clim(0, 50)
# plt.show()
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Store results
a3dVarValue_FILTER[:, :, iTimeStep] = a2dVarValue_DEF
# -------------------------------------------------------------------------------------
# Info end interpolating variable
oLogStream.info(' ----> Interpolate product variable ' +
sVarName_IN + ' to ' + sVarName_OUT + ' ... OK')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Add attributes to workspace
if not hasattr(oVarSel, 'iRows'):
oVarSel.iRows = a3dVarValue_FILTER.shape[0]
if not hasattr(oVarSel, 'iCols'):
oVarSel.iCols = a3dVarValue_FILTER.shape[1]
if not hasattr(oVarSel, 'iTime'):
oVarSel.iTime = a3dVarValue_FILTER.shape[2]
if not hasattr(oVarSel, 'oDataTime'):
oVarSel.oDataTime = oVarTime
if 'longitude' not in oVarSel:
oVarSel['longitude'] = oDataGeo.a2dGeoX.ravel()
if 'latitude' not in oVarSel:
oVarSel['latitude'] = oDataGeo.a2dGeoY.ravel()
# Save data
oVarSel[sVarKey][sVarName_OUT] = {}
oVarSel[sVarKey][sVarName_OUT]['results'] = a3dVarValue_FILTER
oVarSel[sVarKey][sVarName_OUT]['attributes'] = oVarAttrs_OUT_SEL
oVarSel[sVarKey][sVarName_OUT]['times'] = oVarTime
# Info variable ending
oLogStream.info(' ----> Select product variable ' +
sVarName_IN + ' to ' + sVarName_OUT + ' ... OK')
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Exit variable key not in workspace
oLogStream.info(' ----> Select product variable ' +
sVarName_IN + ' to ' + sVarName_OUT + ' ... FAILED')
Exc.getExc(' =====> WARNING: variable field is not defined!', 2, 1)
oVarSel[sVarKey][sVarName_OUT] = None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Info end computing variable
oLogStream.info(' ---> Compute algorithm variable: ' + sVarKey + ' ... OK')
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Exit variable key not in workspace
oLogStream.info(' ---> Compute algorithm variable: ' + sVarKey + ' ... FAILED!')
Exc.getExc(' =====> WARNING: data field is not defined!', 2, 1)
oVarSel[sVarKey] = None
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Exit variable key not in workspace
oLogStream.info(' ---> Compute algorithm variable: ' + sVarKey + ' ... FAILED!')
Exc.getExc(' =====> WARNING: data workspace is null!', 2, 1)
oVarSel = None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Return workspace
return oVarSel
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class to finalize data product
class DataProductFinalizer:
# -------------------------------------------------------------------------------------
# Class declaration(s)
oVarCM = None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, **kwargs):
self.sVarTime = kwargs['time']
self.oVarOut = kwargs['settings']['variables']['outcome']
self.oVarData = kwargs['data']
self.oVarFramework = {'rain_product': kwargs['rain_product_file']}
self.oVarColorMap = kwargs['rain_colormap_file']
self.oAlgConventions = kwargs['settings']['algorithm']
self.bVarSubSet = True
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to subset data
@staticmethod
def subsetData(oData_Dynamic, iData_Index=None):
# Check data workspace
if oData_Dynamic is not None:
# Define time index
if iData_Index is None:
iData_Index = 0
else:
iData_Index = iData_Index + 1
iData_Dynamic_Index = oData_Dynamic.iTime - 1
# Check time step availability in data
if iData_Dynamic_Index >= iData_Index:
# Get data
oData_Dynamic_Step = deepcopy(oData_Dynamic)
# Get time(s)
sData_Time = oData_Dynamic.oDataTime[iData_Index]
# Get key(s) and removing static variable(s) [longitude, latitude ... ]
oData_Key = list(oData_Dynamic.keys())
oData_Key.remove('longitude')
oData_Key.remove('latitude')
# Remove attribute(s)
delattr(oData_Dynamic_Step, 'iTime')
delattr(oData_Dynamic_Step, 'oDataTime')
# Iterate over data key(s)
for sData_Key in oData_Key:
# Info variable
oLogStream.info(' ----> SubsetVar: ' + sData_Key)
# Get data
oData_WS = oData_Dynamic[sData_Key]
# Check workspace data availability
if oData_WS:
# Iterate over variable(s)
for sData_Var, oData_Value in oData_WS.items():
# Check variable data availability
if oData_WS[sData_Var]:
# Get results and times
oData_Results = oData_WS[sData_Var]['results']
oData_Time = oData_WS[sData_Var]['times']
# Remove unnecessary fields
oData_Dynamic_Step[sData_Key][sData_Var].pop('values', None)
oData_Dynamic_Step[sData_Key][sData_Var].pop('results', None)
oData_Dynamic_Step[sData_Key][sData_Var].pop('times', None)
# Save data
oData_Dynamic_Step[sData_Key][sData_Var]['results'] = oData_Results[:, :, iData_Index]
oData_Dynamic_Step[sData_Key][sData_Var]['times'] = [oData_Time[iData_Index]]
if not hasattr(oData_Dynamic_Step, 'iTime'):
oData_Dynamic_Step.iTime = 1
if not hasattr(oData_Dynamic_Step, 'oDataTime'):
oData_Dynamic_Step.oDataTime = [sData_Time]
# Information
oLogStream.info(' ---> SubsetData Time: ' + sData_Time + ' Index: ' + str(iData_Index))
else:
sData_Time = None
iData_Index = None
oData_Dynamic_Step = None
else:
Exc.getExc(' ---> Data workspace is null!', 2, 1)
sData_Time = None
iData_Index = None
oData_Dynamic_Step = None
return sData_Time, iData_Index, oData_Dynamic_Step
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get variable colormap
@staticmethod
def __getColorMap(sFileCM):
# Get file driver (according with filename extensions
if isfile(sFileCM):
oFileDriver = selectFileDriver(sFileCM, sFileMode='r')[0]
oFileCM = oFileDriver.oFileLibrary.openFile(sFileCM, 'r')
oFileLines = oFileDriver.oFileLibrary.getLines(oFileCM)
else:
oFileLines = ''
return oFileLines
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to save data
def saveDataProduct(self, oDataGeo=None, oDataDynamic=None):
# -------------------------------------------------------------------------------------
# Get information if single or recursive method
if self.bVarSubSet:
sVarTime = oDataDynamic[0]
iVarIndex = oDataDynamic[1]
oVarData = oDataDynamic[2]
if oVarData is None:
return None
else:
sVarTime = self.sVarTime
iVarIndex = 0
oVarData = oDataDynamic
oVarOut = self.oVarOut
oVarFramework = self.oVarFramework
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Define time tags
oTimeTags = {'$yyyy': sVarTime[0:4],
'$mm': sVarTime[4:6], '$dd': sVarTime[6:8], '$HH': sVarTime[8:10],
'$MM': sVarTime[10:12]}
# Define general and geo-system information
oFileGeneralInfo = updateDictStructure(oFileConventions_Default, self.oAlgConventions, 'general')
oFileGeoSystemInfo = updateDictStructure(oFileConventions_Default, self.oAlgConventions, 'geosystem')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Iterate over outcome variable(s)
for sVarKey, oVarDef in oVarOut.items():
# -------------------------------------------------------------------------------------
# Info start saving variable
oLogStream.info(' ---> Save algorithm variable ' + sVarKey + ' ... ')
# Get outcome variable information
oVarType = oVarDef['id']['var_type']
oVarName = oVarDef['id']['var_name']
oVarFile = oVarDef['id']['var_file']
oVarMethod = oVarDef['id']['var_method_save']
oVarColormap = oVarDef['id']['var_colormap']
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check variable workspace
if oVarData[sVarKey]:
# -------------------------------------------------------------------------------------
# Iterate over declared variable(s)
for iVarID, (oVarFeat, sVarName, sVarFile, sVarMethod, sVarColormap) in enumerate(zip(
oVarType, oVarName, oVarFile, oVarMethod, oVarColormap)):
# -------------------------------------------------------------------------------------
# Variable type
sVarType = oVarFeat[0]
# Get outcome variable colormap
oVarCM = {}
if sVarColormap in self.oVarColorMap:
oVarCM['colormap'] = self.__getColorMap(self.oVarColorMap[sVarColormap])
else:
oVarCM['colormap'] = None
# Get default variable attributes
oVarAttrs = oVarConventions_Default[sVarType]
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check variable data availability
if oVarData[sVarKey][sVarName]:
# -------------------------------------------------------------------------------------
# Check file tag in file definition(s)
if sVarFile in oVarFramework:
# -------------------------------------------------------------------------------------
# Get filename from file definition(s) using file tag in outcome variable(s)
sVarFileName = defineString(deepcopy(oVarFramework[sVarFile]), oTimeTags)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check file saved on disk
if not exists(sVarFileName):
# -------------------------------------------------------------------------------------
# Info create file
oLogStream.info(' ----> Create file ' + sVarFileName + ' ... ')
# Get file driver (according with filename extensions
[oFileDriver, sFileUnzip, _] = selectFileDriver(sVarFileName, sZipExt_Default)
# Open file outcome
oFileData = oFileDriver.oFileLibrary.openFile(sFileUnzip, 'w')
# Write file attributes
oFileDriver.oFileLibrary.writeFileAttrs(oFileData, oFileGeneralInfo)
# Write geo system information
oFileDriver.oFileLibrary.writeGeoSystem(oFileData, oFileGeoSystemInfo)
# Write X, Y, time, nsim, ntime and nens
oFileDriver.oFileLibrary.writeDims(oFileData, 'X', oVarData.iCols)
oFileDriver.oFileLibrary.writeDims(oFileData, 'Y', oVarData.iRows)
oFileDriver.oFileLibrary.writeDims(oFileData, 'time', oVarData.iTime)
oFileDriver.oFileLibrary.writeDims(oFileData, 'nsim', 1)
oFileDriver.oFileLibrary.writeDims(oFileData, 'ntime', 2)
oFileDriver.oFileLibrary.writeDims(oFileData, 'nens', 1)
# Get file dimension(s)
oFileDims = oFileDriver.oFileLibrary.getDims(oFileData)
# Write time information
oFileDriver.oFileLibrary.writeTime(oFileData, 'time', oVarData.oDataTime, 'float64', 'time',
sTimeFormat_Default, sTimeCalendar_Default,
sTimeUnits_Default)
# Write longitude information
sVarNameX = 'longitude'
a2dVarDataX = oDataGeo.a2dGeoX
oVarAttrsX = oVarConventions_Default[sVarNameX]
sVarFormatX = oVarConventions_Default[sVarNameX]['Format']
oFileDriver.oFileLibrary.write2DVar(oFileData, sVarNameX,
a2dVarDataX, oVarAttrsX, sVarFormatX,
sVarDimY=oFileDims['Y']['name'],
sVarDimX=oFileDims['X']['name'])
# Write latitude information
sVarNameY = 'latitude'
a2dVarDataY = oDataGeo.a2dGeoY
oVarAttrsY = oVarConventions_Default[sVarNameY]
sVarFormatY = oVarConventions_Default[sVarNameY]['Format']
oFileDriver.oFileLibrary.write2DVar(oFileData, sVarNameY,
a2dVarDataY, oVarAttrsY, sVarFormatY,
sVarDimY=oFileDims['Y']['name'],
sVarDimX=oFileDims['X']['name'])
# Info create file
oLogStream.info(' ----> Create file ' + sVarFileName + ' ... OK')
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Info get file
oLogStream.info(' ----> Get file ' + sVarFileName + ' previously created ... ')
# Get file driver (according with filename extensions
[oFileDriver, sFileUnzip, _] = selectFileDriver(sVarFileName, sZipExt_Default)
# Open file outcome
oFileData = oFileDriver.oFileLibrary.openFile(sFileUnzip, 'a')
# Get file dimension(s)
oFileDims = oFileDriver.oFileLibrary.getDims(oFileData)
# Info get file
oLogStream.info(' ----> Get file ' + sVarFileName + ' previously created ... OK')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Info start saving variable
oLogStream.info(' -----> Save product variable ' + sVarName + ' ... ')
# Check variable in file handle
if oFileDriver.oFileLibrary.checkVarName(oFileData, sVarName) is False:
# -------------------------------------------------------------------------------------
# Get file dimensions
sVarDimX = oFileDims['X']['name']
sVarDimY = oFileDims['Y']['name']
sVarDimT = oFileDims['time']['name']
# Get var structure
oVarStruct = oVarData[sVarKey][sVarName]
# Define var attribute(s)
oVarAttrs = updateDictStructure(oVarAttrs, oVarStruct['attributes'])
oVarAttrs = updateDictStructure(oVarAttrs,
selectVarAttributes(oVarDef['attributes'], iVarID))
oVarAttrs = updateDictStructure(oVarAttrs, oVarCM)
# Get variable data
oVarResults = oVarStruct['results']
# Get variable format
if 'Format' in oVarStruct['attributes']:
sVarFormat = oVarStruct['attributes']['Format']
else:
sVarFormat = 'f4'
# Check and get library write method
if hasattr(oFileDriver.oFileLibrary, sVarMethod):
# Get write method
oVarMethod = getattr(oFileDriver.oFileLibrary, sVarMethod)
# Store variable (2d and 3d dimensions)
if sVarType == 'var2d':
oVarMethod(oFileData, sVarName, oVarResults, oVarAttrs, sVarFormat,
sVarDimY=sVarDimY, sVarDimX=sVarDimX)
elif sVarType == 'var3d':
oVarMethod(oFileData, sVarName, oVarResults, oVarAttrs, sVarFormat,
sVarDimT=sVarDimT, sVarDimY=sVarDimY, sVarDimX=sVarDimX)
# Info end saving variable
oLogStream.info(' -----> Save product variable ' + sVarName + ' ... OK ')
else:
# Exit without saving variable
oLogStream.info(' -----> Save product variable ' + sVarName + ' ... FAILED ')
Exc.getExc(' =====> WARNING: selected method is not available in io library', 2, 1)
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Info skip saving variable
oLogStream.info(' -----> Save product variable ' + sVarName + ' ... SKIPPED ')
Exc.getExc(' =====> WARNING: variable is already saved in selected file', 2, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Info start closing and zipping file
oLogStream.info(' ----> Close and zip file ' + sVarFileName + ' ... ')
# Close file
oFileDriver.oFileLibrary.closeFile(oFileData)
# Zip file
zipFileData(sFileUnzip, sZipExt_Default)
# Info end closing and zipping file
oLogStream.info(' ----> Close and zip file ' + sVarFileName + ' ... OK ')
# Info end saving variable
oLogStream.info(' ---> Save algorithm variable ' + sVarKey + ' ... OK ')
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Exit without saving variable
oLogStream.info(' ---> Save algorithm variable ' + sVarKey + ' ... FAILED')
Exc.getExc(' =====> WARNING: variable is not declared in file', 2, 1)
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Exit without saving variable
oLogStream.info(' ---> Save algorithm variable ' + sVarKey + ' ... FAILED')
Exc.getExc(' =====> WARNING: variable is null', 2, 1)
# -------------------------------------------------------------------------------------
# End iterations of variables
# -------------------------------------------------------------------------------------
else:
# -------------------------------------------------------------------------------------
# Exit without saving variable
oLogStream.info(' ---> Save algorithm variable ' + sVarKey + ' ... FAILED')
Exc.getExc(' =====> WARNING: variable data is undefined', 2, 1)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Active recursive method
if self.bVarSubSet:
self.saveDataProduct(oDataGeo, self.subsetData(oData_Dynamic=self.oVarData, iData_Index=iVarIndex))
else:
return None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Class to build data product
class DataProductBuilder:
# -------------------------------------------------------------------------------------
# Class declaration(s)
oVarData = DataObj()
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to initialize class
def __init__(self, **kwargs):
self.oVarTime = kwargs['time']
self.oVarDef = kwargs['settings']['variables']['input']
self.oVarFramework = {'rain_data': kwargs['rain_file']}
self.__defineVar()
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define data variable
def __defineVar(self):
# -------------------------------------------------------------------------------------
# Define variable(s) workspace by conventions and defined input field(s)
for sVarKey, oVarValue in self.oVarDef.items():
self.oVarData[sVarKey] = {}
oVarID = oVarValue['id']['var_type'][0]
if isinstance(oVarID, list):
sVarID = oVarID[0]
else:
sVarID = oVarID
if 'attributes' in oVarValue:
oVarAttrs = oVarValue['attributes']
for sAttrKey, oAttrValue in oVarAttrs.items():
if isinstance(oAttrValue, str):
if sAttrKey in oVarConventions_Default[sVarID].keys():
self.oVarData[sVarKey][sAttrKey] = {}
self.oVarData[sVarKey][sAttrKey] = deepcopy(oVarConventions_Default[sVarID][sAttrKey])
elif isinstance(oAttrValue, list):
if sAttrKey in oVarConventions_Default[sVarID].keys():
self.oVarData[sVarKey][sAttrKey] = {}
self.oVarData[sVarKey][sAttrKey] = deepcopy(oVarConventions_Default[sVarID][sAttrKey])
# Update variable workspace
for sVarKey, oVarValue in self.oVarDef.items():
oVarID = oVarValue['id']['var_type'][0]
if isinstance(oVarID, list):
sVarID = oVarID[0]
else:
sVarID = oVarID
for sAttrKey, oAttrValue in oVarConventions_Default[sVarID].items():
self.oVarData[sVarKey][sAttrKey] = {}
self.oVarData[sVarKey][sAttrKey] = oAttrValue
if 'attributes' in oVarValue:
oVarAttrs = oVarValue['attributes']
for sAttrKey, oAttrValue in oVarAttrs.items():
self.oVarData[sVarKey][sAttrKey] = {}
self.oVarData[sVarKey][sAttrKey] = oAttrValue
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to get data
def getDataProduct(self, a1dGeoBox):
# -------------------------------------------------------------------------------------
# Iterate over file variable(s)
oVarWS = {}
for sVarKey, oVarDef in self.oVarDef.items():
# -------------------------------------------------------------------------------------
# Get input variable information
oVarType = oVarDef['id']['var_type']
oVarName = oVarDef['id']['var_name']
oVarFile = oVarDef['id']['var_file']
oVarMethod = oVarDef['id']['var_method_get']
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check file tag in file definition(s)
oVarGet = {}
if sVarKey in self.oVarFramework:
# -------------------------------------------------------------------------------------
# Iterate over time(s)
oVarWS[sVarKey] = {}
for iVarTime, sVarTime in enumerate(self.oVarTime):
# -------------------------------------------------------------------------------------
# Define time tags
oTimeTags = {'$yyyy': sVarTime[0:4],
'$mm': sVarTime[4:6], '$dd': sVarTime[6:8], '$HH': sVarTime[8:10],
'$MM': sVarTime[10:12]}
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Iterate over declared variable(s)
for iVarID, (sVarType, sVarName, sVarFile, sVarMethod) in enumerate(zip(
oVarType, oVarName, oVarFile, oVarMethod)):
# -------------------------------------------------------------------------------------
# Get filename from file definition(s) using file tag in outcome variable(s)
sVarFileName = defineString(deepcopy(self.oVarFramework[sVarFile]), oTimeTags)
# Info start about selected file
oLogStream.info(' ---> Get file: ' + sVarFileName + ' (Time: ' + sVarTime + ') ... ')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check file saved on disk
if exists(sVarFileName):
# -------------------------------------------------------------------------------------
# Get data
[oFileHandle, oFileDriver, bFileOpen] = handleFileData(sVarFileName)
oVarAttribute = selectVarAttributes(self.oVarData[sVarKey], iVarID)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Check file opening
if bFileOpen is True:
# -------------------------------------------------------------------------------------
# Info start about getting variable
oLogStream.info(' ----> Algorithm variable: ' +
sVarKey + ' - Product variable: ' +
sVarName + ' ...')
# Init variable workspace
if sVarName not in oVarGet:
oVarGet[sVarName] = {}
oVarGet[sVarName]['values'] = None
oVarGet[sVarName]['longitude'] = None
oVarGet[sVarName]['latitude'] = None
oVarGet[sVarName]['parameters'] = None
oVarGet[sVarName]['attributes'] = None
oVarGet[sVarName]['times'] = None
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Get variable data and attributes
if 'Missing_value' in oVarAttribute:
dVarMissingValue = oVarAttribute['Missing_value']
else:
dVarMissingValue = -9999
assert sVarName in oFileHandle.variables
a1oVarData = oFileHandle[sVarName][:][0, :, :].ravel()
a1oVarGeoX = oFileHandle['longitude'][:].ravel()
a1oVarGeoY = oFileHandle['latitude'][:].ravel()
if ma.is_masked(a1oVarData):
a1dVarData = a1oVarData.data
else:
a1dVarData = a1oVarData
if ma.is_masked(a1oVarGeoX):
a1dVarGeoX = a1oVarGeoX.data
else:
a1dVarGeoX = a1oVarGeoX
if ma.is_masked(a1oVarGeoY):
a1dVarGeoY = a1oVarGeoY.data
else:
a1dVarGeoY = a1oVarGeoY
a1iVarData_NoData = where(a1dVarData >= dVarMissingValue)[0]
a1iVarGeoX_NoData = where(a1dVarGeoX == dVarMissingValue)[0]
a1iVarGeoY_NoData =
|
where(a1dVarGeoY == dVarMissingValue)
|
numpy.where
|
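# Illustrative aside (not part of the dataset row above): a minimal sketch of the
# numpy.where pattern used for the *_NoData indices in the prompt, where a single
# boolean condition yields the flat indices of cells holding the missing-value
# sentinel. The values below are invented.
import numpy as np

flat_values = np.array([0.2, -9999.0, 1.5, -9999.0, 3.0])
missing_value = -9999.0
nodata_idx = np.where(flat_values == missing_value)[0]
print(nodata_idx)  # -> [1 3]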
import unittest
import os.path
import numpy as np
from astropy.time import Time
from astropy.coordinates import ICRS, AltAz
import astropy.units as u
import astropy.io
from desisurvey.test.base import Tester, read_horizons_moon_ephem
import desisurvey.ephem
from desisurvey.ephem import get_ephem, get_grid, get_object_interpolator
from desisurvey.utils import get_location
class TestEphemerides(Tester):
@classmethod
def setUpClass(cls):
super(TestEphemerides, cls).setUpClass()
# Read moon ephemerides for the first week of 2020.
cls.table = read_horizons_moon_ephem()
def test_get_ephem(self):
"""Test memory and disk caching"""
# Create and save to memory only
id1 = id(get_ephem(write_cache=False))
# Load from memory
id2 = id(get_ephem())
self.assertEqual(id1, id2)
# Save to disk.
id3 = id(get_ephem())
self.assertEqual(id1, id3)
# Clear memory cache.
        desisurvey.ephem._ephem = None
# Read from disk.
id4 = id(get_ephem())
self.assertEqual(id1, id4)
def test_ephem_table(self):
"""Test basic table structure"""
ephem = get_ephem()
self.assertEqual(ephem.num_nights, (ephem.stop_date - ephem.start_date).days)
self.assertEqual(id(ephem._table), id(ephem.table))
etable = ephem._table
self.assertEqual(len(etable), 59)
self.assertTrue(np.all(etable['dusk'] > etable['noon']))
self.assertTrue(np.all(etable['dawn'] > etable['dusk']))
self.assertTrue(
|
np.all(etable['dusk'] > etable['brightdusk'])
|
numpy.all
|
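# Illustrative aside (not part of the dataset row above): a minimal sketch of how
# numpy.all reduces an elementwise column comparison to one boolean, mirroring the
# dusk/dawn ordering checks in the ephemerides test. The arrays are toy values,
# not real ephemerides.
import numpy as np

noon = np.array([0.50, 1.50, 2.50])
dusk = noon + 0.30
dawn = dusk + 0.40
assert np.all(dusk > noon)
assert np.all(dawn > dusk)
print("ordering checks passed")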
####-Changed-#### # noqa
# The LanguageModelSampler was moved here from its original place in anchor_text.py. This is in order to scope the
# tensorflow import.
#################
import string
from functools import partial
####-Changed-#### # noqa
# -from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple,
# - Type, Union)
from typing import (Dict, List, Optional, Tuple, Type)
#################
import numpy as np
import tensorflow as tf
from alibi.utils.lang_model import LanguageModel
####-Changed-#### # noqa
from alibi.explainers.anchors.text_samplers import AnchorTextSampler
#################
class LanguageModelSampler(AnchorTextSampler):
# filling procedures
FILLING_PARALLEL: str = 'parallel' #: Parallel filling procedure.
FILLING_AUTOREGRESSIVE = 'autoregressive' #: Autoregressive filling procedure. Considerably slow.
def __init__(self, model: LanguageModel, perturb_opts: dict, ):
"""
Initialize language model sampler. This sampler replaces words with the ones
sampled according to the output distribution of the language model. There are
two modes to use the sampler: ``'parallel'`` and ``'autoregressive'``. In the ``'parallel'``
        mode, all words are replaced simultaneously. In the ``'autoregressive'`` mode, the words
        are replaced one by one, starting from left to right. Thus the following words
        are conditioned on the previously predicted words.
Parameters
----------
model
Transformers masked language model.
perturb_opts
Perturbation options.
"""
super().__init__()
# set language model and perturbation options
self.model = model
self.perturb_opts = perturb_opts
# Define language model's vocab
vocab: Dict[str, int] = self.model.tokenizer.get_vocab()
# Define masking sampling tensor. This tensor is used to avoid sampling
# certain tokens from the vocabulary such as: subwords, punctuation, etc.
self.subwords_mask = np.zeros(len(vocab.keys()), dtype=np.bool_)
for token in vocab:
# Add subwords in the sampling mask. This means that subwords
# will not be considered when sampling for the masked words.
if self.model.is_subword_prefix(token):
self.subwords_mask[vocab[token]] = True
continue
# Add punctuation in the sampling mask. This means that the
# punctuation will not be considered when sampling for the masked words.
sample_punctuation: bool = perturb_opts.get('sample_punctuation', False)
punctuation: str = perturb_opts.get('punctuation', string.punctuation)
if (not sample_punctuation) and self.model.is_punctuation(token, punctuation):
self.subwords_mask[vocab[token]] = True
# define head, tail part of the text
self.head, self.tail = '', '' # type: str, str
self.head_tokens, self.tail_tokens = [], [] # type: List[str], List[str]
def get_sample_ids(self,
punctuation: str = string.punctuation,
stopwords: Optional[List[str]] = None,
**kwargs) -> None:
"""
Find indices in words which can be perturbed.
Parameters
----------
punctuation
String of punctuation characters.
stopwords
List of stopwords.
**kwargs
Other arguments. Not used.
"""
# transform stopwords to lowercase
if stopwords:
stopwords = [w.lower().strip() for w in stopwords]
# Initialize list of indices allowed to be perturbed
ids_sample = list(np.arange(len(self.head_tokens)))
# Define partial function for stopwords checking
is_stop_word = partial(
self.model.is_stop_word,
tokenized_text=self.head_tokens,
punctuation=punctuation,
stopwords=stopwords
)
# lambda expressions to check for a subword
subword_cond = lambda token, idx: self.model.is_subword_prefix(token) # noqa: E731
        # lambda expression to check for a stopword
stopwords_cond = lambda token, idx: is_stop_word(start_idx=idx) # noqa: E731
# lambda expression to check for punctuation
punctuation_cond = lambda token, idx: self.model.is_punctuation(token, punctuation) # noqa: E731
# Gather all in a list of conditions
conds = [punctuation_cond, stopwords_cond, subword_cond]
# Remove indices of the tokens that are not allowed to be masked
for i, token in enumerate(self.head_tokens):
if any([cond(token, i) for cond in conds]):
ids_sample.remove(i)
# Save the indices allowed to be masked and the corresponding mapping.
# The anchor base algorithm alters indices one by one. By saving the mapping
# and sending only the initial token of a word, we avoid unnecessary sampling.
# E.g. word = token1 ##token2. Instead of trying two anchors (1 0), (1, 1) - which are
# equivalent because we take the full word, just try one (1)
self.ids_sample =
|
np.array(ids_sample)
|
numpy.array
|
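# Illustrative aside (not part of the dataset row above): a minimal sketch of the
# final step of get_sample_ids, converting the surviving token indices to a numpy
# array. The token list and the filtered indices are invented for the example.
import numpy as np

tokens = ["the", "model", ",", "works"]
filtered_out = {0, 2}  # pretend a stopword and a punctuation token were removed
ids_sample = [i for i in range(len(tokens)) if i not in filtered_out]
ids_sample = np.array(ids_sample)
print(ids_sample)  # -> [1 3]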
"""
Core segmentation functions for SuRVoS Super-region segmentation
"""
import os
import sys
from functools import partial
from typing import List, Optional
import numpy as np
import pandas as pd
import sklearn
from loguru import logger
from sklearn import svm
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.ensemble import (
AdaBoostClassifier,
ExtraTreesClassifier,
GradientBoostingClassifier,
IsolationForest,
RandomForestClassifier,
)
from xgboost import XGBClassifier
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.random_projection import SparseRandomProjection
from sklearn.svm import SVC
from survos2.improc.segmentation._qpbo import solve_aexpansion, solve_binary
from survos2.improc.segmentation.appearance import refine
from survos2.improc.segmentation.mappings import rmeans
from survos2.server.features import features_factory
from survos2.server.model import SRData, SRPrediction
from survos2.server.region_labeling import rlabels
from survos2.server.state import cfg
from survos2.server.supervoxels import invrmap, superregion_factory
PRED_MIN = 0 # label value to use as minimum prediction label
def obtain_classifier(clf_p):
if clf_p["clf"] == "Ensemble":
mode = "ensemble"
if clf_p["type"] == "xgb":
clf = XGBClassifier(
n_estimators=clf_p["n_estimators"],
max_depth=clf_p["max_depth"],
n_jobs=clf_p["n_jobs"],
)
if clf_p["type"] == "rf":
clf = RandomForestClassifier(
n_estimators=clf_p["n_estimators"],
max_depth=clf_p["max_depth"],
n_jobs=clf_p["n_jobs"],
)
elif clf_p["type"] == "erf":
clf = ExtraTreesClassifier(
n_estimators=clf_p["n_estimators"],
max_depth=clf_p["max_depth"],
n_jobs=clf_p["n_jobs"],
)
elif clf_p["type"] == "ada":
clf = AdaBoostClassifier(
n_estimators=clf_p["n_estimators"], learning_rate=clf_p["learning_rate"]
)
else:
clf = GradientBoostingClassifier(
n_estimators=clf_p["n_estimators"],
max_depth=clf_p["max_depth"],
learning_rate=clf_p["learning_rate"],
subsample=clf_p["subsample"],
)
elif clf_p["clf"] == "SVM":
mode = "svm"
clf = SVC(
C=clf_p["C"], gamma=clf_p["gamma"], kernel=clf_p["kernel"], probability=True
)
elif clf_p["clf"] == "sgd":
mode = "sgd"
clf = SGDClassifier(
loss=clf_p["loss"],
penalty=clf_p["penalty"],
alpha=clf_p["alpha"],
n_iter=clf_p["n_iter"],
)
else:
raise Exception("Classifier not supported")
return clf, mode
def train(X_train, y_train, predict_params, project=None, rnd=42, **kwargs):
logger.debug(f"Using projection {project} and predict params {predict_params}")
if project == "rproj":
proj = SparseRandomProjection(n_components=X_train.shape[1], random_state=rnd)
logger.debug(f"Projection is {proj}")
X_train = proj.fit_transform(X_train)
elif project == "std":
proj = StandardScaler()
logger.debug(f"Projection is {proj}")
X_train = proj.fit_transform(X_train)
elif project == "pca":
proj = PCA(n_components="mle", whiten=True, random_state=rnd)
logger.debug(f"Projection is {proj}")
X_train = proj.fit_transform(X_train)
elif project == "rbf":
proj = RBFSampler(n_components=max(X_train.shape[1], 50), random_state=rnd)
logger.debug(f"Projection is {proj}")
X_train = proj.fit_transform(X_train)
else:
proj = None
kwargs.setdefault("random_state", rnd)
clf, mode = obtain_classifier(predict_params)
logger.debug(
f"Obtained classifier {clf}, fitting on {X_train.shape}, {y_train.shape}"
)
clf.fit(X_train, y_train)
return clf, proj
def predict(X, clf, proj=None, label=True, probs=False, log=False):
if proj is not None:
logger.debug(f"Predicting with projection {proj}")
X = proj.transform(X)
result = {}
if probs:
result["probs"] = clf.predict_proba(X)
if log:
result["log_probs"] = clf.predict_log_proba(X)
if label:
result["class"] = clf.predict(X)
return result
def train_and_classify_regions(
features_stack: np.ndarray,
annotation_volume: np.ndarray,
sr: SRData,
mask: Optional[np.ndarray],
superseg_cfg: dict,
):
"""Prepare superregions and predict
Arguments:
features_stack {stacked image volumes} -- Stack of filtered volumes to use for features
annotation_volume {image volume} -- Annotation volume (label image)
sr {image volume} -- Prepared superregions SRData
Returns:
(volume, volume, volume) -- tuple of raw prediction, prediction mapped to labels and confidence map
"""
logger.debug(f"Feature stack: {features_stack.shape}")
logger.debug(f"Using annotation volume of shape {annotation_volume.shape}")
Yr = rlabels(
annotation_volume.astype(np.uint16), sr.supervoxel_vol.astype(np.uint32)
) # unsigned char and unsigned int required
logger.debug(f"Unique labels in anno: {
|
np.unique(annotation_volume)
|
numpy.unique
|
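# Illustrative aside (not part of the dataset row above): a minimal sketch of
# numpy.unique collapsing a label volume to its distinct labels, which is what the
# interrupted logging call in train_and_classify_regions reports. The annotation
# volume below is a toy array.
import numpy as np

annotation_volume = np.array([[[0, 0, 1], [2, 1, 0]]], dtype=np.uint16)
print(np.unique(annotation_volume))  # -> [0 1 2]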
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 14:55:02 2021
@author: <NAME>
Copyright 2021 <NAME>
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
import numpy as np
from glob import glob
import pandas as pd
import vtk
from vtk.util import numpy_support
from vtk.numpy_interface import dataset_adapter as dsa
import datetime
from sklearn.cluster import KMeans
import argparse
from scipy.spatial import cKDTree
vtk_version = vtk.vtkVersion.GetVTKSourceVersion().split()[-1].split('.')[0]
class Ring:
def __init__(self, index, name, points_num, center_point, distance, polydata):
self.id = index
self.name = name
self.np = points_num
self.center = center_point
self.ap_dist = distance
self.vtk_polydata = polydata
def parser():
parser = argparse.ArgumentParser(description='Generate boundaries.')
parser.add_argument('--mesh',
type=str,
default="",
help='path to meshname')
parser.add_argument('--LAA',
type=str,
default="",
help='LAA apex point index, leave empty if no LA')
parser.add_argument('--RAA',
type=str,
default="",
help='RAA apex point index, leave empty if no RA')
parser.add_argument('--LAA_base',
type=str,
default="",
help='LAA basis point index, leave empty if no LA')
parser.add_argument('--RAA_base',
type=str,
default="",
help='RAA basis point index, leave empty if no RA')
return parser
def smart_reader(path):
extension = str(path).split(".")[-1]
if extension == "vtk":
data_checker = vtk.vtkDataSetReader()
data_checker.SetFileName(str(path))
data_checker.Update()
if data_checker.IsFilePolyData():
reader = vtk.vtkPolyDataReader()
elif data_checker.IsFileUnstructuredGrid():
reader = vtk.vtkUnstructuredGridReader()
elif extension == "vtp":
reader = vtk.vtkXMLPolyDataReader()
elif extension == "vtu":
reader = vtk.vtkXMLUnstructuredGridReader()
elif extension == "obj":
reader = vtk.vtkOBJReader()
else:
print("No polydata or unstructured grid")
reader.SetFileName(str(path))
reader.Update()
output = reader.GetOutput()
return output
def label_atrial_orifices_TOP_epi_endo(mesh, LAA_id="", RAA_id="", LAA_base_id="", RAA_base_id=""):
"""Extrating Rings"""
print('Extracting rings...')
mesh_surf = smart_reader(mesh)
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputData(mesh_surf)
geo_filter.Update()
mesh_surf = geo_filter.GetOutput()
centroids = dict()
extension = mesh.split('.')[-1]
mesh = mesh[:-(len(extension)+1)]
meshname = mesh.split("/")[-1]
outdir = "{}_surf".format(mesh)
if not os.path.exists(outdir):
os.makedirs(outdir)
fname = glob(outdir+'/ids_*')
for r in fname:
os.remove(r)
if (LAA_id != "" and RAA_id != ""):
LA_ap_point = mesh_surf.GetPoint(int(LAA_id))
RA_ap_point = mesh_surf.GetPoint(int(RAA_id))
centroids["LAA"] = LA_ap_point
centroids["RAA"] = RA_ap_point
if (LAA_base_id != "" and RAA_base_id != ""):
LA_bs_point = mesh_surf.GetPoint(int(LAA_base_id))
RA_bs_point = mesh_surf.GetPoint(int(RAA_base_id))
centroids["LAA_base"] = LA_bs_point
centroids["RAA_base"] = RA_bs_point
connect = vtk.vtkConnectivityFilter()
connect.SetInputConnection(geo_filter.GetOutputPort())
connect.SetExtractionModeToAllRegions()
connect.ColorRegionsOn()
connect.Update()
mesh_conn=connect.GetOutput()
mesh_conn.GetPointData().GetArray("RegionId").SetName("RegionID")
id_vec = numpy_support.vtk_to_numpy(mesh_conn.GetPointData().GetArray("RegionID"))
# It can happen that the connectivity filter changes the ids
loc = vtk.vtkPointLocator()
loc.SetDataSet(mesh_conn)
loc.BuildLocator()
LAA_id = loc.FindClosestPoint(LA_ap_point)
LA_tag = id_vec[int(LAA_id)]
RA_tag = id_vec[int(RAA_id)]
thr = vtk.vtkThreshold()
thr.SetInputData(mesh_conn)
thr.ThresholdBetween(LA_tag,LA_tag)
thr.Update()
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputConnection(thr.GetOutputPort())
geo_filter.Update()
idFilter = vtk.vtkIdFilter()
idFilter.SetInputConnection(geo_filter.GetOutputPort())
if int(vtk_version) >= 9:
idFilter.SetPointIdsArrayName('Ids')
idFilter.SetCellIdsArrayName('Ids')
else:
idFilter.SetIdsArrayName('Ids')
idFilter.Update()
LA = idFilter.GetOutput()
vtkWrite(LA, outdir+'/LA.vtp')
loc = vtk.vtkPointLocator()
loc.SetDataSet(LA)
loc.BuildLocator()
LAA_id = loc.FindClosestPoint(LA_ap_point)
if LAA_base_id != "":
loc = vtk.vtkPointLocator()
loc.SetDataSet(LA)
loc.BuildLocator()
LAA_base_id = loc.FindClosestPoint(LA_bs_point)
b_tag = np.zeros((LA.GetNumberOfPoints(),))
LA_rings = detect_and_mark_rings(LA, LA_ap_point)
b_tag, centroids = mark_LA_rings(LAA_id, LA_rings, b_tag, centroids, outdir, LA)
dataSet = dsa.WrapDataObject(LA)
dataSet.PointData.append(b_tag, 'boundary_tag')
vtkWrite(dataSet.VTKObject, outdir+'/LA_boundaries_tagged.vtp'.format(mesh))
thr.ThresholdBetween(RA_tag,RA_tag)
thr.Update()
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputConnection(thr.GetOutputPort())
geo_filter.Update()
idFilter = vtk.vtkIdFilter()
idFilter.SetInputConnection(geo_filter.GetOutputPort())
if int(vtk_version) >= 9:
idFilter.SetPointIdsArrayName('Ids')
idFilter.SetCellIdsArrayName('Ids')
else:
idFilter.SetIdsArrayName('Ids')
idFilter.Update()
RA = idFilter.GetOutput()
loc = vtk.vtkPointLocator()
loc.SetDataSet(RA)
loc.BuildLocator()
RAA_id = loc.FindClosestPoint(RA_ap_point)
if LAA_base_id != "":
loc = vtk.vtkPointLocator()
loc.SetDataSet(RA)
loc.BuildLocator()
RAA_base_id = loc.FindClosestPoint(RA_bs_point)
vtkWrite(RA, outdir+'/RA.vtp')
b_tag = np.zeros((RA.GetNumberOfPoints(),))
RA_rings = detect_and_mark_rings(RA, RA_ap_point)
b_tag, centroids, RA_rings = mark_RA_rings(RAA_id, RA_rings, b_tag, centroids, outdir)
cutting_plane_to_identify_tv_f_tv_s_epi_endo(mesh, RA, RA_rings, outdir)
dataSet = dsa.WrapDataObject(RA)
dataSet.PointData.append(b_tag, 'boundary_tag')
vtkWrite(dataSet.VTKObject, outdir+'/RA_boundaries_tagged.vtp'.format(mesh))
elif RAA_id == "":
vtkWrite(geo_filter.GetOutput(), outdir+'/LA.vtp'.format(mesh))
LA_ap_point = mesh_surf.GetPoint(int(LAA_id))
centroids["LAA"] = LA_ap_point
idFilter = vtk.vtkIdFilter()
idFilter.SetInputConnection(geo_filter.GetOutputPort())
if int(vtk_version) >= 9:
idFilter.SetPointIdsArrayName('Ids')
idFilter.SetCellIdsArrayName('Ids')
else:
idFilter.SetIdsArrayName('Ids')
idFilter.Update()
LA = idFilter.GetOutput()
LA_rings = detect_and_mark_rings(LA, LA_ap_point)
b_tag = np.zeros((LA.GetNumberOfPoints(),))
b_tag, centroids = mark_LA_rings(LAA_id, LA_rings, b_tag, centroids, outdir, LA)
dataSet = dsa.WrapDataObject(LA)
dataSet.PointData.append(b_tag, 'boundary_tag')
vtkWrite(dataSet.VTKObject, outdir+'/LA_boundaries_tagged.vtp'.format(mesh))
elif LAA_id == "":
vtkWrite(geo_filter.GetOutput(), outdir+'/RA.vtp'.format(mesh))
RA_ap_point = mesh_surf.GetPoint(int(RAA_id))
idFilter = vtk.vtkIdFilter()
idFilter.SetInputConnection(geo_filter.GetOutputPort())
if int(vtk_version) >= 9:
idFilter.SetPointIdsArrayName('Ids')
idFilter.SetCellIdsArrayName('Ids')
else:
idFilter.SetIdsArrayName('Ids')
idFilter.Update()
centroids["RAA"] = RA_ap_point
RA = idFilter.GetOutput()
RA_rings = detect_and_mark_rings(RA, RA_ap_point)
b_tag = np.zeros((RA.GetNumberOfPoints(),))
b_tag, centroids, RA_rings = mark_RA_rings(RAA_id, RA_rings, b_tag, centroids, outdir)
cutting_plane_to_identify_tv_f_tv_s_epi_endo(mesh, RA, RA_rings, outdir)
dataSet = dsa.WrapDataObject(RA)
dataSet.PointData.append(b_tag, 'boundary_tag')
vtkWrite(dataSet.VTKObject, outdir+'/RA_boundaries_tagged.vtp'.format(mesh))
df = pd.DataFrame(centroids)
df.to_csv(outdir+"/rings_centroids.csv", float_format="%.2f", index=False)
def run():
args = parser().parse_args()
label_atrial_orifices_TOP_epi_endo(args.mesh, args.LAA, args.RAA, args.LAA_base, args.RAA_base)
def detect_and_mark_rings(surf, ap_point):
boundaryEdges = vtk.vtkFeatureEdges()
boundaryEdges.SetInputData(surf)
boundaryEdges.BoundaryEdgesOn()
boundaryEdges.FeatureEdgesOff()
boundaryEdges.ManifoldEdgesOff()
boundaryEdges.NonManifoldEdgesOff()
boundaryEdges.Update()
"Splitting rings"
connect = vtk.vtkConnectivityFilter()
connect.SetInputData(boundaryEdges.GetOutput())
connect.SetExtractionModeToAllRegions()
connect.Update()
num = connect.GetNumberOfExtractedRegions()
connect.SetExtractionModeToSpecifiedRegions()
rings = []
for i in range(num):
connect.AddSpecifiedRegion(i)
connect.Update()
surface = connect.GetOutput()
# Clean unused points
geo_filter = vtk.vtkGeometryFilter()
geo_filter.SetInputData(surface)
geo_filter.Update()
surface = geo_filter.GetOutput()
cln = vtk.vtkCleanPolyData()
cln.SetInputData(surface)
cln.Update()
surface = cln.GetOutput()
ring_surf = vtk.vtkPolyData()
ring_surf.DeepCopy(surface)
centerOfMassFilter = vtk.vtkCenterOfMass()
centerOfMassFilter.SetInputData(surface)
centerOfMassFilter.SetUseScalarsAsWeights(False)
centerOfMassFilter.Update()
c_mass = centerOfMassFilter.GetCenter()
ring = Ring(i,"", surface.GetNumberOfPoints(), c_mass, np.sqrt(np.sum((np.array(ap_point)- \
np.array(c_mass))**2, axis=0)), ring_surf)
rings.append(ring)
connect.DeleteSpecifiedRegion(i)
connect.Update()
return rings
def mark_LA_rings(LAA_id, rings, b_tag, centroids, outdir, LA):
rings[np.argmax([r.np for r in rings])].name = "MV"
pvs = [i for i in range(len(rings)) if rings[i].name!="MV"]
estimator = KMeans(n_clusters=2)
estimator.fit([r.center for r in rings if r.name!="MV"])
label_pred = estimator.labels_
min_ap_dist = np.argmin([r.ap_dist for r in [rings[i] for i in pvs]])
label_LPV = label_pred[min_ap_dist]
LPVs = [pvs[i] for i in np.where(label_pred == label_LPV)[0]]
LSPV_id = LPVs.index(pvs[min_ap_dist])
RPVs = [pvs[i] for i in np.where(label_pred != label_LPV)[0]]
cutting_plane_to_identify_UAC(LPVs, RPVs, rings, LA, outdir)
RSPV_id = cutting_plane_to_identify_RSPV(LPVs, RPVs, rings)
RSPV_id = RPVs.index(RSPV_id)
estimator = KMeans(n_clusters=2)
estimator.fit([r.center for r in [rings[i] for i in LPVs]])
LPV_lab = estimator.labels_
LSPVs = [LPVs[i] for i in np.where(LPV_lab == LPV_lab[LSPV_id])[0]]
LIPVs = [LPVs[i] for i in np.where(LPV_lab != LPV_lab[LSPV_id])[0]]
estimator = KMeans(n_clusters=2)
estimator.fit([r.center for r in [rings[i] for i in RPVs]])
RPV_lab = estimator.labels_
RSPVs = [RPVs[i] for i in np.where(RPV_lab == RPV_lab[RSPV_id])[0]]
RIPVs = [RPVs[i] for i in np.where(RPV_lab != RPV_lab[RSPV_id])[0]]
LPV = []
RPV = []
for i in range(len(pvs)):
if pvs[i] in LSPVs:
rings[pvs[i]].name = "LSPV"
elif pvs[i] in LIPVs:
rings[pvs[i]].name = "LIPV"
elif pvs[i] in RIPVs:
rings[pvs[i]].name = "RIPV"
else:
rings[pvs[i]].name = "RSPV"
for r in rings:
id_vec = numpy_support.vtk_to_numpy(r.vtk_polydata.GetPointData().GetArray("Ids"))
fname = outdir+'/ids_{}.vtx'.format(r.name)
if os.path.exists(fname):
f = open(fname, 'a')
else:
f = open(fname, 'w')
f.write('{}\n'.format(len(id_vec)))
f.write('extra\n')
if r.name == "MV":
b_tag[id_vec] = 1
elif r.name == "LIPV":
b_tag[id_vec] = 2
LPV = LPV + list(id_vec)
elif r.name == "LSPV":
b_tag[id_vec] = 3
LPV = LPV + list(id_vec)
elif r.name == "RIPV":
b_tag[id_vec] = 4
RPV = RPV + list(id_vec)
elif r.name == "RSPV":
b_tag[id_vec] = 5
RPV = RPV + list(id_vec)
for i in id_vec:
f.write('{}\n'.format(i))
f.close()
centroids[r.name] = r.center
fname = outdir+'/ids_LAA.vtx'
f = open(fname, 'w')
f.write('{}\n'.format(1))
f.write('extra\n')
f.write('{}\n'.format(LAA_id))
f.close()
fname = outdir+'/ids_LPV.vtx'
f = open(fname, 'w')
f.write('{}\n'.format(len(LPV)))
f.write('extra\n')
for i in LPV:
f.write('{}\n'.format(i))
f.close()
fname = outdir+'/ids_RPV.vtx'
f = open(fname, 'w')
f.write('{}\n'.format(len(RPV)))
f.write('extra\n')
for i in RPV:
f.write('{}\n'.format(i))
f.close()
return b_tag, centroids
def mark_RA_rings(RAA_id, rings, b_tag, centroids, outdir):
rings[np.argmax([r.np for r in rings])].name = "TV"
other = [i for i in range(len(rings)) if rings[i].name!="TV"]
estimator = KMeans(n_clusters=2)
estimator.fit([r.center for r in rings if r.name!="TV"])
label_pred = estimator.labels_
min_ap_dist =
|
np.argmin([r.ap_dist for r in [rings[i] for i in other]])
|
numpy.argmin
|
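# Illustrative aside (not part of the dataset row above): a minimal sketch of the
# numpy.argmin selection in mark_RA_rings, picking the ring whose centre is closest
# to the appendage apex. The Ring objects are replaced by simple stand-ins that
# only carry an ap_dist attribute.
import numpy as np
from types import SimpleNamespace

rings = [SimpleNamespace(ap_dist=12.3), SimpleNamespace(ap_dist=4.7), SimpleNamespace(ap_dist=8.1)]
other = [0, 1, 2]
min_ap_dist = np.argmin([r.ap_dist for r in [rings[i] for i in other]])
print(min_ap_dist)  # -> 1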
import math
import numpy as np
import torch
"""This file contains a few tools as functions that others might be useful when
analysing snapshots. It less well tested or clean than the rest of
astrodynamo."""
def patternspeed(snap, rrange=(1, 4), n=range(2, 12, 2), combine=True,
plot=None):
"""Compute the pattern speed of a snapshot. Uses continuity equation in cylindrical coordinates. Integrating over
all z and taking fourier transform in angular direction gives i m R F(Sigma) = d/dR{F(Sigma vR)} + i m F(sigma vT)
where F(y) is the angular fourier transform of y. This provides an equation at each radius and m for Sigma. By
default combine m=2,4,6,8,10 over the radial range 1->4 into one measurement."""
rcyl = torch.norm(snap.positions[:, 0:2], dim=-1)
vr = torch.einsum('...i,...i->...', snap.positions[:, 0:2],
snap.velocities[:, 0:2]) / rcyl
vt = (snap.positions[:, 1] * snap.velocities[:, 0] -
snap.positions[:, 0] * snap.velocities[:, 1]) / rcyl
rbins = np.linspace(0., 10, 50)
rmid, (surfdensft, surfdensrvrft, surfdensvtft) = bar_cyl_fft(snap,
rbins=rbins,
weights=(None,
vr * rcyl,
vt))
dr = rbins[1] - rbins[0]
dsurfdensrvr_drft = np.zeros_like(surfdensrvrft)
dsurfdensrvr_drft[0, :] = (surfdensrvrft[1, :] - surfdensrvrft[0, :]) / dr
dsurfdensrvr_drft[-1, :] = (surfdensrvrft[-1, :] - surfdensrvrft[-2, :]) / dr
dsurfdensrvr_drft[1:-1, :] = (surfdensrvrft[2:, :] - surfdensrvrft[:-2]) / (
2 * dr)
omegas = []
omegaerrs = []
idx_good_r = (rmid > rrange[0]) & (rmid < rrange[1])
for i in n:
omega = surfdensvtft[:, i] / surfdensft[:, i] / rmid + \
1j * dsurfdensrvr_drft[:, i] / surfdensft[:, i] / i / rmid
omegas.append(np.abs(np.mean(omega[idx_good_r])))
omegaerrs.append(
np.std(np.abs(omega[idx_good_r])) / np.sqrt(np.sum(idx_good_r)))
omegas, omegaerrs = np.array(omegas), np.array(omegaerrs)
# combine by weighted mean
omega = np.sum(omegas / omegaerrs ** 2) / np.sum(1 / omegaerrs ** 2)
omegaerr = 1 / np.sqrt(np.sum(1 / omegaerrs ** 2))
if plot is not None:
omega, omegaerr = np.sum(omegas / omegaerrs ** 2) / np.sum(
1 / omegaerrs ** 2), 1 / np.sqrt(
np.sum(1 / omegaerrs ** 2))
plot.errorbar(np.arange(2, 12, 2), y=omegas, yerr=omegaerrs, fmt='ko',
markersize=3)
plot.axhline(y=omega, color='r')
plot.axhspan(ymin=omega - omegaerr, ymax=omega + omegaerr, color='r',
alpha=0.2)
plot.set_ylim([omega - 10 * omegaerr, omega + 10 * omegaerr])
plot.set_ylabel(r'$\Omega$')
plot.set_xlabel('Mode $m$')
if not combine:
return omegas, omegaerrs
else:
return omega, omegaerr
def bar_cyl_fft(snap, rbins=None, phibins=None, weights=(None,)):
"""Takes fft in the phi direction in each radial bin. Bins in the fi direction using phibins. Returns list of ffts
each weighted by respective weights (None corresponds to by mass.)"""
if rbins is None:
rbins = np.linspace(0., 10, 50)
if phibins is None:
phibins = np.linspace(-math.pi, math.pi, 361)
rcyl = torch.norm(snap.positions[:, 0:2], dim=-1)
phi = torch.atan2(snap.positions[:, 1], snap.positions[:, 0])
ft_out = []
for weight in weights:
if weight is None:
totalweight = snap.masses
else:
totalweight = snap.masses * weight
h, redges, phiedges = np.histogram2d(rcyl.detach().cpu().numpy(),
phi.detach().cpu().numpy(),
(rbins, phibins),
weights=totalweight.detach().cpu().numpy())
area = 0.5 * (redges[1:, np.newaxis] ** 2 - redges[:-1, np.newaxis] ** 2) * \
(phiedges[np.newaxis, 1:] - phiedges[np.newaxis, :-1])
surfdens = h / area
ft_out += [np.fft.fft(surfdens, axis=1)]
if len(ft_out) == 1:
ft_out = ft_out[0]
rmid = 0.5 * (redges[:-1] + redges[1:])
return rmid, ft_out
def compute_bar_angle(snap, max_r=5, deg=True):
"""Computes the bar angle of a snapshot from angle of the m=2 mode at its maximum"""
r, surfdensft = bar_cyl_fft(snap)
gd_i = (r < max_r)
m2 = np.abs(surfdensft[gd_i, 2]) / np.abs(surfdensft[gd_i, 0])
ifid = np.argmax(m2)
bar_angle = -0.5 * np.angle(surfdensft[gd_i, 2][ifid], deg=deg)
return bar_angle
def align_bar(snap, snaps=None, max_r=5):
"""Rotates the bar so that it is aligned to the x-axis. Specifically the m=2 mode is rotated to lie along the x-axis
at its maximum. If snaps argument is supplied then these snapshots are rotated by the same angle as found for the
first snap."""
bar_angle = compute_bar_angle(snap, max_r=max_r, deg=False)
_ = snap.rotate_snap([-bar_angle], snap.positions, snap.velocities,
deg=False,
inplace=True)
if snaps is not None:
for snap in snaps:
_ = snap.rotate_snap([-bar_angle], snap.positions, snap.velocities,
deg=False,
inplace=True)
def barlen(snap, phaselim=None, fractional_m2=None, max_r=7):
"""Computes the bar length of a snapshot using either the point where the m=2 mode twists by phaselim,
or the power in m=2/m=0 drops by fractional_m2 of its maximum."""
barlens = ()
rmid, surfdensft = bar_cyl_fft(snap)
gd_i = (rmid < max_r)
m2 =
|
np.abs(surfdensft[gd_i, 2])
|
numpy.abs
|
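# Illustrative aside (not part of the dataset row above): a minimal sketch of
# numpy.abs turning the complex m=2 Fourier coefficient into an amplitude, as
# barlen does before comparing it with the m=0 term. The surface-density FT below
# is a toy complex array with a single radial bin.
import numpy as np

surfdensft = np.array([[10.0 + 0j, 0.1 + 0.2j, 1.0 - 2.0j]])
gd_i = np.array([True])
m2_amplitude = np.abs(surfdensft[gd_i, 2])
print(m2_amplitude)  # -> [2.236...]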
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_almost_equal
from skimage import data
from skimage._shared._warnings import expected_warnings
from skimage.metrics import (peak_signal_noise_ratio, normalized_root_mse,
mean_squared_error, normalized_mutual_information)
np.random.seed(5)
cam = data.camera()
sigma = 20.0
cam_noisy = np.clip(cam + sigma * np.random.randn(*cam.shape), 0, 255)
cam_noisy = cam_noisy.astype(cam.dtype)
def test_PSNR_vs_IPOL():
""" Tests vs. imdiff result from the following IPOL article and code:
https://www.ipol.im/pub/art/2011/g_lmii/.
Notes
-----
To generate p_IPOL, we need a local copy of cam_noisy:
>>> from skimage import io
>>> io.imsave('/tmp/cam_noisy.png', cam_noisy)
Then, we use the following command:
$ ./imdiff -m psnr <path to camera.png>/camera.png /tmp/cam_noisy.png
Values for current data.camera() calculated by <NAME> on Sep, 2020.
Available at:
https://github.com/scikit-image/scikit-image/pull/4913#issuecomment-700653165
"""
p_IPOL = 22.409353363576034
p = peak_signal_noise_ratio(cam, cam_noisy)
assert_almost_equal(p, p_IPOL, decimal=4)
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_PSNR_float(dtype):
p_uint8 = peak_signal_noise_ratio(cam, cam_noisy)
camf = (cam / 255.).astype(dtype, copy=False)
camf_noisy = (cam_noisy / 255.).astype(dtype, copy=False)
p_float64 = peak_signal_noise_ratio(camf, camf_noisy, data_range=1)
assert p_float64.dtype == np.float64
decimal = 3 if dtype == np.float16 else 5
assert_almost_equal(p_uint8, p_float64, decimal=decimal)
# mixed precision inputs
p_mixed = peak_signal_noise_ratio(cam / 255., np.float32(cam_noisy / 255.),
data_range=1)
assert_almost_equal(p_mixed, p_float64, decimal=decimal)
# mismatched dtype results in a warning if data_range is unspecified
with expected_warnings(['Inputs have mismatched dtype']):
p_mixed = peak_signal_noise_ratio(cam / 255.,
np.float32(cam_noisy / 255.))
assert_almost_equal(p_mixed, p_float64, decimal=decimal)
def test_PSNR_errors():
# shape mismatch
with pytest.raises(ValueError):
peak_signal_noise_ratio(cam, cam[:-1, :])
@pytest.mark.parametrize('dtype', [np.float16, np.float32, np.float64])
def test_NRMSE(dtype):
x =
|
np.ones(4, dtype=dtype)
|
numpy.ones
|
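# Illustrative aside (not part of the dataset row above): a minimal sketch of the
# numpy.ones call that builds the constant test signal in test_NRMSE; the dtype is
# fixed here instead of being parametrised by pytest.
import numpy as np

dtype = np.float32
x = np.ones(4, dtype=dtype)
print(x.dtype, x)  # -> float32 [1. 1. 1. 1.]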
"""
Absorption-only Reconstruction,
comparison with other methods (ASTRA + Mitsuba)
Command lines:
Skull: python3 compare_reconstruction.py results/volume/density/skull7absorption config-files/skull7absorption.json --views 64 --diffdvrL1 --visCropSlice 73:2:64:96 --visCropRendering 62:250:192:128 --visRenderingDiffScaling 20 --visSliceDiffScaling 5 --visSliceRotate 3
Plume: python3 compare_reconstruction.py results/volume/density/plume123absorption config-files/plume123-linear-absorption.json --views 64 --diffdvrL1 --visCropSlice 95:125:96:64 --visCropRendering 90:30:192:128 --visRenderingDiffScaling 20 --visSliceDiffScaling 5 --visSliceRotate 2
Thorax: python3 compare_reconstruction.py results/volume/density/thorax2absorption config-files/thorax2absorption.json --views 64 --diffdvrL1 --visSliceIndividualNormalize --visCropSlice 104:37:96:64 --visCropRendering 30:215:192:128 --visRenderingDiffScaling 20 --visSliceDiffScaling 5 --visSliceRotate 0
First run with -amd for the reconstruction,
then run Mitsuba separately,
then visualize the result with -v .
"""
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
import argparse
import imageio
from typing import Optional
import subprocess
import time
import tqdm
import matplotlib.pyplot as plt
import matplotlib.cm
import matplotlib.colors
from collections import defaultdict
from diffdvr import Renderer, CameraOnASphere, Settings, setup_default_settings, \
fibonacci_sphere, renderer_dtype_torch, renderer_dtype_np, VolumeDensities, \
TfTexture, SmoothnessPrior, toCHW, VolumePreshaded, cvector_to_numpy
from losses.lossbuilder import LossBuilder
import pyrenderer
def _lerp(x, a, b):
return a + x * (b-a)
def _init_parser(parser: argparse.ArgumentParser):
parser.add_argument('outputFolder', type=str, help="Output folder with results and intermediate data")
# dataset
parser.add_argument('settingsFile', type=str, help="Settings .json file")
parser.add_argument('-a', action='store_true', help="Run ASTRA reconstruction")
parser.add_argument('-m', action='store_true', help="Run/prepare Mitsuba reconstruction")
parser.add_argument('-d', action='store_true', help="Run DiffDvr reconstruction")
parser.add_argument('-v', action='store_true', help="Visualize results")
parser.add_argument('-r', action='store_true', help="Render and save all reference images")
parser.add_argument('--views', default=8, type=int, help="Number of views")
parser.add_argument('--imgres', default=512, type=int, help="Image resolution")
parser.add_argument('--astraIterations', default=500, type=int)
parser.add_argument('--diffdvrIterations', default=10, type=int)
parser.add_argument('--diffdvrLastIterations', default=50, type=int)
parser.add_argument('--diffdvrMultiscale', default=32, type=int, help="start resolution for multiscale")
parser.add_argument('--diffdvrL1', action='store_true')
parser.add_argument('--diffdvrL2', action='store_true')
parser.add_argument('--diffdvrPriorSmoothing', default=0.5, type=float)
parser.add_argument('--diffdvrOptimizer', default='Adam', type=str)
parser.add_argument('--diffdvrLR', default=0.3, type=float)
parser.add_argument('--diffdvrBatches', default=8, type=int)
parser.add_argument('--seed', default=124, type=int)
# for visualization (has no impact on the reconstruction / statistics)
parser.add_argument('--visSliceIndividualNormalize', action='store_true',
help="If specified, each slice is normalized individually by max absorption instead of the whole volume")
parser.add_argument('--visCropSlice', default=None, type=str,
help="Format 'x:y:w:h', specifies crops for the slice images")
parser.add_argument('--visCropRendering', default=None, type=str,
help="Format 'x:y:w:h', specifies crops for the rendered images")
parser.add_argument('--visCropSliceThickness', default=2, type=int)
parser.add_argument('--visCropRenderingThickness', default=4, type=int)
parser.add_argument('--visSliceDiffScaling', default=None, type=float,
help="Scaling for the slice difference images. Default=None: normalize to the range")
parser.add_argument('--visRenderingDiffScaling', default=None, type=float,
help="Scaling for the rendering difference images. Default=None: normalize to the range")
parser.add_argument('--visSliceRotate', default=0, type=int,
help="Number of times the slice image is rotated by 90°")
def _prepare_volume(settings_file: str, views: int):
# settings
s = Settings(settings_file)
reference_volume = s.load_dataset()
reference_volume_data = reference_volume.getDataCpu(0)
device = reference_volume_data.device
world_size = cvector_to_numpy(reference_volume.world_size)
print("world size:", world_size)
# camera
camera_config = s.get_camera()
camera_pitch_cpu, camera_yaw_cpu = fibonacci_sphere(views, dtype=renderer_dtype_np)
camera_distance_cpu = camera_config.distance * np.ones((views,), dtype=renderer_dtype_np)
camera_center_cpu = np.stack([camera_config.center] * views, axis=0).astype(dtype=renderer_dtype_np)
camera_fov_radians = camera_config.fov_y_radians
camera_module = CameraOnASphere(camera_config.orientation)
cameras = camera_module(
torch.from_numpy(camera_center_cpu).to(device=device),
torch.from_numpy(camera_yaw_cpu).to(device=device).unsqueeze(1),
torch.from_numpy(camera_pitch_cpu).to(device=device).unsqueeze(1),
torch.from_numpy(camera_distance_cpu).to(device=device).unsqueeze(1))
# reference camera
reference_cameras = camera_module(
torch.from_numpy(camera_center_cpu[:1]).to(device=device),
camera_config.yaw_radians * torch.ones((1, 1), dtype=renderer_dtype_torch).to(device=device),
camera_config.pitch_radians * torch.ones((1, 1), dtype=renderer_dtype_torch).to(device=device),
camera_config.distance * torch.ones((1, 1), dtype=renderer_dtype_torch).to(device=device))
# TF
min_density = s._data["tfEditor"]["minDensity"]
max_density = s._data["tfEditor"]["maxDensity"]
opacity_scaling = s._data["tfEditor"]["opacityScaling"]
g = s._data["tfEditor"]['editorLinear']
densityAxisOpacity = g['densityAxisOpacity']
assert len(densityAxisOpacity) == 2
opacityAxis = g['opacityAxis']
assert len(opacityAxis) == 2
actual_min_density = _lerp(densityAxisOpacity[0], min_density, max_density)
actual_max_density = _lerp(densityAxisOpacity[1], min_density, max_density)
absorption_scaling = 1 # opacityAxis[1]
# transform volume data from [actual_min_density, actual_max_density] to [0,1] (with clamping)
# and then multiply with absorption_scaling
print("Transform volume data from [%.4f, %.4f] to [0,1] and scale by %.4f" % (
actual_min_density, actual_max_density, absorption_scaling))
reference_volume_data = torch.clip(
(reference_volume_data - actual_min_density) / (actual_max_density - actual_min_density),
0.0, 1.0, out=reference_volume_data)
reference_volume_data *= absorption_scaling
return {
'settings': s,
'reference_volume': reference_volume,
'reference_volume_data': reference_volume_data,
'cameras': cameras,
'camera_fov_radians': camera_fov_radians,
'world_size': world_size,
'reference_cameras': reference_cameras,
}
def _setup_renderer(data:dict, resolution: int, with_reference_camera=False):
cuda_device = torch.device("cuda")
rs = setup_default_settings(
data['reference_volume'], resolution, resolution,
data['settings'].get_stepsize(), False)
rs.tf_mode = pyrenderer.TFMode.Identity
tf_reference = torch.tensor([[
# r,g,b,a,pos
[1, 1]
]], dtype=renderer_dtype_torch, device=cuda_device)
if with_reference_camera:
cameras = torch.cat([data['reference_cameras'], data['cameras']])
else:
cameras = data['cameras']
cameras_cuda = cameras.to(device=cuda_device)
renderer = Renderer(rs, optimize_volume=True,
gradient_method='adjoint')
return rs, cameras_cuda, tf_reference, renderer
def _render_reference(data:dict, output_path_template: Optional[str], resolution: int):
cuda_device = torch.device("cuda")
volume_data_cuda = data['reference_volume_data'].to(cuda_device)
rs, cameras_cuda, tf_reference, renderer = \
_setup_renderer(data, resolution)
reference_images = renderer(
camera=cameras_cuda, fov_y_radians=data['camera_fov_radians'],
tf=tf_reference, volume=volume_data_cuda)
reference_images = toCHW(reference_images).detach()
if output_path_template is not None:
absorption_images = torch.stack([reference_images[:, 3, :, :]] * 3, dim=-1).cpu().numpy()
max_absorption = np.max(absorption_images)
for v in range(cameras_cuda.shape[0]):
absorption_image = (absorption_images[v] / max_absorption * 255).astype(np.uint8)
imageio.imwrite(output_path_template % v, absorption_image)
return reference_images
def _call_astra(cfg: dict, folder: str, resolution: int, iterations: int):
# export to numpy for ASTRA
print("export to numpy for ASTRA")
astra_input_file = os.path.join(folder, "astra-input.npz")
astra_output_file = os.path.join(folder, "astra-output.npz")
np.savez(astra_input_file,
volume=cfg['reference_volume_data'],
cameras=cfg['cameras'],
fov_y_radians=cfg['camera_fov_radians'],
world_size=cfg['world_size'])
# call astra
cwd = os.path.abspath(os.path.join(os.path.split(__file__)[0], "astra"))
print("working directory:", cwd)
args = [
"conda", "run", "-n", "py37astra",
"--cwd", cwd,
"python", "VolumeReconstruction.py",
"--output", astra_output_file,
"--input", astra_input_file,
"--resolution", "%d"%resolution,
"--iterations", "%d"%iterations,
]
ret = subprocess.run(args)
def _call_mitsuba(cfg: dict, folder: str, resolution: int, opt):
# export volumes
mitsuba_scene = os.path.join(folder, "mitsuba_scene.xml")
mitsuba_cfg = os.path.join(folder, "mitsuba_cfg.py")
mitsuba_reference = os.path.join(folder, "mitsuba_reference-%03d.exr")
mitsuba_volume_reference = os.path.join(folder, "mitsuba_reference.vol")
mitsuba_volume_initial = os.path.join(folder, "mitsuba_initial.vol")
volume = cfg['reference_volume_data'].cpu().numpy()[0]
print("Strides:", volume.strides)
cameras = cfg['cameras'].cpu().numpy()
fov_y_radians = cfg['camera_fov_radians']
world_size = cfg['world_size']
num_cameras = cameras.shape[0]
def write_grid_binary_data(filename, values):
values = np.array(values)
with open(filename, 'wb') as f:
f.write(b'V')
f.write(b'O')
f.write(b'L')
f.write(np.uint8(3).tobytes()) # Version
f.write(np.int32(1).tobytes()) # type
f.write(np.int32(volume.shape[0]).tobytes()) # size
f.write(np.int32(volume.shape[1]).tobytes())
f.write(np.int32(volume.shape[2]).tobytes())
f.write(np.int32(1).tobytes()) # channels
f.write(
|
np.float32(0.0)
|
numpy.float32
|
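# Illustrative aside (not part of the dataset row above): a minimal sketch of why
# np.float32(0.0) appears in the .vol header writer -- numpy scalars serialise to
# fixed-width binary with tobytes(), so each float field contributes exactly four
# bytes. The header below only mimics the first few fields.
import numpy as np

header = b"VOL" + np.uint8(3).tobytes() + np.int32(1).tobytes() + np.float32(0.0).tobytes()
print(len(header))  # -> 12 bytes (3 magic + 1 version + 4 int32 + 4 float32)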
import os
import sqlite3
import numpy as np
import pandas as pd
import imp
imp.load_source("PostProcessDbUtils", os.path.join(os.path.dirname(os.path.realpath(__file__)), "post-process-db-utils.py"))
from PostProcessDbUtils import *
imp.load_source("PostProcessPdUtils", os.path.join(os.path.dirname(os.path.realpath(__file__)), "post-process-pandas-utils.py"))
from PostProcessPdUtils import *
imp.load_source("PostProcessPlotUtils", os.path.join(os.path.dirname(os.path.realpath(__file__)), "post-process-plot-utils.py"))
from PostProcessPlotUtils import *
def traceTimes_aggregateTraceRange(db, runID, processID, nodeEntryId, nodeExitId):
cur = db.cursor()
traceTimesProcess_tmpTableName = "traceTime_r{0}_p{1}".format(runID, processID)
if not temp_table_exists(db, traceTimesProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM TraceTimeData WHERE RunID = {1} AND ProcessID = {2}".format(traceTimesProcess_tmpTableName, runID, processID)
cur.execute(query)
callpathProcess_tmpTableName = "callpath_p{0}".format(processID)
if not temp_table_exists(db, callpathProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM CallPathData WHERE ProcessID = {1}".format(callpathProcess_tmpTableName, processID)
cur.execute(query)
premergedData_tmpTableName = "traceTimeCallpath_r{0}_p{1}".format(runID, processID)
if not temp_table_exists(db, premergedData_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * ".format(premergedData_tmpTableName) + \
"FROM {0} NATURAL JOIN {1} NATURAL JOIN ProfileNodeData NATURAL JOIN ProfileNodeType".format(traceTimesProcess_tmpTableName, callpathProcess_tmpTableName)
cur.execute(query)
## Query to get trace entries that occur between nodeEntryId and nodeExitId, summing by CallPathId:
qGetTraceWalltime = "SELECT TraceTimeID, CallPathID, SUM(WallTime) AS WallTime, ParentNodeID, TypeName " + \
"FROM {0} ".format(premergedData_tmpTableName) + \
"WHERE NodeEntryID >= {0} AND NodeExitID <= {1} ".format(nodeEntryId, nodeExitId) + \
"GROUP BY CallPathID"
## Query to, for each trace entry in range, sum walltimes of its immediate children
qGetChildrenWalltime = "SELECT ParentNodeID AS PID, SUM(WallTime) AS ChildrenWalltime " + \
"FROM {0} ".format(premergedData_tmpTableName) +\
"WHERE NodeEntryID >= {0} AND NodeExitID <= {1} ".format(nodeEntryId, nodeExitId) + \
"GROUP BY ParentNodeID"
    ## Query to join the above two queries into one, bringing together inclusive walltimes of each node and its children. Seems to work.
query12 = "SELECT TraceTimeID, WallTime, ChildrenWalltime, TypeName FROM ({0}) AS A LEFT OUTER JOIN ({1}) AS B ON A.CallPathID = B.PID".format(qGetTraceWalltime, qGetChildrenWalltime)
cur.execute(query12)
rows = np.array(cur.fetchall())
# Set all to have same TraceTimeID, for easier grouping later
for i in range(len(rows)):
rows[i][0] = rows[0][0]
return rows
def traceTimes_aggregateTraceSubRanges(db, runID, processID, nodeEntryId, nodeExitId, subnodeEntryIds, subnodeExitIds):
cur = db.cursor()
traceTimesProcess_tmpTableName = "traceTime_r{0}_p{1}".format(runID, processID)
if not temp_table_exists(db, traceTimesProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM TraceTimeData WHERE RunID = {1} AND ProcessID = {2}".format(traceTimesProcess_tmpTableName, runID, processID)
cur.execute(query)
callpathProcess_tmpTableName = "callpath_p{0}".format(processID)
if not temp_table_exists(db, callpathProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM CallPathData WHERE ProcessID = {1}".format(callpathProcess_tmpTableName, processID)
cur.execute(query)
premergedData_tmpTableName = "traceTimeCallpath_r{0}_p{1}".format(runID, processID)
if not temp_table_exists(db, premergedData_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM {1} NATURAL JOIN {2} NATURAL JOIN ProfileNodeData NATURAL JOIN ProfileNodeType".format(premergedData_tmpTableName, traceTimesProcess_tmpTableName, callpathProcess_tmpTableName)
cur.execute(query)
## Query to get trace entries that occur between nodeEntryId and nodeExitId, summing by CallPathId:
qGetTraceWalltime = "SELECT MIN(TraceTimeID) AS TraceTimeID, MIN(NodeEntryID) AS NodeEntryID, CallPathID, SUM(WallTime) AS WallTime, ParentNodeID, TypeName " + \
"FROM {0} ".format(premergedData_tmpTableName) + \
"WHERE NodeEntryID >= {0} AND NodeExitID <= {1} ".format(nodeEntryId, nodeExitId) + \
"GROUP BY CallPathID"
## Query to, for each trace entry in range, sum walltimes of its immediate children
qGetChildrenWalltime = "SELECT ParentNodeID AS PID, SUM(WallTime) AS ChildrenWalltime " + \
"FROM {0} ".format(premergedData_tmpTableName) + \
"WHERE NodeEntryID >= {0} AND NodeExitID <= {1} GROUP BY ParentNodeID".format(nodeEntryId, nodeExitId)
    ## Query to join the above two queries into one, bringing together inclusive walltimes of each node and its children. Seems to work.
query12 = "SELECT NodeEntryID, TraceTimeID, WallTime, ChildrenWalltime, TypeName FROM ({0}) AS A LEFT OUTER JOIN ({1}) AS B ON A.CallPathID = B.PID".format(qGetTraceWalltime, qGetChildrenWalltime)
cur.execute(query12)
rows = np.array(cur.fetchall())
## Finally: filter rows to retain those that occur between pairs of subnodeEntryIds and subnodeExitIds.
## It should only be necessary to filter against the first pair, because 'qGetTraceWalltime' query only retains MIN(NodeEntryID); but to be safe, will check against all pairs.
rows_filtered = None
for r in rows:
nid = r[0]
for i in range(len(subnodeEntryIds)):
if nid >= subnodeEntryIds[i] and nid <= subnodeExitIds[i]:
## Append, dropping the NodeEntryID
if rows_filtered is None:
rows_filtered = np.array([r[1:]])
else:
rows_filtered = np.append(rows_filtered, [r[1:]], axis=0)
break
# Set all to have same TraceTimeID, for easier grouping later
for i in range(len(rows_filtered)):
rows_filtered[i][0] = rows_filtered[0][0]
return rows_filtered
def traceTimes_aggregateByNode(db, runID, processID, tree, nodeName, nodeOfInterestName=None):
db.row_factory = sqlite3.Row
cur = db.cursor()
cid = getNodeCallpathId(db, processID, nodeName)
traceTimesProcess_tmpTableName = "traceTime_r{0}_p{1}".format(runID, processID)
if not temp_table_exists(db, traceTimesProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM TraceTimeData WHERE RunID = {1} AND ProcessID = {2}".format(traceTimesProcess_tmpTableName, runID, processID)
cur.execute(query)
query = "SELECT NodeEntryID, NodeExitID FROM {0} WHERE CallPathID = {1};".format(traceTimesProcess_tmpTableName, cid)
cur.execute(query)
result = cur.fetchall()
nodeEntryIds = [row['NodeEntryID'] for row in result]
nodeExitIds = [row['NodeExitID'] for row in result]
if len(nodeEntryIds) == 0:
return None
    if nodeOfInterestName is not None:
## ONLY aggregate across 'nodeOfInterestName', but still aggregating by 'nodeName'.
## The scenario is a solver loop 'nodeName', where only part of it is of interest ('nodeOfInterestName')
## First, confirm that 'nodeOfInterest' is child of 'nodeName':
t = findTreeNodeByName(tree, nodeName)
if t is None:
raise Exception("'{0}' not in call tree".format(nodeName))
t = findTreeNodeByName(tree, nodeOfInterestName)
if t is None:
raise Exception("'{0}' not in call tree".format(nodeOfInterestName))
t = findTreeNodeByName(findTreeNodeByName(tree, nodeName), nodeOfInterestName)
if t is None:
raise Exception("'{0}' not child of '{1}'".format(nodeOfInterestName, nodeName))
cid = getNodeCallpathId(db, processID, nodeOfInterestName)
query = "SELECT NodeEntryID, NodeExitID FROM {0} WHERE CallPathID = {0};".format(traceTimesProcess_tmpTableName, cid)
cur.execute(query)
result = cur.fetchall()
subnodeEntryIds = [row['NodeEntryID'] for row in result]
subnodeExitIds = [row['NodeExitID'] for row in result]
rows_all = None
for i in range(len(nodeEntryIds)):
entryID = nodeEntryIds[i]
exitID = nodeExitIds[i]
subnodeEntryIdsInRange = [i for i in subnodeEntryIds if (i >= entryID and i <= exitID)]
subnodeExitIdsInRange = [i for i in subnodeExitIds if (i >= entryID and i <= exitID)]
rows = traceTimes_aggregateTraceSubRanges(db, runID, processID, entryID, exitID, subnodeEntryIdsInRange, subnodeExitIdsInRange)
if rows_all is None:
rows_all = rows
else:
rows_all = np.append(rows_all, rows, axis=0)
else:
rows_all = None
for i in range(len(nodeEntryIds)):
rows = traceTimes_aggregateTraceRange(db, runID, processID, nodeEntryIds[i], nodeExitIds[i])
if rows_all is None:
rows_all = rows
else:
rows_all = np.append(rows_all, rows, axis=0)
# Pack rows into a DataFrame for final analysis
fields = ["TraceTimeID", "WallTime", "ChildrenWalltime", "TypeName"]
columns = {}
columns["TraceTimeID"] = [r[0] for r in rows_all]
columns["WallTime"] = [r[1] for r in rows_all]
columns["ChildrenWallTime"] = [r[2] for r in rows_all]
columns["TypeName"] = [r[3] for r in rows_all]
df = pd.DataFrame(columns)
df.loc[df["ChildrenWallTime"].isna(), "ChildrenWallTime"] = 0.0
df["InclusiveTime"] = df["WallTime"] - df["ChildrenWallTime"]
if sum(df["InclusiveTime"] < 0.0) > 0:
print(df)
raise Exception("Negative inclusive times calculated for trace {0}!".format(tid))
df = df.drop(["WallTime", "ChildrenWallTime"], axis=1)
df["Type"] = ""
df.loc[df["TypeName"].isin(["MPICollectiveCall", "MPICommCall", "MPISyncCall"]), "Type"] = "MPI"
df.loc[df["TypeName"].isin(["Method", "Loop", "Compute", "Block"]), "Type"] = "Compute"
df.loc[df["TypeName"].isin(["TraceConductor", "AggregationStepper"]), "Type"] = "Compute"
if sum(df["Type"]=="") > 0:
print(df["TypeName"].unique())
raise Exception("Unhandled TypeName values, investigate")
df = df.drop(["TypeName"], axis=1)
df = df.groupby(["TraceTimeID", "Type"]).sum().reset_index()
df2 = df.groupby("TraceTimeID").sum().reset_index().rename(columns={"InclusiveTime":"TotalTime"})
df = df.merge(df2)
df["InclusiveTime %"] = df["InclusiveTime"] / df["TotalTime"] * 100.0
df = df.drop("TotalTime", axis=1)
return df
def traceParameter_aggregateTraceRange(db, runID, processID, paramTable, paramName, nodeEntryId, nodeExitId):
cur = db.cursor()
traceParamIdColMap = {}
traceParamIdColMap["TraceParameterBoolData"] = "TraceParamBoolID"
traceParamIdColMap["TraceParameterIntData"] = "TraceParamIntID"
traceParamIdColMap["TraceParameterLongData"] = "TraceParamLongID"
traceParamIdColMap["TraceParameterDoubleData"] = "TraceParamDoubleID"
traceParamProcess_tmpTableName = "traceParam_{0}_r{1}_p{2}".format(paramName, runID, processID)
if not temp_table_exists(db, traceParamProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM {1} ".format(traceParamProcess_tmpTableName, paramTable) + \
"WHERE RunID = {0} AND ProcessID = {1} AND ParamName = \"{2}\"".format(runID, processID, paramName)
cur.execute(query)
callpathProcess_tmpTableName = "callpath_p{0}".format(processID)
if not temp_table_exists(db, callpathProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM CallPathData WHERE ProcessID = {1}".format(callpathProcess_tmpTableName, processID)
cur.execute(query)
premergedData_tmpTableName = "traceParamCallpath_{0}_r{1}_p{2}".format(paramName, runID, processID)
if not temp_table_exists(db, premergedData_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM {1} NATURAL JOIN {2} NATURAL JOIN ProfileNodeData NATURAL JOIN ProfileNodeType".format(premergedData_tmpTableName, traceParamProcess_tmpTableName, callpathProcess_tmpTableName)
cur.execute(query)
## To reduce code development time, check that parameter only has one value in specified nodeID range.
## If this is not the case, then need to think about what statistics to report (average? max and min? variance?).
## Multiple parameter values cannot be handled like runtimes (which can be summed).
qCountQuery = "SELECT COUNT(*) FROM {0} WHERE NodeEntryID >= {1} AND NodeExitID <= {2} GROUP BY ParentNodeID".format(premergedData_tmpTableName, nodeEntryId, nodeExitId)
cur.execute(qCountQuery)
res = cur.fetchone()
if res is None:
return None
count = res[0]
if count > 1:
raise Exception("Parameter '{0}' recorded multiple values between a specific nodeID range. This situation has not been coded in TreeTimer, contact developers to request average, variance, or some other aggregating function.")
query = "SELECT {0} AS TraceParamId, ParamValue FROM {1} WHERE NodeEntryID >= {2} AND NodeExitID <= {3} GROUP BY ParentNodeID".format(traceParamIdColMap[paramTable], premergedData_tmpTableName, nodeEntryId, nodeExitId)
cur.execute(query)
res = cur.fetchone()
row = [res["TraceParamId"], res["ParamValue"]]
return [row]
def traceParameter_aggregateByNode(db, runID, processID, tree, nodeName, paramName):
db.row_factory = sqlite3.Row
cur = db.cursor()
## First, determine parameter type:
paramTable = None
for t in ["TraceParameterBoolData", "TraceParameterIntData", "TraceParameterFloatData"]:
traceParamsProcess_tmpTableName = "{0}_r{1}_p{2}".format(t, runID, processID)
if not temp_table_exists(db, traceParamsProcess_tmpTableName):
query = "CREATE TEMPORARY TABLE {0} AS SELECT * FROM {1} ".format(traceParamsProcess_tmpTableName, t) + \
"WHERE RunID = {0} AND ProcessID = {1}".format(runID, processID)
cur.execute(query)
query = "SELECT COUNT(*) FROM {0} WHERE ParamName = \"{1}\" ;".format(traceParamsProcess_tmpTableName, paramName)
cur.execute(query)
count = cur.fetchone()[0]
if count > 0:
if not paramTable is None:
raise Exception("ParamName {0} is present in multiple tables: {1} and {2}".format(paramName, paramTable, t))
paramTable = t
if paramTable is None:
# raise Exception("ParamName {0} not found in any TraceParameter* table".format(paramName))
# print("WARNING: ParamName {0} not found in any TraceParameter* table".format(paramName))
return None
cid = getNodeCallpathId(db, processID, nodeName)
traceParamsProcess_tmpTableName = "{0}_r{1}_p{2}".format(paramTable, runID, processID)
query = "SELECT NodeEntryID, NodeExitID FROM {0} WHERE CallPathID = {1};".format(traceParamsProcess_tmpTableName, cid)
cur.execute(query)
result = cur.fetchall()
nodeEntryIds = [row['NodeEntryID'] for row in result]
nodeExitIds = [row['NodeExitID'] for row in result]
if len(nodeEntryIds) == 0:
return None
rows_all = None
for i in range(len(nodeEntryIds)):
rows = traceParameter_aggregateTraceRange(db, runID, processID, paramTable, paramName, nodeEntryIds[i], nodeExitIds[i])
if not rows is None:
if rows_all is None:
rows_all = rows
else:
rows_all = np.append(rows_all, rows, axis=0)
# Pack rows into a DataFrame for final analysis
fields = ["TraceParamID", "ParamName", "Value"]
columns = {}
columns["TraceParamID"] = [r[0] for r in rows_all]
columns["ParamName"] = [paramName]*len(rows_all)
columns["Value"] = [r[1] for r in rows_all]
df = pd.DataFrame(columns)
return df
def add_unit_stride_index_column(df, id_colname):
## Add a unit-stride index column:
## Note: different ranks can have different TraceTimeID for same solver timestep. So,
## need to process each rank individually.
col_ranks = []
col_ids = []
col_indices = []
for r in df["Rank"].unique():
traceTimesIDs = df.loc[df["Rank"]==r, id_colname].unique()
traceTimesIDs.sort()
n = len(traceTimesIDs)
indices = np.arange(0,n)
col_ranks += [r]*n
col_ids += traceTimesIDs.tolist()
col_indices += indices.tolist()
df_ids = pd.DataFrame({"Rank":col_ranks, id_colname:col_ids, "TimestepIndex":col_indices})
df = df.merge(df_ids, validate="many_to_one")
return df
def traceTimes_chartDynamicLoadBalance(traces_df, output_dirpath=None, filename_suffix=None):
# Restrict to MPI time %:
mpi_traces = traces_df[traces_df["Type"]=="MPI"]
if mpi_traces.shape[0] == 0:
print("No MPI functions were timed")
return
mpi_traces = mpi_traces.drop(["Type", "InclusiveTime"], axis=1)
mpi_traces = mpi_traces.rename(columns={"InclusiveTime %":"MPI %"})
r_root = mpi_traces["Rank"].min()
num_ts_root = mpi_traces[mpi_traces["Rank"]==r_root].shape[0]
for r in mpi_traces["Rank"].unique():
num_ts = mpi_traces[mpi_traces["Rank"]==r].shape[0]
if num_ts != num_ts_root:
raise Exception("Ranks {0} and {1} performed different number of solver timesteps - {2} vs {3}".format(r_root, r, num_ts_root, num_ts))
## Add a unit-stride index column:
mpi_traces = add_unit_stride_index_column(mpi_traces, "TraceTimeID")
## Can drop 'TraceTimeID', a SQL relic:
mpi_traces = mpi_traces.drop("TraceTimeID", axis=1)
# # Discard first N timesteps as warming-up:
# N = 2
# if N < mpi_traces["TimestepIndex"].max():
# mpi_traces = mpi_traces[mpi_traces["TimestepIndex"]>N].reset_index(drop=True)
# mpi_traces["TimestepIndex"] -= N
## Evenly sample 100 timepoints, so that final chart is legible:
mpi_traces = sample_n_timesteps(mpi_traces, 100, "TimestepIndex")
timestepIndices = mpi_traces["TimestepIndex"].unique()
timestepIndices.sort()
## Ensure table is sorted for calculation
mpi_traces = mpi_traces.sort_values(["TimestepIndex", "Rank"])
## Construct heatmap, of MPI % during simulation:
if not filename_suffix is None:
fig_filename = "mpi-pct-heatmap.{0}.png".format(filename_suffix)
else:
fig_filename = "mpi-pct-heatmap.png"
if not output_dirpath is None:
fig_filepath = os.path.join(output_dirpath, fig_filename)
else:
fig_filepath = fig_filename
plot_heatmap(mpi_traces, "TimestepIndex", "MPI %", fig_filepath)
## Finally! Calculate difference in MPI % over time:
col_ranks = None
col_index = None
col_diffs = None
# step = 1
# step = 10
step = mpi_traces[mpi_traces["Rank"]==mpi_traces["Rank"].min()].shape[0]//2
# step = 50
# # Zero up until step kicks in
# for ts in range(0, step):
# ts0 = mpi_traces[mpi_traces["TimestepIndex"]==timestepIndices[ts]]
# col_ranks = ts0["Rank"].values if (col_ranks is None) else np.append(col_ranks, ts0["Rank"].values)
# col_index = ts0["TimestepIndex"].values if (col_index is None) else np.append(col_index, ts0["TimestepIndex"].values)
# diff = [0.0]*ts0["Rank"].shape[0]
# col_diffs = diff if (col_diffs is None) else np.append(col_diffs, diff)
# ts0 = mpi_traces[mpi_traces["TimestepIndex"]==timestepIndices[0]]
# for ts in range(step, len(timestepIndices)):
# ts1 = mpi_traces[mpi_traces["TimestepIndex"]==timestepIndices[ts]]
# ## Calculate diff, etc
# col_ranks = ts1["Rank"].values if (col_ranks is None) else np.append(col_ranks, ts1["Rank"].values)
# col_index = ts1["TimestepIndex"].values if (col_index is None) else np.append(col_index, ts1["TimestepIndex"].values)
# diff = np.absolute(ts1["MPI %"].values - ts0["MPI %"].values)
# col_diffs = diff if (col_diffs is None) else np.append(col_diffs, diff)
# ts0 = mpi_traces[mpi_traces["TimestepIndex"]==timestepIndices[ts-step]]
## Calculate difference against last:
tsLast = mpi_traces[mpi_traces["TimestepIndex"]==timestepIndices[-1]]
for ts in range(0, len(timestepIndices)):
ts1 = mpi_traces[mpi_traces["TimestepIndex"]==timestepIndices[ts]]
## Calculate diff, etc
col_ranks = ts1["Rank"].values if (col_ranks is None) else np.append(col_ranks, ts1["Rank"].values)
col_index = ts1["TimestepIndex"].values if (col_index is None) else np.append(col_index, ts1["TimestepIndex"].values)
#diff = np.absolute(tsLast["MPI %"].values - ts1["MPI %"].values)
diff = ts1["MPI %"].values - tsLast["MPI %"].values
col_diffs = diff if (col_diffs is None) else
|
np.append(col_diffs, diff)
|
numpy.append
|
#Copyright (c) 2018-2020 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as NP
import pandas as pd
from sklearn.metrics import roc_auc_score
def snv_calculate(cdata, snvs, alt, tot, min_expressed=10, gkey='genotype'):
gts = cdata[gkey].values.copy()
gt1 = NP.where(gts == 0)[0]
gt2 = NP.where(gts == 1)[0]
nz1 = NP.count_nonzero(tot[:,gt1], axis=1)
nz2 = NP.count_nonzero(tot[:,gt2], axis=1)
sidx = NP.where((nz1 >= min_expressed) & (nz2 >= min_expressed))[0]
ridx = sidx[:,NP.newaxis]
oaf1 = alt[ridx,gt1].sum(axis=1) / tot[ridx,gt1].sum(axis=1)
oaf2 = alt[ridx,gt2].sum(axis=1) / tot[ridx,gt2].sum(axis=1)
af1 =
|
NP.nanmean(alt[ridx,gt1] / tot[ridx,gt1], axis=1)
|
numpy.nanmean
|
#!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import warnings
from mpi4py import MPI
rank = MPI.COMM_WORLD.rank
#warnings.filterwarnings("ignore")
if rank!=0: warnings.filterwarnings("ignore")
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
#import matplotlib
#matplotlib.use('pdf')
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from lab import mapbias as mapp
from lab import mapnoise as mapn
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
#
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
#
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', help='save files or make plot', default='plot')
parser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=1024, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=256, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.03, type=float)
parser.add_argument( '--pp', help='upsample', default=1)
args = parser.parse_args()
figpath = './figs/'
bs, nc, aa = args.bs, args.nmesh, args.aa
nc2 = nc*2
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
if args.pp: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc2, nc2, nc2])
else: pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
################
def savestd():
#for ia, aa in enumerate([0.3333, 0.2000, 0.1429]):
for ia, aa in enumerate([0.1429]):
#for ia, aa in enumerate([0.3333, 0.2000, 0.1429]):
zz = 1/aa-1
for iw, wopt in enumerate(['opt', 'pess']):
#for iw, wopt in enumerate(['opt']):
for it, thopt in enumerate(['opt', 'pess', 'reas']):
#for it, thopt in enumerate([ 'pess']):
if rank == 0: print(aa, wopt, thopt)
dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_%s/'%(aa, 0.03, wopt)
dpath += 'L%04d-N%04d-R//thermal-%s-hex/ZA/opt_s999_h1massA_fourier_rsdpos/'%(bs, nc, thopt)
ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/'%(wopt, thopt)
fname = ofolder + 'std-L%04d_%0.4f.txt'%(bs, aa)
fxname = ofolder + 'xstd-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
if args.pp : fxname = fxname[:-4] + '-up.txt'
try:
rep = np.loadtxt(fname+'s').T
except:
try:
if args.pp:
std = BigFileMesh(dpath+'/stdrecon_up-noise', 'std').paint()
ss = BigFileMesh(dpath+'/datap_up', 's').paint()
else:
std = BigFileMesh(dpath+'/stdrecon-noise', 'std').paint()
ss = BigFileMesh(dpath+'/datap', 's').paint()
p0 = FFTPower(std, mode='1d').power
px = FFTPower(std, second=ss, mode='1d').power
if rank == 0: np.savetxt(fname, np.stack([p0['k']]+ [p0['power'].real]).T, header='k, p0')
if rank == 0: np.savetxt(fxname, np.stack([px['k']]+ [px['power'].real]).T, header='k, px')
except Exception as e:
print(e)
def savestd2d(Nmu=4):
#for ia, aa in enumerate([0.3333, 0.2000, 0.1429]):
for ia, aa in enumerate([0.1429]):
zz = 1/aa-1
for iw, wopt in enumerate(['opt', 'pess']):
#for iw, wopt in enumerate(['opt']):
for it, thopt in enumerate(['opt', 'pess', 'reas']):
#for it, thopt in enumerate([ 'reas']):
if rank == 0: print(aa, wopt, thopt)
dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_%s/'%(aa, 0.03, wopt)
dpath += 'L%04d-N%04d-R//thermal-%s-hex/ZA/opt_s999_h1massA_fourier_rsdpos/'%(bs, nc, thopt)
ofolder = '../../data/ZArecon-rsd/kmin-003_wedge-%s/thermal-%s-hex/Nmu%d/'%(wopt, thopt, Nmu)
fname = ofolder + 'std-L%04d_%0.4f.txt'%(bs, aa)
fxname = ofolder + 'xstd-L%04d_%0.4f.txt'%(bs, aa)
if args.pp : fname = fname[:-4] + '-up.txt'
if args.pp : fxname = fxname[:-4] + '-up.txt'
try:
rep =
|
np.loadtxt(fname+'s')
|
numpy.loadtxt
|
# Copyright 2019 The ROBEL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Avoid task.
The robot must navigate to a target point while avoiding an obstacle.
By default, this environment does not employ any randomizations, and so will
exhibit poor sim2real transfer. For better transfer, one may want to borrow
some of the randomizations used in DKittyWalkRandomDynamics.
"""
import collections
from typing import Dict, Optional, Sequence, Union
import numpy as np
from robel.components.tracking import TrackerComponentBuilder, TrackerState
from robel.dkitty.walk import BaseDKittyWalk
from robel.utils.math_utils import calculate_cosine
DKITTY_ASSET_PATH = 'robel/dkitty/assets/dkitty_avoid-v0.xml'
DEFAULT_OBSERVATION_KEYS = (
'root_pos',
'root_euler',
'kitty_qpos',
'root_vel',
'root_angular_vel',
'kitty_qvel',
'last_action',
'upright',
'heading',
'block_error',
'target_error',
)
class DKittyAvoid(BaseDKittyWalk):
"""Avoid task."""
def __init__(
self,
asset_path: str = DKITTY_ASSET_PATH,
observation_keys: Sequence[str] = DEFAULT_OBSERVATION_KEYS,
block_tracker_id: Optional[Union[str, int]] = None,
frame_skip: int = 40,
upright_threshold: float = 0.9, # cos(~25deg)
upright_reward: float = 1,
falling_reward: float = 0, # Don't penalize falling.
**kwargs):
"""Initializes the environment.
Args:
See BaseDKittyWalk.
"""
self._block_tracker_id = block_tracker_id
super().__init__(
asset_path=asset_path,
observation_keys=observation_keys,
frame_skip=frame_skip,
upright_threshold=upright_threshold,
upright_reward=upright_reward,
falling_reward=falling_reward,
**kwargs)
self._initial_block_pos = np.array([1., 0., 0.2])
def _configure_tracker(self, builder: TrackerComponentBuilder):
"""Configures the tracker component."""
super()._configure_tracker(builder)
builder.add_tracker_group(
'block',
hardware_tracker_id=self._block_tracker_id,
sim_params=dict(
element_name='block',
element_type='body',
))
def _reset(self):
"""Resets the environment."""
target_dist = self.np_random.uniform(low=1.5, high=2.5)
target_theta = np.pi / 2 + self.np_random.uniform(low=-1, high=1)
self._initial_target_pos = target_dist * np.array([
np.cos(target_theta), np.sin(target_theta), 0
])
block_dist = max(
0.6, target_dist * self.np_random.uniform(low=0.3, high=0.8))
block_theta = target_theta + self.np_random.uniform(low=-0.5, high=0.5)
self._initial_block_pos = np.array([
block_dist * np.cos(block_theta),
block_dist * np.sin(block_theta),
0.2,
])
self._reset_dkitty_standing()
target_pos = self._initial_target_pos
heading_pos = self._initial_heading_pos
if heading_pos is None:
heading_pos = target_pos
block_pos = self._initial_block_pos
# Set the tracker locations.
self.tracker.set_state({
'torso': TrackerState(pos=np.zeros(3), rot=
|
np.identity(3)
|
numpy.identity
|
# --------------------------------------------------------
# Visual Detection: State-of-the-Art
# Copyright: <NAME>
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
import cv2
import networkx as nx
import matplotlib.pyplot as plt
from model.utils.config import cfg
from model.utils.net_utils import create_mrt
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode. Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
# Numpy data viewer to demonstrate detection results or ground truth.
class dataViewer(object):
def __init__(self, classes):
self.color_pool = [(255, 207, 136), (68, 187, 92), (153, 255, 0), (68, 187, 187), (0, 153, 255), (187, 68, 163),
(255, 119, 119), (116, 68, 187), (68, 187, 163), (163, 187, 68), (0, 204, 255), (68, 187, 140),
(204, 0, 255), (255, 204, 0), (102, 0, 255), (255, 0, 0), (68, 140, 187), (187, 187, 68),
(0, 255, 153), (119, 255, 146), (187, 163, 68), (187, 140, 68), (255, 153, 0), (255, 255, 0),
(153, 0, 255), (0, 255, 204), (68, 116, 187), (0, 255, 51), (187, 68, 68), (140, 187, 68),
(68, 163, 187), (187, 116, 68), (163, 68, 187), (204, 255, 0), (255, 0, 204), (0, 255, 255),
(140, 68, 187), (0, 102, 255), (153, 214, 255), (255, 102, 0)]
self.classes = classes
self.num_classes = len(self.classes)
self.class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self.ind_to_class = dict(zip(xrange(self.num_classes), self.classes))
self.color_dict = dict(zip(self.classes, self.color_pool[:self.num_classes]))
def draw_single_bbox(self, img, bbox, bbox_color=(163, 68, 187), text_str="", test_bg_color = None):
if test_bg_color is None:
test_bg_color = bbox_color
bbox = tuple(bbox)
text_rd = (bbox[2], bbox[1] + 25)
cv2.rectangle(img, bbox[0:2], bbox[2:4], bbox_color, 2)
cv2.rectangle(img, bbox[0:2], text_rd, test_bg_color, -1)
cv2.putText(img, text_str, (bbox[0], bbox[1] + 20),
cv2.FONT_HERSHEY_PLAIN,
2, (255, 255, 255), thickness=2)
return img
def draw_single_grasp(self, img, grasp, test_str=None, text_bg_color=(255, 0, 0)):
gr_c = (int((grasp[0] + grasp[4]) / 2), int((grasp[1] + grasp[5]) / 2))
for j in range(4):
if j % 2 == 0:
color = (0, 0, 255)
else:
color = (255, 0, 0)
p1 = (int(grasp[2 * j]), int(grasp[2 * j + 1]))
p2 = (int(grasp[(2 * j + 2) % 8]), int(grasp[(2 * j + 3) % 8]))
cv2.line(img, p1, p2, color, 2)
# put text
if test_str is not None:
text_len = len(test_str)
text_w = 17 * text_len
gtextpos = (gr_c[0] - text_w / 2, gr_c[1] + 20)
gtext_lu = (gr_c[0] - text_w / 2, gr_c[1])
gtext_rd = (gr_c[0] + text_w / 2, gr_c[1] + 25)
cv2.rectangle(img, gtext_lu, gtext_rd, text_bg_color, -1)
cv2.putText(img, test_str, gtextpos,
cv2.FONT_HERSHEY_PLAIN,
2, (255, 255, 255), thickness=2)
return img
def draw_graspdet(self, im, dets, g_inds=None):
"""
:param im: original image numpy array
:param dets: detections. size N x 8 numpy array
:param g_inds: size N numpy array
:return: im
"""
# make memory contiguous
im = np.ascontiguousarray(im)
if dets.shape[0] == 0:
return im
dets = dets[dets[:, 0] > 0].astype(np.int)
num_grasp = dets.shape[0]
for i in range(num_grasp):
im = self.draw_single_grasp(im, dets[i], str(g_inds[i]) if g_inds is not None else None)
return im
def draw_objdet(self, im, dets, o_inds = None):
"""
:param im: original image
:param dets: detections. size N x 5 with 4-d bbox and 1-d class
:return: im
"""
# make memory contiguous
im = np.ascontiguousarray(im)
if dets.shape[0] == 0:
return im
dets = dets[dets[:,0] > 0].astype(np.int)
num_grasp = dets.shape[0]
for i in range(num_grasp):
cls = self.ind_to_class[dets[i, -1]]
if o_inds is None:
im = self.draw_single_bbox(im, dets[i][:4], self.color_dict[cls], cls)
else:
im = self.draw_single_bbox(im, dets[i][:4], self.color_dict[cls], '%s ind:%d' % (cls, o_inds[i]))
return im
def draw_graspdet_with_owner(self, im, o_dets, g_dets, g_inds):
"""
:param im: original image numpy array
:param o_dets: object detections. size N x 5 with 4-d bbox and 1-d class
:param g_dets: grasp detections. size N x 8 numpy array
:param g_inds: grasp indice. size N numpy array
:return:
"""
im = np.ascontiguousarray(im)
if o_dets.shape[0] > 0:
o_inds =
|
np.arange(o_dets.shape[0])
|
numpy.arange
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import os
import random
from sklearn.metrics.pairwise import cosine_similarity
import sys
def read_image(img_path, style="Gray", show=False):
"""Reads an image into memory
"""
if style == "Gray":
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
elif style == "RGB":
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
# img = cv2.imread(img_path)
if img is None:
return None
if not img.dtype == np.uint8:
pass
if show:
show_image(img)
return img
def show_image(img, delay=1000):
"""Shows an image.
"""
cv2.namedWindow('image', cv2.WINDOW_AUTOSIZE)
cv2.imshow('image', img)
cv2.waitKey(delay)
cv2.destroyAllWindows()
def read_batch(paths, sty="Gray"):
imgs = []
for i in range(len(paths)):
img = read_image(paths[i], style=sty)
if img is None:
continue
imgs.append(img)
print(str(i) + "th image shape: ", img.shape)
return imgs
def feature_detect(imgs, num_feature=0, contrast_th=0.04):
sift = cv2.xfeatures2d.SIFT_create(nfeatures=num_feature, contrastThreshold=contrast_th)
kp_list = []
des_list = []
# img_output=[]
for i in range(len(imgs)):
kp, des = sift.detectAndCompute(imgs[i], None)
kp_list.append(kp)
des_list.append(des)
# img_output.append(cv2.drawKeypoints(imgs[i],kp,np.array([])))
return kp_list, des_list
def homography_trans(coor, H):
u = coor[0] # column number
v = coor[1] # row number
vec = np.array([u, v, 1]).reshape(-1, 1)
return H.dot(vec)
def refine_keypoint(keypoints, img_size, tol):
tol_coor = tolerant_area(tol)
kp_neighbor_list = -1 * np.ones(img_size, dtype=np.int)
for i in range(len(keypoints)):
u = int(keypoints[i].pt[0]) # column number
v = int(keypoints[i].pt[1])
neighbor_ok = True
for j in range(tol_coor.shape[0]):
r = tol_coor[j][0] + v
c = tol_coor[j][1] + u
if (r >= 0) and (r < img_size[0]) and (c >= 0) and (c < img_size[1]):
if kp_neighbor_list[r][c] != -1:
neighbor_ok = False
break
if neighbor_ok and kp_neighbor_list[v][u] == -1:
kp_neighbor_list[v][u] = i
add_list = []
for i in range(len(keypoints)):
u = int(keypoints[i].pt[0]) # column number
v = int(keypoints[i].pt[1]) # row number
if kp_neighbor_list[v][u] != -1:
add_list.append(kp_neighbor_list[v][u])
kp_neighbor_list[v][u] = -1
return add_list
def tolerant_area(tol):
arr = []
for i in range(-tol, tol + 1):
for j in range(-tol, tol + 1):
if i != 0 or j != 0: # avoid centor point
arr.append([i, j])
arr = np.array(arr)
return arr
def get_refined_list(add_list, keypoints, descriptors):
# Get refined keypoints
add_sort = sorted(add_list)
keypoints_new = []
descriptors_new = []
j = 0
i = 0
while i < len(keypoints) and j < len(add_sort):
if i == add_sort[j]:
keypoints_new.append(keypoints[i])
descriptors_new.append(list(descriptors[i, :]))
j += 1
elif i > add_sort[j]:
print("Add list error")
i += 1
while i < len(keypoints):
keypoints_new.append(keypoints[i])
descriptors_new.append(list(descriptors[i, :]))
i += 1
return keypoints_new, np.array(descriptors_new)
def refine_feature(kp_list, des_list, img_size, tol):
kp_list_new = []
des_list_new = []
for i in range(len(kp_list)):
rm_list = refine_keypoint(kp_list[i], img_size, tol)
kp_new, des_new = get_refined_list(rm_list, kp_list[i], des_list[i])
kp_list_new.append(kp_new)
des_list_new.append(des_new)
return kp_list_new, des_list_new
def draw(imgs, kp_list, rich_draw=False):
img_output = []
for i in range(len(imgs)):
if rich_draw:
flg = cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS
else:
flg = 0
img_output.append(cv2.drawKeypoints(imgs[i], kp_list[i], np.array([]), flags=flg))
return img_output
def shuffle_partition(n_pick, n_data):
all_index = np.arange(n_data)
np.random.shuffle(all_index)
index_pick = all_index[0:n_pick]
index_test = all_index[n_pick:]
return index_pick, index_test
def feature_pair(des1, des2):
cos_sim = cosine_similarity(des1, des2)
# print(cos_sim.shape)
pairs = []
sims = []
for i in range(cos_sim.shape[0]):
index = np.flip(np.argsort(cos_sim[i, :]))
# print(cos_sim[i, index[0:4]])
for j in range(cos_sim.shape[1] - 1):
if cos_sim[i, index[j + 1]] < cos_sim[i, index[j]] * 0.98:
pairs.append([i, index[j]])
sims.append(cos_sim[i, index[j]])
break
return np.array(pairs), np.array(sims)
def extract_point_coor(pair_feat, keypoints, keypoints_prime):
kp = []
kp_prime = []
for i in range(pair_feat.shape[0]):
index_kp = pair_feat[i, 0]
index_kp_prime = pair_feat[i, 1]
kp.append(keypoints[index_kp].pt)
kp_prime.append(keypoints_prime[index_kp_prime].pt)
return np.array(kp), np.array(kp_prime) # n*2 array
def cal_fit_error(H, kp, kp_prime): # kp=[[u,v],], is a n*2 array
n = kp.shape[0]
add = np.ones((1, n))
X = np.vstack((kp.T, add)) # 3*n_points
X_p =
|
np.vstack((kp_prime.T, add))
|
numpy.vstack
|
import numpy as np
from gym.envs.mujoco import HumanoidEnv as HumanoidEnv
from . import register_env
def mass_center(model, sim):
mass = np.expand_dims(model.body_mass, 1)
xpos = sim.data.xipos
return (np.sum(mass * xpos, 0) /
|
np.sum(mass)
|
numpy.sum
|
import tensorflow as tf
import numpy as np
from scipy import signal
from scipy.ndimage import gaussian_filter
from PIL import Image, ImageDraw
import random
import glob, os
import csv
from multiprocessing import Pool
import subprocess
import time
width = 512
height = 512
scale = 2
np.random.seed(os.getpid() + int(time.time()))
random.seed(os.getpid() + int(time.time()))
class BaseData:
def __init__(self):
self.load_idmap()
def load_idmap(self):
self.glyph_id = {}
self.glyphs = {}
self.glyph_type = {}
self.glyph_id[''] = 0
self.glyphs[0] = ''
with open(os.path.join('data','codepoints.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
codehex = row[1]
if len(codehex) > 7:
code = eval('"' + ''.join(['\\u' + codehex[i*4:i*4+4] for i in range(len(codehex) // 4)]) + '"')
else:
code = chr(int(codehex, 16))
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
with open(os.path.join('data','id_map.csv'),'r') as f:
reader = csv.reader(f)
for row in reader:
code = bytes.fromhex(row[2]).decode()
if code in self.glyph_id:
k = self.glyph_id[code]
else:
i = int.from_bytes(code.encode('utf-32le'), 'little')
self.glyph_id[code] = i
self.glyphs[i] = code
k = i
self.glyph_type[k] = int(row[3])
self.id_count = len(self.glyph_id)
def sub_load(args):
exe = os.path.join('data','load_font','load_font.exe')
if not os.path.exists(exe):
exe = os.path.join('data','load_font','load_font')
proc = subprocess.Popen([
exe,
args[0],
'128',
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret = {}
for c in args[1]:
if len(c) == 1:
charbuf = c.encode("utf-32-le")
proc.stdin.write(charbuf[:4])
proc.stdin.flush()
result = proc.stdout.read(32)
code = result[:4]
rows = int.from_bytes(result[4:8], 'little')
width = int.from_bytes(result[8:12], 'little')
boundingWidth = int.from_bytes(result[12:16], 'little', signed=True)
boundingHeight = int.from_bytes(result[16:20], 'little', signed=True)
horiBearingX = int.from_bytes(result[20:24], 'little', signed=True)
horiBearingY = int.from_bytes(result[24:28], 'little', signed=True)
horiAdvance = int.from_bytes(result[28:32], 'little', signed=True)
if rows * width == 0:
continue
assert(charbuf == code)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
horiBearingX = horiBearingX / 64
horiBearingY = horiBearingY / 64
horiAdvance = horiAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': horiAdvance,
'image': img,
}
}
result = proc.stdout.read(28)
rows = int.from_bytes(result[:4], 'little')
width = int.from_bytes(result[4:8], 'little')
boundingWidth = int.from_bytes(result[8:12], 'little', signed=True)
boundingHeight = int.from_bytes(result[12:16], 'little', signed=True)
vertBearingX = int.from_bytes(result[16:20], 'little', signed=True)
vertBearingY = int.from_bytes(result[20:24], 'little', signed=True)
vertAdvance = int.from_bytes(result[24:28], 'little', signed=True)
boundingWidth = boundingWidth / 64
boundingHeight = boundingHeight / 64
vertBearingX = vertBearingX / 64
vertBearingY = vertBearingY / 64
vertAdvance = vertAdvance / 64
buffer = proc.stdout.read(rows*width)
img = np.frombuffer(buffer, dtype='ubyte').reshape(rows,width)
value['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': boundingWidth,
'boundingHeight': boundingHeight,
'vertBearingX': vertBearingX,
'vertBearingY': vertBearingY,
'vertAdvance': vertAdvance,
'image': img,
}
ret[(args[0],c)] = value
else:
pass
proc.stdin.close()
return ret
def sub_load_image(path):
dirnames = glob.glob(os.path.join(path, '*'))
ret = {}
for d in dirnames:
c_code = os.path.basename(d)
char = str(bytes.fromhex(c_code), 'utf-8')
count = 0
for f in glob.glob(os.path.join(d, '*.png')):
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%count,char)] = {
'horizontal': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'horiBearingX': horiBearingX,
'horiBearingY': horiBearingY,
'horiAdvance': 96.0,
'image': img,
},
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
}
count += 1
vert_imgs = glob.glob(os.path.join(d, 'vert', '*.png'))
if 0 < len(vert_imgs) <= count:
for i in range(count):
f = vert_imgs[i % len(vert_imgs)]
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%i,char)]['vertical'] = {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': horiBearingX,
'vertBearingY': horiBearingY,
'vertAdvance': 96.0,
'image': img,
}
elif 0 < len(vert_imgs):
vcount = 0
for f in vert_imgs:
rawim = np.asarray(Image.open(f).convert('L'))
ylines = np.any(rawim < 255, axis=1)
content = np.where(ylines)[0]
rows = content[-1] - content[0] + 1
horiBearingY = 128 - 16 - content[0]
vertBearingY = content[0] - 16
y = content[0]
xlines = np.any(rawim < 255, axis=0)
content = np.where(xlines)[0]
width = content[-1] - content[0] + 1
horiBearingX = content[0] - 16
vertBearingX = content[0] - 64
x = content[0]
if rows == 0 or width == 0:
continue
img = 255 - rawim[y:y+rows,x:x+width]
ret[('hand%06d'%vcount,char)] = {
'horizontal': ret[('hand%06d'%(vcount % count),char)]['horizontal'],
'vertical': {
'rows': rows,
'width': width,
'boundingWidth': width,
'boundingHeight': rows,
'vertBearingX': vertBearingY,
'vertBearingY': vertBearingX,
'vertAdvance': 96.0,
'image': img,
}
}
vcount += 1
return ret
def gaussian_kernel(kernlen=7, xstd=1., ystd=1.):
gkern1dx = signal.gaussian(kernlen, std=xstd).reshape(kernlen, 1)
gkern1dy = signal.gaussian(kernlen, std=ystd).reshape(kernlen, 1)
gkern2d = np.outer(gkern1dy, gkern1dx)
return gkern2d
def apply_random_filter(images):
p = np.random.uniform(0., 1.)
if p < 0.25:
sigma = np.random.uniform(0., 1.75)
return gaussian_filter(images, sigma=sigma)
if p < 0.5:
sigma = np.random.uniform(0., 6.)
gauss = gaussian_filter(images, sigma=sigma)
gain = np.random.uniform(0., 5.)
return (1 + gain) * images - gain * gauss
return images
def is_Font_match(font, target):
if target.startswith('hand'):
return font.startswith('hand')
else:
return font == target
class FontData(BaseData):
def __init__(self):
super().__init__()
self.img_cache = {}
print('loading handwrite image')
self.img_cache.update(sub_load_image(os.path.join('data','handwritten')))
print('loading enfont')
enfont_files = sorted(glob.glob(os.path.join('data','enfont','*.ttf')) + glob.glob(os.path.join('data','enfont','*.otf')))
en_glyphs = [self.glyphs[key] for key in self.glyphs.keys() if self.glyph_type.get(key,-1) in [0,1,2,6]]
items = [(f, en_glyphs) for f in enfont_files]
total = len(enfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
print('loading jpfont')
jpfont_files = sorted(glob.glob(os.path.join('data','jpfont','*.ttf')) + glob.glob(os.path.join('data','jpfont','*.otf')))
items = [(f, list(self.glyphs.values())) for f in jpfont_files]
total = len(jpfont_files)
with Pool() as pool:
progress = tf.keras.utils.Progbar(total, unit_name='item')
dicts = pool.imap_unordered(sub_load, items)
for dictitem in dicts:
self.img_cache.update(dictitem)
progress.add(1)
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
for key in self.img_cache:
i = self.glyph_id[key[1]]
if i not in self.glyph_type:
self.glyph_type[i] = type_count_max
type_count_max = max([self.glyph_type[k] for k in self.glyph_type]) + 1
gtype_count = [0 for _ in range(type_count_max)]
type_count = [0 for _ in range(type_count_max)]
for key in self.img_cache:
t = self.glyph_type[self.glyph_id[key[1]]]
type_count[t] += 1
for k in self.glyph_type:
gtype_count[self.glyph_type[k]] += 1
self.image_keys = list(self.img_cache.keys())
self.test_keys = self.get_test_keys()
self.train_keys = self.get_train_keys()
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1, 1.0]
self.prob_map = [p/t for p,t in zip(self.prob_map, type_count)]
self.random_probs_train = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.random_probs_test = [self.prob_map[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_kanji = [0, 0, 0, 0, 0, 1.0, 0, 0, 1.0, 1.0, 0.5, 0]
self.prob_map_kanji = [p/t for p,t in zip(self.prob_map_kanji, type_count)]
self.kanji_probs_train = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.kanji_probs_test = [self.prob_map_kanji[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_num = [1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_num = [p/t for p,t in zip(self.prob_map_num, type_count)]
self.num_probs_train = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.num_probs_test = [self.prob_map_num[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_alpha = [0, 1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_alpha = [p/t for p,t in zip(self.prob_map_alpha, type_count)]
self.alpha_probs_train = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.alpha_probs_test = [self.prob_map_alpha[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
# 0 1 2 3 4 5 6 7 8 9 10 11
self.prob_map_hira = [0, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0]
self.prob_map_hira = [p/t for p,t in zip(self.prob_map_hira, type_count)]
self.hira_probs_train = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.train_keys]
self.hira_probs_test = [self.prob_map_hira[self.glyph_type[self.glyph_id[key[1]]]] for key in self.test_keys]
self.train_keys_num = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.train_num_fonts = list(set([key[0] for key in self.train_keys_num]))
self.test_keys_num = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 0]
self.test_num_fonts = list(set([key[0] for key in self.test_keys_num]))
self.train_keys_capital = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.train_capital_fonts = list(set([key[0] for key in self.train_keys_capital]))
self.test_keys_capital = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 1]
self.test_capital_fonts = list(set([key[0] for key in self.test_keys_capital]))
self.train_keys_small = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.train_small_fonts = list(set([key[0] for key in self.train_keys_small]))
self.test_keys_small = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] == 2]
self.test_small_fonts = list(set([key[0] for key in self.test_keys_small]))
self.train_keys_alpha = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.train_alpha_fonts = list(set([key[0] for key in self.train_keys_alpha]))
self.test_keys_alpha = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [0,1,2,6]]
self.test_alpha_fonts = list(set([key[0] for key in self.test_keys_alpha]))
self.train_keys_jp = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.test_keys_jp = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4,5,7,8,9]]
self.train_jp_fonts = list(set([key[0] for key in self.train_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_jp_fonts])
self.train_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_jp_fonts]
self.test_jp_fonts = list(set([key[0] for key in self.test_keys_jp]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_jp_fonts])
self.test_jp_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_jp_fonts]
self.train_keys_hira = [x for x in self.train_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.test_keys_hira = [x for x in self.test_keys if self.glyph_type[self.glyph_id[x[1]]] in [3,4]]
self.train_hira_fonts = list(set([key[0] for key in self.train_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.train_hira_fonts])
self.train_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.train_hira_fonts]
self.test_hira_fonts = list(set([key[0] for key in self.test_keys_hira]))
p_sum = sum([0 if '.' in f else 1 for f in self.test_hira_fonts])
self.test_hira_fonts_p = [1. if '.' in f else 1/p_sum for f in self.test_hira_fonts]
self.train_keys_jpnum = [x for x in self.train_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.train_jp_fonts)]
self.test_keys_jpnum = [x for x in self.test_keys if (self.glyph_type[self.glyph_id[x[1]]] in [0,3,4,5,7]) and (x[0] in self.test_jp_fonts)]
self.train_jpnum_fonts = list(set([key[0] for key in self.train_keys_jpnum]))
self.train_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.train_jpnum_fonts]
self.test_jpnum_fonts = list(set([key[0] for key in self.test_keys_jpnum]))
self.test_jpnum_fonts_p = [1. if '.' in f else 0. for f in self.test_jpnum_fonts]
self.prob_map_clustering = [
gtype_count[0] / type_count[0],
gtype_count[1] / type_count[1],
gtype_count[2] / type_count[2],
gtype_count[3] / type_count[3],
gtype_count[4] / type_count[4],
gtype_count[5] / type_count[5],
gtype_count[6] / type_count[6],
0.,
0.,
0.,
0.,
0.
]
self.random_background = glob.glob(os.path.join('data','background','*'))
self.max_std = 8.0
self.min_ker = 4
def get_test_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
test_keys = [k for k in keys if fontname(k[0]).startswith('Noto')]
return test_keys
def get_train_keys(self):
def fontname(fontpath):
return os.path.splitext(os.path.basename(fontpath))[0]
keys = self.image_keys
train_keys = [k for k in keys if not fontname(k[0]).startswith('Noto')]
return train_keys
def load_background_images(self):
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency (http://stackoverflow.com/a/1963146)
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL (http://stackoverflow.com/a/1963146)
alpha = im.convert('RGBA').getchannel('A')
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
# (http://stackoverflow.com/a/8720632 and http://stackoverflow.com/a/9459208)
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
im_file = random.choice(self.random_background)
im = Image.open(im_file)
im = remove_transparency(im).convert('RGB')
scale_min = max(width / im.width, height / im.height)
scale_max = max(scale_min + 0.5, 1.5)
s = np.random.uniform(scale_min, scale_max)
im = im.resize((int(im.width * s)+1, int(im.height * s)+1))
x1 = np.random.randint(0, im.width - width)
y1 = np.random.randint(0, im.height - height)
im_crop = im.crop((x1, y1, x1 + width, y1 + height))
img =
|
np.asarray(im_crop)
|
numpy.asarray
|
import numpy as np
import pdb
try:
import pyBigWig
except:
pdb.set_trace()
import pyBigWig
from keras.preprocessing.image import Iterator
# Modified from keras
class DataIterator(Iterator):
def __init__(self, data_list, genome, batch_size, seqlen, bigwig_rc_order=None, shuffle=False, seed=1337):
self.data_list = data_list
if data_list is None or len(data_list) == 0:
self.num_bigwigs = 0
else:
self.num_bigwigs = len(data_list[0][4])
self.num_meta = len(data_list[0][5])
if bigwig_rc_order is None:
self.bigwig_rc_order = np.arange(self.num_bigwigs)
else:
self.bigwig_rc_order = bigwig_rc_order
self.genome = genome
self.seqlen = seqlen
self.nucleotides = np.array(['A', 'C', 'G', 'T'])
if data_list is None or len(data_list) == 0:
self.labeled = False
else:
self.labeled = len(data_list[0]) == 7
if self.labeled:
print("label True")
self.num_tfs = len(data_list[0][6])
super(DataIterator, self).__init__(len(data_list), batch_size, shuffle, seed)
def __len__(self):
return len(self.data_list)
def next(self):
# for python 2.x.
# Keeps under lock only the mechanism which advances
# the indexing of each batch
# see http://anandology.com/blog/using-iterators-and-generators/
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
batch_X_seq = np.zeros((current_batch_size, self.seqlen, 4), dtype=bool)
batch_X_bigwig = np.zeros((current_batch_size, self.seqlen, self.num_bigwigs), dtype=np.float32)
if self.num_meta:
batch_X_meta = np.zeros((current_batch_size, self.num_meta), dtype=np.float32)
if self.labeled:
batch_y = np.zeros((current_batch_size, self.num_tfs), dtype=bool)
for i, j in enumerate(index_array):
data = self.data_list[j]
chrom = data[0]
start = data[1]
stop = data[2]
shift = data[3]
bigwig_files = data[4]
meta = data[5]
if shift:
s = np.random.randint(-shift, shift+1)
start += s
stop += s
med = (start + stop) / 2
start = med - self.seqlen / 2
stop = med + self.seqlen / 2
batch_X_seq[i] = self.genome[chrom][start:stop]
if self.num_meta:
batch_X_meta[i] = meta
for k, bigwig_file in enumerate(bigwig_files):
bigwig = pyBigWig.open(bigwig_file)
sample_bigwig = np.array(bigwig.values(chrom, start, stop))
bigwig.close()
sample_bigwig[np.isnan(sample_bigwig)] = 0
batch_X_bigwig[i, :, k] = sample_bigwig
if k == 1:
batch_X_bigwig[i, :, k] = sample_bigwig
if k == 2:
#batch_X_bigwig[i, :, k-1] = 0.5*batch_X_bigwig[i, :, k-1]+0.5*batch_X_bigwig[i, :, k]
batch_X_bigwig[i, :, k-1] = (1-0)*batch_X_bigwig[i, :, k-1]+batch_X_bigwig[i, :, k]
if self.labeled:
batch_y[i] = data[6]
# otherwise the binding code is 'U', so leave as 0
batch_X_seq_rc = batch_X_seq[:, ::-1, ::-1]
if k == 2:
batch_X_bigwig = batch_X_bigwig[:,:,:2]
batch_X_bigwig_rc = batch_X_bigwig[:, ::-1, self.bigwig_rc_order[:2]]
else:
batch_X_bigwig_rc = batch_X_bigwig[:, ::-1, self.bigwig_rc_order]
batch_X_fwd = np.concatenate([batch_X_seq, batch_X_bigwig,batch_X_bigwig_rc], axis=-1)
batch_X_rev = np.concatenate([batch_X_seq_rc, batch_X_bigwig_rc,batch_X_bigwig], axis=-1)
batch_X_fwd = np.expand_dims(batch_X_fwd,axis=1)
batch_X_rev =
|
np.expand_dims(batch_X_rev,axis=1)
|
numpy.expand_dims
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Provide functions that operate on projections
"""
import itertools
import warnings
import multiprocessing
import tomopy
import numpy as np
import scipy as sp
import concurrent.futures as cf
from typing import Tuple
from scipy.signal import medfilt
from scipy.signal import medfilt2d
from scipy.ndimage import gaussian_filter
from scipy.ndimage import gaussian_filter1d
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from skimage import exposure
from skimage.transform import probabilistic_hough_line
from skimage.feature import canny
from skimage.feature import register_translation
from sklearn.cluster import KMeans
from tifffile import imread
from lmfit.models import GaussianModel
from lmfit.models import LorentzianModel
from tomopy import minus_log
from tomopy import find_center_pc
from tomoproc.util.npmath import rescale_image
from tomoproc.util.peakfitting import fit_sigmoid
from tomoproc.util.npmath import rescale_image
from tomoproc.util.npmath import binded_minus_log
def detect_sample_in_sinogram(
sino: np.ndarray,
kernel_size: int=3,
sigma: int=50,
minimum_distance_to_edge: int=5,
) -> Tuple[int, int]:
"""
Description
-----------
Automatically detect the left and right edge of the sample region
in a sinogram with median and gaussian filtering.
Parameters
----------
sino: np.ndarray
Sinogram for evaluation
kernel_size: int
median filter (quick denoising) kernel size
sigma: int
gaussian filter kernel size
minimum_distance_to_edge: int
minimum amount of pixels to sinogram edge
Returns
-------
(int, int)
left and right edge of the sample region
"""
# use median filter and gaussian filter to locate the sample region
# -- median filter is to counter impulse noise
# -- gaussian filter is for estimating the sample location
prf = np.gradient(
np.sum(
gaussian_filter(
medfilt2d(sino, kernel_size=kernel_size),
sigma=sigma,
),
axis=0,
)
)
return (
max(prf.argmin(), minimum_distance_to_edge),
min(prf.argmax(), sino.shape[1]-minimum_distance_to_edge),
)
def detect_corrupted_proj(
projs: np.ndarray,
omegas: np.ndarray,
threshold: float=0.8,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Description
-----------
Corrupted frames/projections will add a forgy layer (artifact) of random
noise to the final reconstruction. These corrupted frames can be detected
through 180 degree pair-wise checking.
Parameters
----------
projs: np.ndarray
tomography image stack [axis_omega, axis_imgrow, axis_imgcol]
omegas: np.ndarray
angular position vector
threshold: float
Threshold for picking out the outliers
Returns
-------
tuple(np.ndarray, np.ndarray)
idx_BAD idx_GOOD
Return the indices of BAD frames and GOOD frames/projections
"""
# assume equal step, find the index range equals to 180 degree
dn = int(np.pi/abs(omegas[1] - omegas[0]))
# get the cnts from each 180 pairs
# use the faster version instead
with cf.ProcessPoolExecutor() as e:
_jobs = [
e.submit(
tomopy.find_center_pc,
rescale_image(binded_minus_log(projs[nimg,:,:])),
rescale_image(binded_minus_log(projs[nimg+dn,:,:])),
)
for nimg in range(dn)
]
cnts = [me.result() for me in _jobs]
# 180 -> 360
cnts = np.array(cnts + cnts)
# locate outlier
diff = np.absolute(cnts - medfilt(cnts))/cnts
return np.where(diff>threshold)[0], np.where(diff<=threshold)[0]
def guess_slit_box(img: np.ndarray, boost: bool=True) -> dict:
"""
Description
-----------
Auto detect/guess the four blades position (in pixels) for given image
Parameters
----------
img: np.ndarray
2D tomography image with slit box
Returns
-------
dict:
dictionary contains the approximated position (in pixel) for the four
slit blades
NOTE
----
For images without any slit blades, a random (probably useless) region
will be returned.
Relative fast:
tested on MacBookPro13,3
395 ms ± 14 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
"""
if boost:
# Contrast stretching
pl, ph = np.percentile(img, (2, 98))
img = exposure.rescale_intensity(img, in_range=(pl, ph))
# equilize hist
img = exposure.equalize_adapthist(img)
# map to log to reveal transition box
img = np.log(medfilt2d(img.astype(float))+1)
# get row and col profile gradient
pdot_col = np.gradient(gaussian_filter1d(
|
np.average(img, axis=0)
|
numpy.average
|
import random
import os
import math
from vtkmodules import all as vtk
from vtkmodules.util import numpy_support
import numpy as np
from matplotlib import pyplot as plt
from scipy.spatial import KDTree
from multiprocessing import Pool
import torch
from torch.utils.data import Dataset
from utils.sdf import SDFRead
from thingking import loadtxt
from utils.attr_kdtree import KDTree as AKDTree
class PointData(Dataset):
#dataset for individual point
def __init__(self,data ,k ,r ,ball ,sampler ):
self.k = int(k)
self.r = r
self.ball = ball
# assume data alreade normalized coord [0, 1] attr [0.1, 0.9]
coord = data[:,:3]
attr = data[:,3:]
kd = KDTree(coord,leafsize=100)
if isinstance(sampler,int):
sample_id = uniform_sample(sampler,attr)
self.center = data[sample_id,:3]
else:
# use input index as samples
sampler = np.array(sampler)
if len(sampler.shape) == 1:
# sample index
sample_id = sampler
self.center = data[sample_id,:3]
else:
# sample centers (normalized)
self.center = sampler
self.sample_id = sample_id
if self.ball:
self.nn = kd.query_ball_point(self.center,self.r,workers=8, return_sorted=False)
# return_sorted means sort by index but not distance
else:
self.dist, self.nn = kd.query(self.center,self.k,workers=8)
# already assume ordered by distance
self.data = data
self.center = self.center[:,None,:]
def __getitem__(self, index):
# renormalize the point cloud
nn_id = self.nn[index]
center = self.center[index]
pc = self.data[nn_id]
pc[...,:3] -= center[...,:3]
if self.ball:
# reorder the point cloud according to distance
dist = np.sum(pc[:,:3]**2,axis=1)
order = np.argsort(dist)
pc = pc[order]
return pc
def __len__(self):
return len(self.nn)
def uniform_sample(sample_size,attr):
# uniform samples of attributes
max_level = round(math.log2(sample_size))
attr_kd = AKDTree(attr, leafsize=1, max_level=max_level)
leaf = all_leaf_nodes_at_level(attr_kd.tree, max_level)
rand = np.random.rand((len(leaf)))
idx = []
for i,l in enumerate(leaf):
indices = l.idx
idx.append(indices[int(len(indices)*rand[i])])
return idx
def halo_reader(filename):
try:
ID, DescID, Mvir, Vmax, Vrms, Rvir, Rs, Np, x, y, z, VX, VY, VZ, JX, JY, JZ, Spin, rs_klypin, Mvir_all, M200b, M200c, M500c, M2500c, Xoff, Voff, spin_bullock, b_to_a, c_to_a, A_x_, A_y_, A_z_, b_to_a_500c_, c_to_a_500c_, A_x__500c_, A_y__500c_, A_z__500c_, TU, M_pe_Behroozi, M_pe_Diemer = \
loadtxt(filename, unpack=True)
r = Rvir/1000
try:
halo_num = len(x)
return np.stack([x,y,z],axis=1),r
except TypeError:
return np.array([[x,y,z]]),np.array([r])
except ValueError:
return [],[]
def IoU(predict,target):
assert len(predict) == len(target)
predict = np.array(predict)
target = np.array(target)
union = np.logical_or(predict,target)
inter = np.logical_and(predict,target)
return np.sum(inter)/np.sum(union)
def scatter_3d(array,vmin=None,vmax=None,threshold = -1e10,center=None,save=False,fname=None):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
array = array[array[:,3] > threshold]
ax.scatter(array[:,0],array[:,1],array[:,2],c=array[:,3],marker='.',vmin=vmin,vmax=vmax)
if center is not None:
ax.scatter(center[0],center[1],center[2],c="red",marker='o')
# ax2 = fig.add_subplot(122,projection='3d',sharex=ax,sharey=ax,sharez=ax)
# ax2.scatter(array2[:,0],array2[:,1],array2[:,2],c=array2[:,3],marker='^',vmin=-1,vmax=1)
if save:
plt.savefig(fname)
else:
plt.show()
def data_reader(filename, type):
if type == 'cos':
data = sdf_reader(filename)
attr_min = np.array([-2466, -2761, -2589, -17135.6, -20040, -20096, -6928022])
attr_max = np.array([2.7808181e+03, 2.9791230e+03, 2.6991892e+03, 1.9324572e+04, 2.0033873e+04, 1.7973633e+04, 6.3844562e+05])
# data_min= np.array([0,0,0, -2466, -2761, -2589, -17135.6, -20040, -20096, -6928022])
# data_max= np.array([6.2500008e+01, 6.2500000e+01, 6.2500000e+01, 2.7808181e+03, 2.9791230e+03, 2.6991892e+03, 1.9324572e+04, 2.0033873e+04, 1.7973633e+04, 6.3844562e+05])
# mean = [30.4, 32.8, 32.58, 0, 0, 0, 0, 0, 0, -732720]
# std = [18.767, 16.76, 17.62, 197.9, 247.2, 193.54, 420.92, 429, 422.3, 888474]
elif type == 'fpm':
data = fpm_reader(filename)
attr_min = np.array([0, -5.63886223e+01, -3.69567909e+01, -7.22953186e+01])
attr_max = np.array([357.19000244, 38.62746811, 48.47133255, 50.60621262])
# data_min = np.array([-5, -5, 0, 0, -5.63886223e+01, -3.69567909e+01, -7.22953186e+01])
# data_max = np.array([ 5, 5, 10.00022221, 357.19000244, 38.62746811, 48.47133255, 50.60621262])
# mean = [0, 0, 5, 23.9, 0, 0, 0.034]
# std = [2.68, 2.68, 3.09, 55.08, 0.3246, 0.3233, 0.6973]
elif type =='jet3b':
data = jet3b_reader(filename)
attr_min = np.array([-1.50166025e+01, 1.47756422e+00])
attr_max = np.array([1.24838667e+01, 1.00606432e+01])
# normalize coordinates
coord = data[:,:3]
coord_min = coord.min(0)
coord_max = coord.max(0)
coord = (coord - coord_min) / (coord_max-coord_min)
coord = np.clip(coord,0,1)
# normalize attr to [0.1,0.9]
attr = data[:,3:]
attr = (attr - attr_min) / (attr_max - attr_min)
attr = attr * 0.8 + 0.1
attr =
|
np.clip(attr,0.1,0.9)
|
numpy.clip
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# File: useful functions
import os
import datetime
import numpy as np
from scipy import signal
from scipy.io import wavfile
class Logger:
def __init__(self, logf, add=True):
if not add and os.path.isfile(logf):
os.remove(logf)
self.out = open(logf, 'a')
self.out.write("\n{}\n".format(datetime.datetime.now()))
def __del__(self):
if self.out is not None:
self.close()
def __call__(self, msg):
print(msg)
self.out.write("{}\n".format(msg))
self.out.flush()
def close(self):
self.out.close()
self.out = None
def set_log(path, add=True):
if path.endswith('.txt'):
folder = os.path.dirname(path)
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
else:
os.makedirs(path, exist_ok=True)
path = os.path.join(path, "log.txt")
logprint = Logger(path, add)
return path, logprint
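# Hedged usage sketch (illustrative only): "logs" is an assumed example
# directory, not part of the original code. set_log creates logs/log.txt and
# returns a Logger that both prints and appends each message to that file.
def _example_logging():
    log_path, logprint = set_log("logs", add=True)
    logprint("starting run")
    logprint.close()
    return log_path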
def dat_load_trunc(files, path, seg_len, max_size):
i = 0
X_train = np.array([])
for f in files:
file_path = path + f
X = np.load(file_path)
if i == 0:
X_train = X[1:, :].T
frame_num_list = np.array([X.shape[1]])
else:
X_train = np.append(X_train, X[1:, :].T, axis=0)
frame_num_list = np.append(frame_num_list, X.shape[1])
i += 1
n_frame, n_freq = X_train.shape
if n_frame > seg_len:
n_frame = int(n_frame / seg_len) * seg_len
X_train = X_train[:n_frame, :]
X_train = X_train.reshape(-1, seg_len, n_freq)
else:
X_tmp = np.zeros((seg_len, n_freq), dtype=X_train.dtype)
X_tmp[:n_frame, :] = X_train
X_train = X_tmp.reshape(-1, seg_len, n_freq)
n_seg = X_train.shape[0]
Y = X_train.real[np.newaxis]
Y = np.append(Y, X_train.imag[np.newaxis], axis=0)
Y = np.transpose(Y, (1, 0, 3, 2))
if n_seg > max_size:
Y = Y[:max_size]
frame_num_list = frame_num_list[:max_size]
return Y, frame_num_list
def prenorm(stat_path, X):
# X must be a 4D array with size (N, n_ch, n_freq, n_frame)
# stat_path is a path for a txt file containing mean and standard deviation of X
# The txt file is assumed to contain a 1D array with size 2 where
# the first and second elements are the mean and standard deviation of X.
if stat_path is None or not os.path.exists(stat_path):
X_abs = np.linalg.norm(X, axis=1, keepdims=True)
gv = np.mean(np.power(X_abs, 2), axis=(0, 1, 2, 3), keepdims=True)
gs = np.sqrt(gv)
X = X / gs
else:
gs = np.load(stat_path)[1]
X = X / gs
return X, gs
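# Hedged usage sketch (illustrative only): when no statistics file exists
# (stat_path=None), prenorm divides the batch by its global RMS magnitude.
# The (N, n_ch, n_freq, n_frame) shape below is an assumed example.
def _example_prenorm():
    X = np.random.randn(4, 2, 257, 128)
    X_norm, gs = prenorm(None, X)
    return X_norm.shape, gs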
def back_projection(Y, X):
I, J, M = Y.shape
if X.shape[2] == 1:
A = np.zeros((1, M, I), dtype=complex)
Z = np.zeros((I, J, M),dtype=complex)
for i in range(I):
Yi = np.squeeze(Y[i, :, :]).T # channels x frames (M x J)
Yic = np.conjugate(Yi.T)
A[0, :, i] = X[i, :, 0] @ Yic @ np.linalg.inv(Yi @ Yic)
A[
|
np.isnan(A)
|
numpy.isnan
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
import torch.nn.functional as F
def running_avg(x, y, k=.99):
return k * x + (1 - k) * y
def softmax(x, d=-1):
tmp = np.exp(np.array(x))
return tmp / tmp.sum(axis=d, keepdims=True)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def inv_sigmoid(x):
return np.log(x / (1 - x))
def dist(a, b):
return np.linalg.norm(a - b)
def smooth_arr(arr, window=3):
to_flatten = False
if arr.ndim == 1:
to_flatten = True
arr = np.expand_dims(arr, 1)
pad = window // 2
tmp_arr = F.pad(
torch.unsqueeze(torch.Tensor(arr.T), 0), [pad, pad], mode='reflect')
tmp_arr = np.array(F.avg_pool1d(tmp_arr, window, stride=1).data)
tmp_arr = tmp_arr[0].T
if to_flatten:
tmp_arr = tmp_arr[:, 0]
return tmp_arr
def decathlon_score(scores, task_idxs=None):
if task_idxs is None:
task_idxs = [i for i in range(10)]
baseline_err = 1 - np.array([
59.87, 60.34, 82.12, 92.82, 55.53, 97.53, 81.41, 87.69, 96.55, 51.20
]) / 100
baseline_err = baseline_err[task_idxs]
num_tasks = len(task_idxs)
max_err = 2 * baseline_err
gamma_vals = np.ones(num_tasks) * 2
alpha_vals = 1000 * (max_err)**(-gamma_vals)
err = 1 - scores
if num_tasks == 1:
err = [err]
all_scores = []
for i in range(num_tasks):
all_scores += [alpha_vals[i] * max(0, max_err[i] - err[i])**gamma_vals[i]]
return sum(all_scores), all_scores
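# Hedged usage sketch (illustrative only): score the first two Decathlon tasks
# from their accuracies. Each task contributes 0 points at twice its baseline
# error and up to 1000 points at zero error.
def _example_decathlon_score():
    total, per_task = decathlon_score(np.array([0.65, 0.62]), task_idxs=[0, 1])
    return total, per_task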
def rescale(x, min_val, max_val, invert=False):
if not invert:
return x * (max_val - min_val) + min_val
else:
return (x - min_val) / (max_val - min_val)
def pow10(x, min_val, max_val, invert=False):
log_fn = np.log if type(x) is float else torch.log
if not invert:
return 10**rescale(x,
np.log(min_val) / np.log(10),
np.log(max_val) / np.log(10))
else:
return rescale(
log_fn(x) / np.log(10),
np.log(min_val) / np.log(10),
|
np.log(max_val)
|
numpy.log
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import numpy as np
from bisect import insort, bisect_left
from warnings import warn
def rel_dist(x,y):
x = np.asarray(x)
y = np.asarray(y)
return np.linalg.norm(x-y)/np.linalg.norm(np.mean((x,y)))
class Anchor(tuple):
"""
Class for a single anchor. Behaves mostly like a tuple, except that the respective components can also be accessed via the attributes `time`, `state`, and `diff`, and some copying and checks are performed upon creation.
Also, it implements the less-than operator (<) for comparison by time, which allows to use Python’s sort routines.
"""
def __new__( cls, time, state, diff ):
state = np.atleast_1d(np.array(state,dtype=float,copy=True))
diff = np.atleast_1d(np.array(diff ,dtype=float,copy=True))
if len(state.shape) != 1:
raise ValueError("State must be a number or one-dimensional iterable.")
if state.shape != diff.shape:
raise ValueError("State and diff do not match in shape.")
return super().__new__(cls,(time,state,diff))
def __init__(self, *args):
self.time = self[0]
self.state = self[1]
self.diff = self[2]
# This is for sorting, which is guaranteed (docs.python.org/3/howto/sorting.html) to use __lt__, and bisect_left:
def __lt__(self,other):
if isinstance(other,Anchor):
return self.time < other.time
else:
return self.time < float(other)
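# Hedged usage sketch (illustrative only): anchors compare by time via __lt__,
# so a plain Python sort orders them chronologically.
def _example_anchor_ordering():
    late = Anchor(1.0, [0.0], [0.0])
    early = Anchor(0.0, [1.0], [0.0])
    return [anchor.time for anchor in sorted([late, early])]   # -> [0.0, 1.0]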
def interpolate(t,i,anchors):
"""
Returns the `i`-th value of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
return interpolate_vec(t,anchors)[i]
def interpolate_vec(t,anchors):
"""
Returns all values of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
q = (anchors[1].time-anchors[0].time)
x = (t-anchors[0].time) / q
a = anchors[0].state
b = anchors[0].diff * q
c = anchors[1].state
d = anchors[1].diff * q
return (1-x) * ( (1-x) * (b*x + (a-c)*(2*x+1)) - d*x**2) + c
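# Hedged worked example (illustrative only): for anchors at times 0 and 1 with
# states 0 and 1 and zero derivatives at both ends, the cubic Hermite
# interpolant passes through 0.5 exactly at t = 0.5.
def _example_interpolation():
    anchors = ( Anchor(0.0, [0.0], [0.0]), Anchor(1.0, [1.0], [0.0]) )
    return interpolate(0.5, 0, anchors)   # -> 0.5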
def interpolate_diff(t,i,anchors):
"""
Returns the `i`-th component of the derivative of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
return interpolate_diff_vec(t,anchors)[i]
def interpolate_diff_vec(t,anchors):
"""
Returns the derivative of a cubic Hermite interpolant of the `anchors` at time `t`.
"""
q = (anchors[1].time-anchors[0].time)
x = (t-anchors[0].time) / q
a = anchors[0].state
b = anchors[0].diff * q
c = anchors[1].state
d = anchors[1].diff * q
return ( (1-x)*(b-x*3*(2*(a-c)+b+d)) + d*x ) /q
sumsq = lambda x: np.sum(x**2)
# The matrix induced by the scalar product of the cubic Hermite interpolants of two anchors, if their distance is normalised to 1.
sp_matrix = np.array([
[156, 22, 54, -13],
[ 22, 4, 13, -3],
[ 54, 13, 156, -22],
[-13, -3, -22, 4],
])/420
# The matrix induced by the scalar product of the cubic Hermite interpolants of two anchors, if their distance is normalised to 1, but the initial portion z of the interval is not considered for the scalar product.
def partial_sp_matrix(z):
h_1 = - 120*z**7 - 350*z**6 - 252*z**5
h_2 = - 60*z**7 - 140*z**6 - 84*z**5
h_3 = - 120*z**7 - 420*z**6 - 378*z**5
h_4 = - 70*z**6 - 168*z**5 - 105*z**4
h_6 = - 105*z**4 - 140*z**3
h_7 = - 210*z**4 - 420*z**3
h_5 = 2*h_2 + 3*h_4
h_8 = - h_5 + h_7 - h_6 - 210*z**2
return np.array([
[ 2*h_3 , h_1 , h_7-2*h_3 , h_5 ],
[ h_1 , h_2 , h_6-h_1 , h_2+h_4 ],
[ h_7-2*h_3, h_6-h_1, 2*h_3-2*h_7-420*z, h_8 ],
[ h_5 , h_2+h_4, h_8 , -h_1+h_2+h_5+h_6 ]
])/420
def norm_sq_interval(anchors, indices):
"""
Returns the squared norm of the interpolant of `anchors` for the `indices`.
"""
q = (anchors[1].time-anchors[0].time)
vector = np.vstack([
anchors[0].state[indices] , # a
anchors[0].diff[indices] * q, # b
anchors[1].state[indices] , # c
anchors[1].diff[indices] * q, # d
])
return np.einsum(
vector , [0,2],
sp_matrix, [0,1],
vector , [1,2],
)*q
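# Hedged worked example (illustrative only): for a constant interpolant with
# state 1 on an interval of length 1, the squared norm is exactly 1
# (the relevant entries of sp_matrix sum to 420/420).
def _example_norm_sq():
    anchors = ( Anchor(0.0, [1.0], [0.0]), Anchor(1.0, [1.0], [0.0]) )
    return norm_sq_interval(anchors, indices=[0])   # -> array([1.0])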
def norm_sq_partial(anchors, indices, start):
"""
Returns the sqared norm of the interpolant of `anchors` for the `indices`, but only taking into account the time after `start`.
"""
q = (anchors[1].time-anchors[0].time)
z = (start-anchors[1].time) / q
vector = np.vstack([
anchors[0].state[indices] , # a
anchors[0].diff[indices] * q, # b
anchors[1].state[indices] , # c
anchors[1].diff[indices] * q, # d
])
return np.einsum(
vector , [0,2],
partial_sp_matrix(z), [0,1],
vector , [1,2],
)*q
def scalar_product_interval(anchors, indices_1, indices_2):
"""
Returns the (integral) scalar product of the interpolants of `anchors` for `indices_1` (one side of the product) and `indices_2` (other side).
"""
q = (anchors[1].time-anchors[0].time)
vector_1 = np.vstack([
anchors[0].state[indices_1], # a_1
anchors[0].diff[indices_1] * q, # b_1
anchors[1].state[indices_1], # c_1
anchors[1].diff[indices_1] * q, # d_1
])
vector_2 = np.vstack([
anchors[0].state[indices_2], # a_2
anchors[0].diff[indices_2] * q, # b_2
anchors[1].state[indices_2], # c_2
anchors[1].diff[indices_2] * q, # d_2
])
return np.einsum(
vector_1, [0,2],
sp_matrix, [0,1],
vector_2, [1,2]
)*q
def scalar_product_partial(anchors, indices_1, indices_2, start):
"""
Returns the scalar product of the interpolants of `anchors` for `indices_1` (one side of the product) and `indices_2` (other side), but only taking into account the time after `start`.
"""
q = (anchors[1].time-anchors[0].time)
z = (start-anchors[1].time) / q
vector_1 = np.vstack([
anchors[0].state[indices_1], # a_1
anchors[0].diff[indices_1] * q, # b_1
anchors[1].state[indices_1], # c_1
anchors[1].diff[indices_1] * q, # d_1
])
vector_2 = np.vstack([
anchors[0].state[indices_2], # a_2
anchors[0].diff[indices_2] * q, # b_2
anchors[1].state[indices_2], # c_2
anchors[1].diff[indices_2] * q, # d_2
])
return np.einsum(
vector_1, [0,2],
partial_sp_matrix(z), [0,1],
vector_2, [1,2]
)*q
class Extrema(object):
"""
Class for containing the extrema and their positions in `n` dimensions. These can be accessed via the attributes `minima`, `maxima`, `arg_min`, and `arg_max`.
"""
def __init__(self,n):
self.arg_min = np.full(n,np.nan)
self.arg_max = np.full(n,np.nan)
self.minima = np.full(n, np.inf)
self.maxima = np.full(n,-np.inf)
def update(self,times,values,condition=True):
"""
Updates the extrema if `values` are more extreme.
Parameters
----------
condition : boolean or array of booleans
Only the components where this is `True` are updated.
"""
update_min = np.logical_and(values<self.minima,condition)
self.arg_min = np.where(update_min,times ,self.arg_min)
self.minima = np.where(update_min,values,self.minima )
update_max = np.logical_and(values>self.maxima,condition)
self.arg_max = np.where(update_max,times ,self.arg_max)
self.maxima = np.where(update_max,values,self.maxima )
def extrema_from_anchors(anchors,beginning=None,end=None,target=None):
"""
Finds minima and maxima of the Hermite interpolant for the anchors.
Parameters
----------
beginning : float or `None`
Beginning of the time interval for which extrema are returned. If `None`, the time of the first anchor is used.
end : float or `None`
End of the time interval for which extrema are returned. If `None`, the time of the last anchor is used.
target : Extrema or `None`
If an Extrema instance, this one is updated with the newly found extrema and also returned (which means that newly found extrema will be ignored when the extrema in `target` are more extreme).
Returns
-------
extrema: Extrema object
An `Extrema` instance containing the extrema and their positions.
"""
q = (anchors[1].time-anchors[0].time)
retransform = lambda x: q*x+anchors[0].time
a = anchors[0].state
b = anchors[0].diff * q
c = anchors[1].state
d = anchors[1].diff * q
evaluate = lambda x: (1-x)*((1-x)*(b*x+(a-c)*(2*x+1))-d*x**2)+c
left_x = 0 if beginning is None else (beginning-anchors[0].time)/q
right_x = 1 if end is None else (end -anchors[0].time)/q
beginning = anchors[0].time if beginning is None else beginning
end = anchors[1].time if end is None else end
extrema = Extrema(len(anchors[0].state)) if target is None else target
extrema.update(beginning,evaluate(left_x ))
extrema.update(end ,evaluate(right_x))
radicant = b**2 + b*d + d**2 + 3*(a-c)*(3*(a-c) + 2*(b+d))
A = 1/(2*a + b - 2*c + d)
B = a + 2*b/3 - c + d/3
for sign in (-1,1):
with np.errstate(invalid='ignore'):
x = (B+sign*np.sqrt(radicant)/3)*A
extrema.update(
retransform(x),
evaluate(x),
np.logical_and.reduce(( radicant>=0, left_x<=x, x<=right_x ))
)
return extrema
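# Hedged usage sketch (illustrative only): with equal states 0 at both anchors
# and positive derivatives at both ends, the interpolant rises first and then
# dips, so the maximum lies in the first half of the interval and the minimum
# in the second half.
def _example_extrema():
    anchors = ( Anchor(0.0, [0.0], [1.0]), Anchor(1.0, [0.0], [1.0]) )
    ext = extrema_from_anchors(anchors)
    return ext.minima, ext.maxima, ext.arg_min, ext.arg_max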
def solve_from_anchors(anchors,i,value,beginning=None,end=None):
"""
Finds the times at which a component of the Hermite interpolant for the anchors assumes a given value and the derivatives at those points (allowing to distinguish upwards and downwards threshold crossings).
Parameters
----------
i : integer
The index of the component.
value : float
The value that shall be assumed
beginning : float or `None`
Beginning of the time interval for which positions are returned. If `None`, the time of the first anchor is used.
end : float or `None`
End of the time interval for which positions are returned. If `None`, the time of the last anchor is used.
Returns
-------
positions : list of pairs of floats
Each pair consists of a time where `value` is assumed and the derivative (of `component`) at that time.
"""
q = (anchors[1].time-anchors[0].time)
retransform = lambda x: q*x+anchors[0].time
a = anchors[0].state[i]
b = anchors[0].diff[i] * q
c = anchors[1].state[i]
d = anchors[1].diff[i] * q
left_x = 0 if beginning is None else (beginning-anchors[0].time)/q
right_x = 1 if end is None else (end -anchors[0].time)/q
candidates = np.roots([
2*a + b - 2*c + d,
-3*a - 2*b + 3*c - d,
b,
a - value,
])
solutions = sorted(
retransform(candidate.real)
for candidate in candidates
if np.isreal(candidate) and left_x<=candidate<=right_x
)
return [ (t,interpolate_diff(t,i,anchors)) for t in solutions ]
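# Hedged usage sketch (illustrative only): for an interpolant rising linearly
# from 0 to 1 (matching unit derivatives at both anchors), the value 0.5 is
# crossed once, at t = 0.5 with derivative 1 (an upward crossing).
def _example_threshold_crossing():
    anchors = ( Anchor(0.0, [0.0], [1.0]), Anchor(1.0, [1.0], [1.0]) )
    return solve_from_anchors(anchors, i=0, value=0.5)   # -> [(0.5, 1.0)]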
class CubicHermiteSpline(list):
"""
Class for a cubic Hermite Spline of one variable (time) with `n` values. This behaves like a list with additional functionalities and checks. Note that the times of the anchors must always be in ascending order.
Parameters
----------
n : integer
Dimensionality of the values. If `None`, the following argument must be an instance of CubicHermiteSpline.
anchors : iterable of triplets
Contains all the anchors with which the spline is initiated.
If `n` is `None` and this is an instance of CubicHermiteSpline, all properties are copied from it.
"""
def __init__(self,n=None,anchors=()):
if n is None:
assert isinstance(anchors,CubicHermiteSpline)
CubicHermiteSpline.__init__( self, anchors.n, anchors)
else:
self.n = n
super().__init__( [self.prepare_anchor(anchor) for anchor in anchors] )
self.sort()
def prepare_anchor(self,x):
x = x if isinstance(x,Anchor) else Anchor(*x)
if x.state.shape != (self.n,):
raise ValueError("State has wrong shape.")
return x
def append(self,anchor):
anchor = self.prepare_anchor(anchor)
if self and anchor.time <= self[-1].time:
raise ValueError("Anchor must follow last one in time. Consider using `add` instead.")
super().append(anchor)
def extend(self,anchors):
for anchor in anchors:
self.append(anchor)
def copy(self):
# Using type so this works with inheritance.
return type(self)(anchors=self)
def __setitem__(self,key,item):
anchor = self.prepare_anchor(item)
if (
(key!= 0 and key!=-len(self) and self[key-1].time>=anchor.time)
or (key!=-1 and key!= len(self)-1 and self[key+1].time<=anchor.time)
):
raise ValueError("Anchor’s time does not fit.")
super().__setitem__(key,anchor)
def insert(self,key,item):
anchor = self.prepare_anchor(item)
if (
(key!= 0 and key!=-len(self) and self[key-1].time>=anchor.time)
or ( key!= len(self) and self[key ].time<=anchor.time)
):
raise ValueError("Anchor’s time does not fit. Consider using `add` instead")
super().insert(key,anchor)
def sort(self):
self.check_for_duplicate_times()
super().sort()
def check_for_duplicate_times(self):
if len({anchor.time for anchor in self}) != len(self):
raise ValueError("You cannot have two anchors with the same time.")
def add(self,anchor):
"""
Inserts `anchor` at the appropriate time.
"""
insort(self,self.prepare_anchor(anchor))
def clear_from(self,n):
"""
Removes all anchors with an index of `n` or higher.
"""
while len(self)>n:
self.pop()
def clear(self):
super().__init__()
def reverse(self):
raise AssertionError("Anchors must be ordered by time. Therefore this does not make sense.")
@property
def t(self):
"""
The time of the last anchor. This may be overwritten in subclasses such that `self.t` and the time of the last anchor are not identical anymore.
"""
return self[-1].time
@property
def times(self):
"""
The times of all anchors.
"""
return [anchor.time for anchor in self]
def last_index_before(self,time):
"""
Returns the index of the last anchor before `time`.
Returns 0 if `time` is before the first anchor.
"""
return bisect_left(self,float(time),lo=1)-1
def constant(self,state,time=0):
"""
makes the spline constant, removing possibly previously existing anchors.
Parameters
----------
state : iterable of floats
time : float
The time of the last point.
"""
if self:
warn("The spline already contains points. This will remove them. Be sure that you really want this.")
self.clear()
self.append(( time-1., state, np.zeros_like(state) ))
self.append(( time , state, np.zeros_like(state) ))
def from_function(self,function,times_of_interest=None,max_anchors=100,tol=5):
"""
makes the spline interpolate a given function at heuristically determined points. More precisely, starting with `times_of_interest`, anchors are added until either:
* anchors are closer than the tolerance
* the value of an anchor is approximated by the interpolant of its neighbours within the tolerance
* the maximum number of anchors is reached.
This removes possibly previously existing anchors.
Parameters
----------
function : callable or iterable of SymPy/SymEngine expressions
The function to be interpolated.
If callable, this is interpreted like a regular function.
If an iterable of expressions, each expression represents the respective component of the function.
times_of_interest : iterable of numbers
Initial set of time points considered for the interpolation. All created anchors will be between the minimal and maximal time point.
max_anchors : positive integer
The maximum number of anchors that this routine will create (including those for the `times_of_interest`).
tol : integer
This is a parameter for the heuristics, more precisely the number of digits considered for tolerance in several places.
"""
assert tol>=0, "tol must be non-negative."
assert max_anchors>0, "Maximum number of anchors must be positive."
assert len(times_of_interest)>=2, "I need at least two time points of interest."
if self:
warn("The spline already contains points. This will remove them. Be sure that you really want this.")
self.clear()
# A happy anchor is sufficiently interpolated by its neighbours, temporally close to them, or at the border of the interval.
def unhappy_anchor(*args):
result = Anchor(*args)
result.happy = False
return result
if callable(function):
array_function = lambda time: np.asarray(function(time))
def get_anchor(time):
value = array_function(time)
eps = time*10**-tol or 10**-tol
derivative = (array_function(time+eps)-value)/eps
return unhappy_anchor(time,value,derivative)
else:
import sympy
function = [ sympy.sympify(comp) for comp in function ]
symbols = set.union(*(comp.free_symbols for comp in function))
if len(symbols)>2:
raise ValueError("Expressions must contain at most one free symbol")
def get_anchor(time):
substitutions = {symbol:time for symbol in symbols}
evaluate = lambda expr: expr.subs(substitutions).evalf(tol)
return unhappy_anchor(
time,
np.fromiter((evaluate(comp ) for comp in function),dtype = float),
np.fromiter((evaluate(comp.diff()) for comp in function),dtype = float),
)
for time in sorted(times_of_interest):
self.append(get_anchor(time))
self[0].happy = self[-1].happy = True
# Insert at least one anchor, if there are only two:
if len(self)==2<max_anchors:
time = np.mean((self[0].time,self[1].time))
self.insert(1,get_anchor(time))
while not all(anchor.happy for anchor in self) and len(self)<=max_anchors:
for i in range(len(self)-2,-1,-1):
# Update happiness
if not self[i].happy:
guess = interpolate_vec( self[i].time, (self[i-1], self[i+1]) )
self[i].happy = (
rel_dist(guess,self[i].state) < 10**-tol or
rel_dist(self[i+1].time,self[i-1].time) < 10**-tol
)
# Add new anchors, if unhappy
if not (self[i].happy and self[i+1].happy):
time = np.mean((self[i].time,self[i+1].time))
self.insert(i+1,get_anchor(time))
if len(self)>max_anchors:
break
@classmethod
def from_data(cls,times,states):
"""
Creates a new cubic Hermite spline based on a provided dataset. The derivative of a given anchor is estimated from a quadratic interpolation of that anchor and the neighbouring ones. (For the first and last anchor, it’s only a linear interpolation.)
This is only a general best guess at how to interpolate the data. Often you can apply your knowledge of the data to do better.
Parameters
----------
times : array-like
The times of the data points.
states : array-like
The values of the data. The first dimension has to have the same length as `times`.
"""
assert len(times)==len(states)
states = np.asarray(states)
assert states.ndim==2
spline = cls(n=states.shape[1])
diffs =
|
np.empty_like(states)
|
numpy.empty_like
|
#! /usr/bin/python
from __future__ import absolute_import
from __future__ import print_function
from six.moves import range
from six.moves import input
import math
import time
import pickle
import pylab
import numpy as np
import pandas as pd
#import scipy
import matplotlib
from matplotlib import pyplot
from numpy import linspace,zeros,cumsum,mean
from six.moves import range
__version__ = "0.5.0"
# submodules
from .srnumerical import *
from .row_functions import *
# global parameters
# boat drag coefficient
#alfa = 3.5 # 2.95?? for skiff, from <NAME>
alfa = 3.06 # best fit to Kleshnev data for single
# alfaatkinson = 3.18 # use for Atkinson
alfaatkinson = 3.4
rho_air = 1.226 # kg/m3
Cdw = 1.1 # for all boats - big approximation
#crewarea = 1.4
crewarea = 2.0
scalepower = 0.67
def main():
return "Executing rowingphysics version %s." % __version__
def time500mtovavg(minutes,secs):
""" Calculates velocity from pace in minutes, seconds)
"""
seconds = 60.*minutes+secs
vavg = 500./seconds
return vavg
def vavgto500mtime(vavg):
""" Calculates 500m time (minutes, seconds) from velocity
"""
seconds = 500.0/vavg
minutes = np.floor(seconds/60.)
secs = seconds-60.0*minutes
return [minutes,secs]
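# Hedged usage sketch (illustrative only): a 2:00 per 500m split corresponds to
# 500/120 = 4.17 m/s, and converting that velocity back recovers the
# 2 minutes / 0 seconds split.
def _example_pace_conversion():
    vavg = time500mtovavg(2, 0)            # -> 4.1666...
    minutes, secs = vavgto500mtime(vavg)   # -> 2.0, 0.0
    return vavg, minutes, secs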
def write_obj(obj,filename):
pickle.dump(obj,open(filename,"wb"))
def read_obj(filename):
res = pickle.load(open(filename))
return res
def testbladeforce(fhandle,rigging,vb,oarangle=0.01,aantal=10):
""" iterates slip using "real" fulcrum point
aantal = number of iterations
"""
lin = rigging.lin
lscull = rigging.lscull
lout = lscull - lin
oarangle = oarangle*np.pi/180.
Fblade = fhandle*lin/lout
res = blade_force(oarangle,rigging,vb,Fblade)
phidot = res[0]
print((Fblade,180*phidot/np.pi,180.*vb*np.cos(0.01)/(np.pi*lout)))
Fb = zeros(aantal)
itern = list(range(aantal))
for i in range(aantal):
l2 = lout
Fb[i] = fhandle*lin/l2
res = blade_force(oarangle,rigging,vb,Fb[i])
phidot = res[0]
print((Fb[i],180.*phidot/np.pi))
Fdot = fhandle + Fb
# plot
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(itern, Fb,'ro',label = 'Blade Force')
pyplot.plot(itern, Fdot,'bo',label = 'Oarlock Force')
pylab.legend()
pyplot.xlabel("Iteration")
pyplot.ylabel('Force (N)')
pyplot.show()
def plotforce(fhandle,rigging,vb,oarangle=0.01):
""" iterates slip using "real" fulcrum point
aantal = nr iterations
"""
lin = rigging.lin
lscull = rigging.lscull
lout = lscull - lin
oarangle = oarangle*np.pi/180.
Fblade = fhandle*lin/lout
res = blade_force(oarangle,rigging,vb,Fblade,doplot=1)
phidot = res[0]
print((Fblade,180*phidot/np.pi,180.*vb*np.cos(0.01)/(np.pi*lout)))
def empirical(datafile,vavg,crew,rigging,tstroke,trecovery,doplot=1):
""" Reads in empirical acceleration data to be compared with
acceleration plot
"""
lin = rigging.lin
lscull = rigging.lscull
lout = lscull - lin
tempo = crew.tempo
mc = crew.mc
mb = rigging.mb
Nrowers = rigging.Nrowers
try:
dragform = rigging.dragform
except AttributeError:
dragform = 1.0
catchangle = rigging.oarangle(0)
empdata = np.genfromtxt(datafile, delimiter = ',',skip_header=1)
emptime = empdata[:,0]
empdt = emptime[1]-emptime[0]
empdtarray = gradient(emptime)
xdotdot1 = empdata[:,1]
wh_stroke = min(where(emptime>=tstroke)[0])
wh_recovery = min(where(emptime>=trecovery)[0])
xdotdot = 0*emptime
xdotdot[:xdotdot.size-wh_recovery] = xdotdot1[wh_recovery:]
xdotdot[xdotdot.size-wh_recovery:] = xdotdot1[:wh_recovery]
wh_stroke = xdotdot.size-wh_recovery+wh_stroke
xdot = cumsum(xdotdot)*empdt
xdot = xdot-mean(xdot)+vavg
Fdrag = drag_eq((Nrowers*mc)+mb,xdot,doprint=0,alfaref=alfa*dragform)
ydotdot = 0*xdotdot
zdotdot = 0*xdotdot
# Recovery based
ydotdot[0:wh_stroke] = (-Fdrag[0:wh_stroke]-(mb+(Nrowers*mc))*xdotdot[0:wh_stroke])/(Nrowers*mc)
ydot = empdt*cumsum(ydotdot)
Fhelp = mb*xdotdot+Fdrag
# calculate phidot, phi
phidot1 = xdot/lout
phidot1[:wh_stroke] = 0
phi1 = cumsum(phidot1)*empdt
phi1 = phi1+catchangle - phi1[wh_stroke]
phidot2 = xdot/(lout*np.cos(phi1))
phidot2[:wh_stroke] = 0
phi2 = cumsum(phidot2)*empdt
phi2 = phi2+catchangle - phi2[wh_stroke]
phidot3 = xdot/(lout*np.cos(phi2))
phidot3[:wh_stroke] = 0
phi3 = cumsum(phidot3)*empdt
phi3 = phi3+catchangle - phi3[wh_stroke]
phidot = xdot/(lout*np.cos(phi3))
phidot[:wh_stroke] = 0
phi = cumsum(phidot)*empdt
phi = phi+catchangle - phi[wh_stroke]
vhand = phidot*lin*np.cos(phi)
vhand[:wh_stroke] = 0
handlepos = cumsum(vhand)*empdt
ydot[wh_stroke+1:] = crew.vcma(vhand[wh_stroke+1:],handlepos[wh_stroke+1:])
ydotdot = gradient(ydot+xdot,empdtarray)
zdot = (mc*(ydot+xdot)+mb*xdot)/(mc+mb)
zdotdot = gradient(zdot,empdtarray)
Fblade = (mc+mb)*zdotdot+Fdrag
Fhandle = lout*Fblade/lin
Ffoot = mc*(xdotdot+ydotdot)+Fhandle
Pw = drag_eq((Nrowers*mc)+mb,xdot,alfaref=alfa*dragform)*xdot
Edrag = cumsum(Pw)*empdt
Pq = (Nrowers*mc)*(ydotdot)*ydot
Pqrower = abs(Pq)
Pdiss = Pqrower-Pq
print(('Drag Power',mean(Pw)))
print(('Kinetic Power loss',mean(Pdiss)))
print(('Stroke length ',max(handlepos)))
forcearray = transpose([handlepos[wh_stroke-1:],Fhandle[wh_stroke-1:]])
savetxt('empforce.txt',forcearray,delimiter=',',fmt='%4.2e')
recoveryarray = transpose([emptime[:wh_stroke-1],-ydot[:wh_stroke-1]])
savetxt('emprecovery.txt',recoveryarray,delimiter=',',fmt='%4.2e')
if (doplot==1):
pyplot.clf()
pyplot.plot(emptime,xdotdot, 'r-',label = 'Measured Boat Acceleration')
pyplot.plot(emptime,ydotdot+xdotdot,'b-',label = 'Crew Acceleration')
pyplot.plot(emptime,zdotdot,'g-',label = 'System Acceleration')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('a (m/s^2)')
pyplot.show()
if (doplot==2):
pyplot.clf()
pyplot.plot(emptime, xdot, 'r-',label = 'Boat Speed')
pyplot.plot(emptime, xdot+ydot, 'b-',label = 'Crew Speed')
pyplot.plot(emptime, zdot, 'g-',label = 'System speed')
pylab.legend(loc='lower left')
pyplot.xlabel("time (s)")
pyplot.ylabel('v (m/s)')
pyplot.show()
if (doplot==3):
pyplot.clf()
pyplot.plot(emptime, Fdrag, 'r-',label = 'Drag Force')
pyplot.plot(emptime, Fhelp, 'g-',label = 'Foar-Ffoot')
pyplot.plot(emptime, Fblade, 'b-',label = 'Fblade')
pyplot.plot(emptime, Ffoot,'y-', label = 'Ffoot')
pyplot.plot(emptime, Fhandle,'k-', label = 'Fhandle')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel("F (N)")
pyplot.show()
if (doplot==4):
pyplot.clf()
pyplot.plot(emptime, phidot1, 'r-',label = 'Angular velocity')
pyplot.plot(emptime, phidot2, 'g-',label = 'Iteration 2')
pyplot.plot(emptime, phidot3, 'y-',label = 'Iteration 3')
pyplot.plot(emptime, phidot, 'b-',label = 'Iteration 4')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel("rad/s")
pyplot.show()
if (doplot==5):
pyplot.clf()
pyplot.plot(emptime, np.degrees(phi1), 'r-',label = 'Oar angle')
pyplot.plot(emptime, np.degrees(phi2), 'g-',label = 'Iteration 2')
pyplot.plot(emptime, np.degrees(phi3), 'y-',label = 'Iteration 3')
pyplot.plot(emptime, np.degrees(phi), 'b-',label = 'Iteration 4')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel("degrees")
pyplot.show()
if (doplot==6):
pyplot.clf()
pyplot.plot(emptime, handlepos, 'r-', label = 'Handle position')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel("y (m)")
pyplot.show()
if (doplot==7):
pyplot.clf()
pyplot.plot(handlepos,Fhandle,'r-', label = 'Handle Force')
pylab.legend(loc='upper left')
pyplot.xlabel("x (m)")
pyplot.ylabel("F (N)")
pyplot.show()
return mean(Pw+Pdiss)
def energybalance(F,crew,rigging,v0=4.3801,dt=0.03,doplot=1,doprint=0,
timewise=0,index_offset=1,empirical=0,empt0=0,vb0=0,
catchacceler=5.0,emptype='acceler',
windv=0,dowind=1):
""" calculates one stroke with average handle force as input
slide velocity and stroke/recovery ratio are calculated
knows about slip, lift, drag. Plots energy balance.
windv is wind speed in m/s. Positive values are tailwind.
"""
# initialising output values
dv = 100.
vavg = 0.0
vend = 0.0
ratio = 0.0
power = 0.0
if (vb0==0):
vb0 = v0
if (catchacceler>50):
catchacceler = 50
# stroke parameters
lin = rigging.lin
lscull = rigging.lscull
lout = lscull - lin
tempo = crew.tempo
mc = crew.mc
mb = rigging.mb
recprofile = crew.recprofile
d = crew.strokelength
Nrowers = rigging.Nrowers
try:
dragform = rigging.dragform
except:
dragform = 1.0
catchacceler = max(catchacceler,2.0)
# nr of time steps
aantal = 1+int(round(60./(tempo*dt)))
time = linspace(0,60./tempo,aantal)
vs = zeros(len(time))+v0
vb = zeros(len(time))+v0
vc = zeros(len(time))+v0
oarangle = zeros(len(time))
xblade = zeros(len(time))
Fhandle = zeros(len(time))
Fblade = zeros(len(time))
Fprop = zeros(len(time))
Fhandle[0:2] = 0
Pbladeslip = zeros(len(time)) # H
xdotdot = zeros(len(time))
zdotdot = zeros(len(time))
ydotdot = zeros(len(time))
xdot = zeros(len(time))+v0
ydot = zeros(len(time))+v0
zdot = zeros(len(time))+v0
Pf = zeros(len(time))
Foarlock = zeros(len(time))
Flift = zeros(len(time))
Fbldrag = zeros(len(time))
attackangle = zeros(len(time))
Clift = zeros(len(time))
Cdrag = zeros(len(time))
handlepos = 0
# initial handle and boat velocities
vs[0] = v0
vb[0] = vb0
vc[0] = ((Nrowers*mc+mb)*vs[0]-mb*vb[0])/(Nrowers*mc)
oarangle[0] = rigging.oarangle(0)
xblade[0] = -lout*np.sin(oarangle[0])
i=1
vcstroke = 0
vcstroke2 = 1
# catch
vblade = xdot[i-1]
while (vcstroke < vcstroke2):
vhand = catchacceler*(time[i]-time[0])
vcstroke = crew.vcm(vhand, handlepos)
phidot = vb[i-1]*np.cos(oarangle[i-1])
vhand = phidot*lin*np.cos(oarangle[i-1])
ydot[i] = vcstroke
Fdrag = drag_eq((Nrowers*mc)+mb,xdot[i-1],alfaref=alfa*dragform)
zdotdot[i] = -Fdrag/((Nrowers*mc)+mb)
vw = windv-vcstroke-zdot[i-1]
Fwind = 0.5*crewarea*Cdw*rho_air*(Nrowers**scalepower)*vw*abs(vw)*dowind
# print(Fwind,crewarea,dowind)
zdotdot[i] = zdotdot[i] + Fwind/((Nrowers*mc)+mb)
zdot[i] = zdot[i-1]+dt*zdotdot[i]
xdot[i] = zdot[i]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydot[i]
Fi = crew.forceprofile(F,handlepos)
Fbladei = Fi*lin/lout
res = blade_force(oarangle[i-1],rigging,vb[i-1],Fbladei)
phidot2 = res[0]
vhand2 = phidot2*lin*np.cos(oarangle[i-1])
vcstroke2 = crew.vcm(vhand2,handlepos)
vblade = xdot[i]-phidot*lout*np.cos(oarangle[i-1])
# print(i,vhand,vhand2,vcstroke,vcstroke2)
vs[i] = zdot[i]
vc[i] = xdot[i]+ydot[i]
vb[i] = xdot[i]
ydotdot[i] = (ydot[i]-ydot[i-1])/dt
xdotdot[i] = zdotdot[i]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydotdot[i]
handlepos = handlepos+ydot[i]*dt
Fhandle[i] = 0
oarangle[i] = rigging.oarangle(handlepos)
i = i+1
# stroke
while (handlepos<d) & (i<len(time)):
if (timewise == 1):
Fi = crew.forceprofile(F,handlepos)*np.cos(oarangle[i-1])
else:
Fi = crew.forceprofile(F,handlepos)
Fhandle[i-1] = Fi
Fblade[i-1] = Fi*lin/lout
res = blade_force(oarangle[i-1],rigging,vb[i-1],Fblade[i-1])
phidot = res[0]
Fprop[i-1] = res[2]*Nrowers
Flift[i-1] = res[3]*Nrowers
Fbldrag[i-1] = res[4]*Nrowers
Clift[i-1] = res[5]
Cdrag[i-1] = res[6]
attackangle[i-1] = res[7]
phidot = res[0]
vhand = phidot*lin*np.cos(oarangle[i-1])
vcstroke = crew.vcm(vhand, handlepos)
Pbladeslip[i-1] = Nrowers*res[1]*(phidot*lout - vb[i-1]*np.cos(oarangle[i-1]))
Fdrag = drag_eq((Nrowers*mc)+mb,xdot[i-1],alfaref=alfa*dragform)
zdotdot[i] = (Fprop[i-1] - Fdrag)/((Nrowers*mc)+mb)
vw = windv-vcstroke-zdot[i-1]
Fwind = 0.5*crewarea*Cdw*rho_air*(Nrowers**scalepower)*vw*abs(vw)*dowind
zdotdot[i] = zdotdot[i] + Fwind/((Nrowers*mc)+mb)
zdot[i] = zdot[i-1]+dt*zdotdot[i]
ydot[i] = vcstroke
xdot[i] = zdot[i]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydot[i]
handlepos = handlepos+vhand*dt
vs[i] = zdot[i]
vc[i] = xdot[i]+ydot[i]
vb[i] = xdot[i]
ydotdot[i] = (ydot[i]-ydot[i-1])/dt
xdotdot[i] = zdotdot[i]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydotdot[i]
Pf[i-1] = Nrowers*Fblade[i-1]*xdot[i]*np.cos(oarangle[i-1])
oarangle[i] = rigging.oarangle(handlepos)
i = i+1
i = i-1
# recovery
trecovery = max(time)-time[i]
ratio = time[i]/max(time)
aantalstroke = i
if (recprofile == 1): # old method (sine)
vhandmax = -np.pi*d/(2*trecovery)
vhand = vhandmax*np.sin(np.pi*(time-time[i])/trecovery)
for k in range(i+1,aantal):
Fdrag = drag_eq((Nrowers*mc)+mb,xdot[k-1],alfaref=alfa*dragform)
zdotdot[k] = (- Fdrag)/((Nrowers*mc)+mb)
vw = windv-vcstroke-zdot[k-1]
Fwind = 0.5*crewarea*Cdw*rho_air*(Nrowers**scalepower)*vw*abs(vw)*dowind
zdotdot[k] = zdotdot[k] + Fwind/((Nrowers*mc)+mb)
zdot[k] = zdot[k-1]+dt*zdotdot[k]
ydot[k] = crew.vcm(vhand[k], handlepos)
xdot[k] = zdot[k]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydot[k]
vs[k] = zdot[k]
vc[k] = xdot[k]+ydot[k]
vb[k] = xdot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
xdotdot[k] = zdotdot[k]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydotdot[k]
handlepos = handlepos+vhand[k]*dt
oarangle[k] = rigging.oarangle(handlepos)
else:
vavgrec = d/trecovery
vcrecovery = zeros(aantal)
for k in range(i+1,aantal):
vhand = crew.vhandle(vavgrec,trecovery,time[k]-time[i])
vcrecovery[k] = crew.vcm(vhand, handlepos)
Fdrag = drag_eq((Nrowers*mc)+mb,xdot[k-1],alfaref=alfa*dragform)
zdotdot[k] = (- Fdrag)/((Nrowers*mc)+mb)
vw = windv-vcstroke-zdot[k-1]
Fwind = 0.5*crewarea*Cdw*rho_air*(Nrowers**scalepower)*vw*abs(vw)*dowind
zdotdot[k] = zdotdot[k] + Fwind/((Nrowers*mc)+mb)
zdot[k] = zdot[k-1]+dt*zdotdot[k]
ydot[k] = vcrecovery[k]
xdot[k] = zdot[k]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydot[k]
vs[k] = zdot[k]
vc[k] = xdot[k]+ydot[k]
vb[k] = xdot[k]
ydotdot[k] = (ydot[k]-ydot[k-1])/dt
xdotdot[k] = zdotdot[k]-((Nrowers*mc)/((Nrowers*mc)+mb))*ydotdot[k]
handlepos = d+d*crew.dxhandle(vavgrec,trecovery,time[k]-time[i])
# handlepos = handlepos+vhand*dt
oarangle[k] = rigging.oarangle(handlepos)
# blade positions
xblade=dt*cumsum(vb)-np.sin(oarangle)*lout
yblade=lout*np.cos(oarangle)+rigging.spread
# velocities
xdot = vb
zdot = vs
ydot = vc-vb
xdotdot[1]=(xdot[1]-xdot[0])/dt
ydotdot[1]=(ydot[1]-ydot[0])/dt
Pq = (Nrowers*mc)*(xdotdot+ydotdot)*ydot
# Ekinb = 0.5*mb*xdot**2 - 0.5*mb*v0**2
# Ekinc = 0.5*mc*(xdot+ydot)**2 - 0.5*mc*v0**2
Pw = drag_eq((Nrowers*mc)+mb,xdot,alfaref=alfa*dragform)*xdot
Pwmin = drag_eq((Nrowers*mc)+mb,mean(xdot),alfaref=alfa*dragform)*mean(xdot)
Pmb = mb*xdot*xdotdot
Pmc = (Nrowers*mc)*(xdot+ydot)*(xdotdot+ydotdot)
# Phandle = Nrowers*Fhandle*(xdot+ydot)*np.cos(oarangle)
Phandle = Nrowers*Fhandle*(xdot)*np.cos(oarangle)
Pleg = Nrowers*mc*(xdotdot+ydotdot)*ydot
Ekinb = cumsum(Pmb)*dt
Ekinc = cumsum(Pmc)*dt
Pqrower = abs(Pq)
Pdiss = Pqrower-Pq
Ef = cumsum(Pf)*dt
Eq = cumsum(Pq)*dt
Eblade = cumsum(Pbladeslip)*dt
Eqrower = cumsum(Pqrower)*dt
Ediss = cumsum(Pdiss)*dt
Ew = cumsum(Pw)*dt
Ewmin = Pwmin*(max(time)-min(time))
Eleg = cumsum(Pleg)*dt
Ehandle = cumsum(Phandle)*dt
Ekin0 = 0.5*(Nrowers*mc+mb)*zdot[0]**2
Ekinend = 0.5*(Nrowers*mc+mb)*zdot[aantal-1]**2
Eloss = Ekin0-Ekinend
Fbltotal = (Fbldrag**2 + Flift**2)**(0.5)
# empirical data
if (empirical!=0):
empdata = np.genfromtxt(empirical, delimiter = ',',skip_header=1)
emptime = empdata[:,0]
if (max(emptime)>10):
emptime = emptime/1000.
emptime = emptime + empt0
empdt = emptime[1]-emptime[0]
if (emptype == 'acceler'):
empxdotdot = empdata[:,1]
empxdot = cumsum(empxdotdot)*empdt
empxdot = empxdot-mean(empxdot)+mean(xdot)
else:
empdtarray = gradient(emptime)
empxdot = empdata[:,1]
empxdotdot = gradient(empxdot,empdtarray)
empRIM_E = max(cumsum(empxdot-min(empxdot))*empdt)
empRIM_check = max(empxdot)-min(empxdot)
if (doprint == 1):
print(("RIM E (measured)",empRIM_E))
print(("RIM Check (meas)",empRIM_check))
# some other calculations
strokelength_cm = max(cumsum(ydot)*dt)
# printing
if (doprint==1):
print(("E blade ",Eblade[aantal-1]))
print(("Ediss rower ",Ediss[aantal-1]))
print(("E drag ",Ew[aantal-1]))
print(("Eleg ",Eleg[aantal-1]))
print(("Ehandle ",Ehandle[aantal-1]))
print(("Epropulsion ",Ef[aantal-1]))
print(("Ekin loss ",Eloss))
print("")
print(("P blade ",Eblade[aantal-1]/time[aantal-1]))
print(("P leg ",Eleg[aantal-1]/time[aantal-1]))
print(("P handle ",Ehandle[aantal-1]/time[aantal-1]))
print(("P drag ",Ew[aantal-1]/time[aantal-1]))
print(("P propulsion ",Ef[aantal-1]/time[aantal-1]))
print("")
print(("Stroke length CM ",strokelength_cm))
print("")
# plotting
if (doplot==1):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, xdot,'r-',label = 'Boat velocity')
pyplot.plot(time, xdot+ydot,'g-',label = 'Crew velocity')
pyplot.plot(time, zdot,'b-',label = 'CM velocity')
if (empirical!=0):
pyplot.plot(emptime, empxdot, 'y-',label = 'Measured')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('v (m/s)')
pyplot.show()
if(doplot==18):
pyplot.clf()
pyplot.plot(time,np.degrees(oarangle),'y.',label='oar angle')
pylab.legend(loc='upper right')
pyplot.ylabel("Oar Angle (o)")
pyplot.show()
if (doplot==2):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Pf,'r-',label = 'Propulsive power')
pyplot.plot(time, Pq,'b-',label = 'Kinetic power')
pyplot.plot(time, Pbladeslip,'k-',label = 'Puddle power')
pyplot.plot(time, Pf+Pq+Pbladeslip,'g-',label = 'Leg power')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==3):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Ef,'r-',label = 'Propulsive Energy')
pyplot.plot(time, Eqrower,'b-',label = 'Kinetic Energy')
pyplot.plot(time, Ef+Eqrower+Eblade,'g-',label = 'Total Energy')
pyplot.plot(time, Eblade,'k-',label = 'Puddle Energy')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==4):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Pw,'r-',label = 'Drag sink')
pyplot.plot(time, Pbladeslip,'k-',label = 'Blade slip sink')
pyplot.plot(time, Pmb,'b-',label = 'Kinetic energy change boat')
pyplot.plot(time, Pmc,'g-',label = 'Kinetic energy change crew')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==5):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Ew+Ediss+Eblade,'r-',label = 'Drag energy + Rower Diss + Blade Slip')
pyplot.plot(time, Ew, 'y-', label = 'Drag Energy')
pyplot.plot(time, Ekinb,'b-',label = 'Boat Kinetic energy')
pyplot.plot(time, Ekinc,'g-',label = 'Crew Kinetic energy')
pyplot.plot(time, Ew+Ediss+Ekinb+Ekinc+Eblade, 'k-', label = 'Ew + Ediss + Ekinb + Ekinc+Eblade')
pylab.legend(loc='upper left')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==6):
pyplot.clf()
pyplot.subplot(121)
pyplot.plot(time, Pq,'k-',label = 'Kinetic power')
pyplot.plot(time, 0*Pq, 'k-')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.subplot(122)
pyplot.plot(time, Pqrower,'b-',label = 'Kinetic power rower')
pyplot.plot(time, Pdiss,'k-',label = 'Kinetic energy dissipation')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==7):
pyplot.clf()
pyplot.plot(time, Ew+Ediss+Ekinb+Ekinc+Eblade, 'r-', label = 'Total Sinks')
pyplot.plot(time, Ef+Eqrower+Eblade,'g-',label = 'Total Sources')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel('energy (J)')
pyplot.show()
if (doplot==8):
pyplot.clf()
pyplot.subplot(111)
pyplot.plot(time, Phandle,'r-',label = 'Handle power (crew)')
pyplot.plot(time, Pbladeslip,'g-',label = 'Puddle power')
pyplot.plot(time, Pf, 'y-', label = 'Propulsive power')
pyplot.plot(time, Pf+Pbladeslip,'k-',label = 'Propulsive+Puddle Power')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel('power (W)')
pyplot.show()
if (doplot==9):
pyplot.clf()
ax1 = pyplot.subplot(111)
pyplot.plot(xblade,yblade,label='blade centre')
pylab.legend(loc='best')
pyplot.xlabel("x (m)")
pyplot.ylabel('y (m)')
ax1.axis('equal')
xblade2 = xblade[0:len(xblade):4]
yblade2 = yblade[0:len(xblade):4]
oarangle2 = oarangle[0:len(xblade):4]
for i in range(len(xblade2)):
x1 = xblade2[i]+rigging.bladelength*np.sin(oarangle2[i])/2.
x2 = xblade2[i]-rigging.bladelength*np.sin(oarangle2[i])/2.
y1 = yblade2[i]-rigging.bladelength*np.cos(oarangle2[i])/2.
y2 = yblade2[i]+rigging.bladelength*np.cos(oarangle2[i])/2.
pyplot.plot([x1,x2],[y1,y2],'r-')
pyplot.show()
if (doplot==10):
pyplot.clf()
pyplot.plot(time, Fhandle, 'r-', label = 'Handle Force')
pyplot.plot(time, Fblade, 'g-', label = 'Blade Force')
pyplot.plot(time, Fprop, 'k-', label = 'Propulsive Force')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel('Force (N)')
pyplot.show()
if (doplot==11):
pyplot.clf()
pyplot.plot(np.degrees(oarangle), Clift, 'r-', label = 'Lift coefficient')
pyplot.plot(np.degrees(oarangle), Cdrag, 'g-', label = 'Drag coefficient')
pylab.legend(loc='lower right')
pyplot.xlabel("Oar Angle (degree)")
pyplot.ylabel("Coefficient")
pyplot.show()
if (doplot==12):
pyplot.clf()
ax1 = pyplot.subplot(111)
pyplot.plot(np.degrees(oarangle), Flift, 'r-', label = 'Lift Force')
pyplot.plot(np.degrees(oarangle), Fbldrag, 'g-', label = 'Drag Force')
pyplot.plot(np.degrees(oarangle), Fbltotal, 'k-', label = 'Total blade Force')
pyplot.plot(np.degrees(oarangle),np.degrees(attackangle),'y.',label='angle of attack')
pylab.legend(loc='lower right')
pyplot.xlabel("Oar Angle (degree)")
pyplot.ylabel("Blade Force")
ax2 = pyplot.twinx()
pyplot.plot(np.degrees(oarangle),np.degrees(attackangle),'y.',label='angle of attack')
pylab.legend(loc='upper right')
pyplot.ylabel("Angle of attack (o)")
ax2.yaxis.tick_right()
ax1 = pyplot.subplot(111)
pyplot.show()
if (doplot==13):
pyplot.clf()
pyplot.plot(time, ydot, 'r-', label = 'Crew velocity')
pylab.legend(loc='lower right')
pyplot.xlabel("time (s)")
pyplot.ylabel("v (m/s)")
pyplot.show()
if (doplot==14):
pyplot.clf()
pyplot.plot(time, xdotdot, 'r-', label = 'Boat acceleration')
pyplot.plot(time, zdotdot, 'g-', label = 'System acceleration')
pyplot.plot(time, ydotdot, 'b-', label = 'Crew acceleration')
if (empirical!=0):
pyplot.plot(emptime,empxdotdot, 'y-', label = 'Measured')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Boat Acceleration (m/s2)")
pyplot.show()
if (doplot==15):
pyplot.clf()
pyplot.plot(time, ydot, 'r-', label = 'Recovery speed')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Recovery Speed (m/s)")
pyplot.show()
if (doplot==16):
pyplot.clf()
pyplot.plot(time, np.degrees(oarangle), 'r-', label = 'Oar Angle')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Oar angle (o)")
pyplot.show()
if (doplot==19):
pyplot.clf()
ax1 = pyplot.subplot(111)
pyplot.plot(time, xdotdot, 'r-', label = 'Boat acceleration')
pyplot.plot(time, zdotdot, 'g-', label = 'System acceleration')
pyplot.plot(time, ydotdot, 'b-', label = 'Crew acceleration')
if (empirical!=0):
pyplot.plot(emptime,empxdotdot, 'y-', label = 'Measured')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Boat Acceleration (m/s2)")
ax2 = pyplot.twinx()
pyplot.plot(time,np.degrees(oarangle),'y-',label='oar angle')
pylab.legend(loc='upper left')
pyplot.ylabel("Oar Angle (o)")
ax2.yaxis.tick_right()
pyplot.show()
# a zero denominator only raises a RuntimeWarning (not an exception), so the
# original try/except never triggered; guard the division explicitly instead
denom = Pf+Pq+Pbladeslip
instanteff = np.divide(Pf+Pq, denom, out=np.zeros_like(denom), where=denom!=0)
if (doplot==17):
pyplot.clf()
pyplot.plot(time, instanteff, 'r-', label = 'Efficiency')
pylab.legend(loc='upper right')
pyplot.xlabel("time (s)")
pyplot.ylabel("Efficiency")
pyplot.show()
# calculate check
decel = -(abs(xdotdot[index_offset:])-xdotdot[index_offset:])/2.
indices = decel.nonzero()
decelmean =
|
mean(decel[indices])
|
numpy.mean
|
from __future__ import print_function, division
import numpy as np
import random
import torch
import torch.utils.data
from .misc import *
# -- 1.0 dataset --
# dataset class for synaptic cleft inputs
class BaseDataset(torch.utils.data.Dataset):
"""
# sample_input_size: sample input size
"""
def __init__(self,
volume, label=None,
sample_input_size=(8, 64, 64),
sample_label_size=None,
sample_stride=(1, 1, 1),
augmentor=None,
mode='train'):
self.mode = mode
# for partially labeled data
# m1 (no): sample chunks with over a certain percentage of labels
# = online version: rejection sampling can be slow
# = offline version: need to keep track of the mask
# self.label_ratio = label_ratio
# m2: make sure the center is labeled
# data format
self.input = volume
self.label = label
self.augmentor = augmentor # data augmentation
# samples, channels, depths, rows, cols
self.input_size = [np.array(x.shape) for x in self.input] # volume size, could be multi-volume input
self.sample_input_size = np.array(sample_input_size) # model input size
self.sample_label_size = np.array(sample_label_size) # model label size
# compute number of samples for each dataset (multi-volume input)
self.sample_stride = np.array(sample_stride, dtype=np.float32)
self.sample_size = [count_volume(self.input_size[x], self.sample_input_size, np.array(self.sample_stride))
for x in range(len(self.input_size))]
# total number of possible inputs for each volume
self.sample_num = np.array([np.prod(x) for x in self.sample_size])
# check partial label
self.label_invalid = [False]*len(self.sample_num)
if self.label is not None:
for i in range(len(self.sample_num)):
seg_id = np.array([-1]).astype(self.label[i].dtype)[0]
seg = self.label[i][sample_label_size[0]//2:-sample_label_size[0]//2,\
sample_label_size[1]//2:-sample_label_size[1]//2,\
sample_label_size[2]//2:-sample_label_size[2]//2]
if np.any(seg == seg_id):
print('dataset %d: needs mask for invalid region'%(i))
self.label_invalid[i] = True
self.sample_num[i] = np.count_nonzero(seg != seg_id)
self.sample_num_a = np.sum(self.sample_num)
self.sample_num_c = np.cumsum([0] + list(self.sample_num))
'''
Image augmentation
1. self.simple_aug: Simple augmentation, including mirroring and transpose
2. self.intensity_aug: Intensity augmentation
'''
if mode=='test': # for test
self.sample_size_vol = [np.array([np.prod(x[1:3]), x[2]]) for x in self.sample_size]
def __getitem__(self, index):
raise NotImplementedError("Need to implement getitem() !")
def __len__(self): # number of possible position
return self.sample_num_a
def get_pos_dataset(self, index):
return np.argmax(index < self.sample_num_c) - 1 # which dataset
def get_pos(self, vol_size, index):
pos = [0, 0, 0, 0]
# support random sampling using the same 'index'
seed =
|
np.random.RandomState(index)
|
numpy.random.RandomState
|
from pervect import PersistenceVectorizer
from sklearn.utils.estimator_checks import (
check_estimator,
check_estimators_dtypes,
check_fit_score_takes_y,
check_dtype_object,
check_pipeline_consistency,
check_estimators_nan_inf,
check_estimator_sparse_data,
check_estimators_pickle,
check_transformer_data_not_an_array,
check_transformer_general,
check_fit2d_predict1d,
check_methods_subset_invariance,
check_fit2d_1sample,
check_fit2d_1feature,
check_dict_unchanged,
check_dont_overwrite_parameters,
check_fit_idempotent,
)
from sklearn.utils.validation import check_random_state
from sklearn.metrics import pairwise_distances
from pervect.pervect_ import (
GaussianMixture,
vectorize_diagram,
wasserstein_diagram_distance,
pairwise_gaussian_ground_distance,
add_birth_death_line,
persistence_wasserstein_distance,
)
import umap
from packaging import version
umap_version = version.parse(umap.__version__)
if umap_version >= version.parse("0.4.0rc1"):
umap_metric="hellinger"
else:
umap_metric = "cosine"
import pytest
import numpy as np
np.random.seed(42)
base_data = np.vstack(
[np.random.beta(1, 5, size=100), np.random.gamma(shape=0.5, scale=1.0, size=100),]
).T
def test_pervect_estimator():
for estimator, check in check_estimator(PersistenceVectorizer, generate_only=True):
# These all pass in unsuitable data, so skip them
if check.func in (
check_estimators_dtypes,
check_fit_score_takes_y,
check_dtype_object,
check_pipeline_consistency,
check_estimators_nan_inf,
check_estimator_sparse_data,
check_estimators_pickle,
check_transformer_data_not_an_array,
check_transformer_general,
check_fit2d_predict1d,
check_methods_subset_invariance,
check_fit2d_1sample,
check_fit2d_1feature,
check_dict_unchanged,
check_dont_overwrite_parameters,
check_fit_idempotent,
):
pass
else:
check(estimator)
def test_pervect_transform_base():
random_seed = check_random_state(42)
model = PersistenceVectorizer(n_components=4, random_state=random_seed).fit(
base_data
)
model_result = model.transform(base_data)
random_seed = check_random_state(42)
gmm = GaussianMixture(n_components=4, random_state=random_seed).fit(base_data)
util_result = np.array([vectorize_diagram(diagram, gmm) for diagram in base_data])
assert np.allclose(model.mixture_model_.means_, gmm.means_)
assert np.allclose(model.mixture_model_.covariances_, gmm.covariances_)
assert np.allclose(model_result, util_result)
random_seed = check_random_state(42)
model_result = PersistenceVectorizer(
n_components=4, random_state=random_seed
).fit_transform(base_data)
assert np.allclose(model_result, util_result)
def test_pervect_transform_umap():
random_seed = check_random_state(42)
gmm = GaussianMixture(n_components=4, random_state=random_seed).fit(base_data)
util_result = np.array([vectorize_diagram(diagram, gmm) for diagram in base_data])
model = PersistenceVectorizer(
n_components=4, random_state=42, apply_umap=True,
).fit(base_data)
model_result = model.transform(base_data)
assert np.allclose(model.mixture_model_.means_, gmm.means_)
assert np.allclose(model.mixture_model_.covariances_, gmm.covariances_)
umap_util_result = umap.UMAP(
metric=umap_metric, random_state=42
).fit_transform(util_result)
assert np.allclose(model_result, umap_util_result)
def test_pervect_transform_umap_wasserstein():
model = PersistenceVectorizer(
n_components=4,
random_state=42,
apply_umap=True,
umap_metric="wasserstein",
).fit(base_data)
model_result = model.umap_.embedding_
precomputed_dmat = model.pairwise_p_wasserstein_distance(base_data)
assert np.allclose(precomputed_dmat, model._distance_matrix)
umap_util_result = umap.UMAP(
metric="precomputed", random_state=42
).fit_transform(precomputed_dmat)
assert np.allclose(model_result, umap_util_result)
def test_model_wasserstein():
random_seed = check_random_state(42)
model = PersistenceVectorizer(n_components=4, random_state=random_seed).fit(
base_data
)
model_dmat = model.pairwise_p_wasserstein_distance(base_data[:10])
random_seed = check_random_state(42)
gmm = GaussianMixture(n_components=4, random_state=random_seed).fit(base_data)
vec_data = [vectorize_diagram(base_data[i], gmm) for i in range(10)]
raw_ground_distance = pairwise_gaussian_ground_distance(
gmm.means_, gmm.covariances_,
)
ground_distance = add_birth_death_line(
raw_ground_distance, gmm.means_, gmm.covariances_, y_axis="lifetime",
)
util_dmat = pairwise_distances(
vec_data,
metric=persistence_wasserstein_distance,
ground_distance=ground_distance,
)
assert
|
np.allclose(model_dmat, util_dmat)
|
numpy.allclose
|
import pytest
import scipy as sp
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from spherecluster import VonMisesFisherMixture
from spherecluster import von_mises_fisher_mixture
from spherecluster import sample_vMF
def test_vmf_log_dense():
"""
Test that approximation approaches whatever scipy has.
"""
n_examples = 2
n_features = 50
kappas = np.linspace(2, 600, 20)
mu = np.random.randn(n_features)
mu /= np.linalg.norm(mu)
X = np.random.randn(n_examples, n_features)
for ee in range(n_examples):
X[ee, :] /= np.linalg.norm(X[ee, :])
diffs = []
for kappa in kappas:
v = von_mises_fisher_mixture._vmf_log(X, kappa, mu)
v_approx = von_mises_fisher_mixture._vmf_log_asymptotic(X, kappa, mu)
normalized_approx_diff = np.linalg.norm(v - v_approx) / np.linalg.norm(v)
print(normalized_approx_diff)
diffs.append(normalized_approx_diff)
assert diffs[0] > 10 * diffs[-1]
def test_vmf_log_detect_breakage():
"""
Find where scipy approximation breaks down.
This doesn't really test anything but demonstrates where approximation
should be applied instead.
"""
n_examples = 3
kappas = [5, 30, 100, 1000, 5000]
n_features = range(2, 500)
breakage_points = []
for kappa in kappas:
first_breakage = None
for n_f in n_features:
mu = np.random.randn(n_f)
mu /= np.linalg.norm(mu)
X = np.random.randn(n_examples, n_f)
for ee in range(n_examples):
X[ee, :] /= np.linalg.norm(X[ee, :])
try:
von_mises_fisher_mixture._vmf_log(X, kappa, mu)
except:
if first_breakage is None:
first_breakage = n_f
breakage_points.append(first_breakage)
print(
"Scipy vmf_log breaks for kappa={} at n_features={}".format(
kappa, first_breakage
)
)
print(breakage_points)
assert_array_equal(breakage_points, [141, 420, 311, 3, 3])
def test_maximization():
num_points = 5000
n_features = 500
posterior = np.ones((1, num_points))
kappas = [5000, 8000, 16400]
for kappa in kappas:
mu = np.random.randn(n_features)
mu /= np.linalg.norm(mu)
X = sample_vMF(mu, kappa, num_points)
centers, weights, concentrations = von_mises_fisher_mixture._maximization(
X, posterior
)
print("center estimate error", np.linalg.norm(centers[0, :] - mu))
print(
"kappa estimate",
np.abs(kappa - concentrations[0]) / kappa,
kappa,
concentrations[0],
)
assert_almost_equal(1., weights[0])
assert_almost_equal(0.0, np.abs(kappa - concentrations[0]) / kappa, decimal=2)
assert_almost_equal(0.0, np.linalg.norm(centers[0, :] - mu), decimal=2)
@pytest.mark.parametrize(
"params_in",
[
{"posterior_type": "soft"},
{"posterior_type": "hard"},
{"posterior_type": "soft", "n_jobs": 2},
{"posterior_type": "hard", "n_jobs": 3},
{"posterior_type": "hard", "force_weights":
|
np.ones(5)
|
numpy.ones
|
from __future__ import absolute_import, print_function
import numpy as np
from scipy import ndimage
import scipy.spatial.distance as dist
from scipy.linalg import svd
from scipy.ndimage import measurements as meas
import numpy.ma as ma
from skimage.morphology import skeletonize_3d as sk3d
from functools import partial
from scipy.stats import mstats as mstats
from .morphology import MorphologyOps
class CacheFunctionOutput(object):
"""
this provides a decorator to cache function outputs
to avoid repeating some heavy function computations
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, _=None):
if obj is None:
return self
return partial(self, obj) # to remember func as self.func
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
value = cache[key]
except KeyError:
value = cache[key] = self.func(*args, **kw)
return value
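# Illustrative usage sketch (added for clarity, not part of the original
# module): the decorator memoises per-instance method results keyed on the
# call arguments, so repeated calls reuse the first computation. The demo
# class and helper below are hypothetical.
class _CacheFunctionOutputDemo(object):
    def __init__(self):
        self.calls = 0

    @CacheFunctionOutput
    def double(self, x):
        self.calls += 1
        return 2 * x


def _cache_function_output_demo():
    demo = _CacheFunctionOutputDemo()
    first = demo.double(3)   # computes and caches the result
    second = demo.double(3)  # served from the cache, calls stays at 1
    return first == second == 6 and demo.calls == 1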
class PairwiseMeasures(object):
def __init__(self, seg_img, ref_img,
measures=None, num_neighbors=8, pixdim=[1, 1, 1],
empty=False, list_labels=None):
self.m_dict = {
'green volume': (self.n_pos_ref, 'Volume_(Green)'),
'red volume': (self.n_pos_seg, 'Volume_(Red)'),
'n_intersection': (self.n_intersection, 'Intersection'),
'n_union': (self.n_union, 'Union'),
'IoU': (self.intersection_over_union, 'IoU'),
'coverage': (self.overlap, 'Overlap'),
'vol_diff': (self.vol_diff, 'VolDiff'),
'ave_dist': (self.measured_average_distance, 'AveDist'),
'haus_dist': (self.measured_hausdorff_distance, 'HausDist'),
'haus_dist95': (self.measured_hausdorff_distance_95, 'HausDist95'),
'com_dist': (self.com_dist, 'COM distance'),
'com_ref': (self.com_ref, 'COM red'),
'com_seg': (self.com_seg, 'COM green')
}
self.seg = seg_img
self.ref = ref_img
self.seg_bin = np.where(seg_img > 0, np.ones_like(seg_img),
np.zeros_like(seg_img))
self.ref_bin = np.where(ref_img > 0, np.ones_like(ref_img),
np.zeros_like(ref_img))
self.list_labels = list_labels
self.flag_empty = empty
self.measures = measures if measures is not None else self.m_dict
self.neigh = num_neighbors
self.pixdim = pixdim
self.m_dict_result = {}
def __fp_map(self):
"""
This function calculates the false positive map
:return: FP map
"""
return np.asarray((self.seg - self.ref) > 0.0, dtype=np.float32)
def __fn_map(self):
"""
This function calculates the false negative map
:return: FN map
"""
return np.asarray((self.ref - self.seg) > 0.0, dtype=np.float32)
def __tp_map(self):
"""
This function calculates the true positive map
:return: TP map
"""
return np.asarray((self.ref + self.seg) > 1.0, dtype=np.float32)
def __tn_map(self):
"""
This function calculates the true negative map
:return: TN map
"""
return np.asarray((self.ref + self.seg) < 0.5, dtype=np.float32)
def __union_map(self):
"""
This function calculates the union map between segmentation and
reference image
:return: union map
"""
return np.asarray((self.ref + self.seg) > 0.5, dtype=np.float32)
def __intersection_map(self):
"""
This function calculates the intersection between segmentation and
reference image
:return: intersection map
"""
return np.multiply(self.ref_bin, self.seg_bin)
@CacheFunctionOutput
def n_pos_ref(self):
return np.sum(self.ref)
@CacheFunctionOutput
def n_neg_ref(self):
return np.sum(1 - self.ref)
@CacheFunctionOutput
def n_pos_seg(self):
return np.sum(self.seg)
@CacheFunctionOutput
def n_neg_seg(self):
return np.sum(1 - self.seg)
@CacheFunctionOutput
def fp(self):
return np.sum(self.__fp_map())
@CacheFunctionOutput
def fn(self):
return np.sum(self.__fn_map())
@CacheFunctionOutput
def tp(self):
return np.sum(self.__tp_map())
@CacheFunctionOutput
def tn(self):
return np.sum(self.__tn_map())
@CacheFunctionOutput
def n_intersection(self):
return np.sum(self.__intersection_map())
@CacheFunctionOutput
def n_union(self):
return np.sum(self.__union_map())
def overlap(self):
return np.sum(self.ref)/np.sum(self.seg) * 100 # Modifying to be green / red *100
def intersection_over_union(self):
"""
This function calculates the intersection over union ratio, i.e. the
Jaccard coefficient
:return:
"""
return self.n_intersection() / self.n_union()
def com_dist(self):
"""
This function calculates the euclidean distance between the centres
of mass of the reference and segmentation.
:return:
"""
print('calculating com_dist')
if self.flag_empty:
return -1
else:
com_ref = ndimage.center_of_mass(self.ref)
com_seg = ndimage.center_of_mass(self.seg)
com_dist = np.sqrt(np.dot(np.square(np.asarray(com_ref) -
np.asarray(com_seg)), np.square(
self.pixdim)))
return com_dist
def com_ref(self):
"""
This function calculates the centre of mass of the reference
segmentation
:return:
"""
return ndimage.center_of_mass(self.ref)
def com_seg(self):
"""
This functions provides the centre of mass of the segmented element
:return:
"""
if self.flag_empty:
return -1
else:
return ndimage.center_of_mass(self.seg)
def list_labels(self):
if self.list_labels is None:
return ()
return tuple(np.unique(self.list_labels))
def vol_diff(self):
"""
This function calculates the ratio of difference in volume between
the reference and segmentation images.
:return: vol_diff
"""
return np.abs(self.n_pos_ref() - self.n_pos_seg()) / self.n_pos_ref()
# @CacheFunctionOutput
# def _boundaries_dist_mat(self):
# dist = DistanceMetric.get_metric('euclidean')
# border_ref = MorphologyOps(self.ref, self.neigh).border_map()
# border_seg = MorphologyOps(self.seg, self.neigh).border_map()
# coord_ref = np.multiply(np.argwhere(border_ref > 0), self.pixdim)
# coord_seg = np.multiply(np.argwhere(border_seg > 0), self.pixdim)
# pairwise_dist = dist.pairwise(coord_ref, coord_seg)
# return pairwise_dist
@CacheFunctionOutput
def border_distance(self):
"""
This functions determines the map of distance from the borders of the
segmentation and the reference and the border maps themselves
:return: distance_border_ref, distance_border_seg, border_ref,
border_seg
"""
border_ref = MorphologyOps((self.ref>0), self.neigh).border_map()
border_seg = MorphologyOps((self.seg>0),
self.neigh).border_map()
oppose_ref = 1 - self.ref/np.where(self.ref == 0, np.ones_like(
self.ref), self.ref)
oppose_seg = 1 - self.seg/np.where(self.seg == 0, np.ones_like(
self.seg), self.seg)
distance_ref = ndimage.distance_transform_edt(oppose_ref)
distance_seg = ndimage.distance_transform_edt(oppose_seg)
distance_border_seg = border_ref * distance_seg
distance_border_ref = border_seg * distance_ref
return distance_border_ref, distance_border_seg, border_ref, border_seg
def measured_distance(self):
"""
This functions calculates the average symmetric distance and the
hausdorff distance between a segmentation and a reference image
:return: hausdorff distance and average symmetric distance
"""
ref_border_dist, seg_border_dist, ref_border, \
seg_border = self.border_distance()
average_distance = (np.sum(ref_border_dist) + np.sum(
seg_border_dist)) / (np.sum(seg_border+ref_border))
hausdorff_distance = np.max([np.max(ref_border_dist), np.max(
seg_border_dist)])
hausdorff_distance_95 = np.max([np.percentile(ref_border_dist[
self.ref+self.seg > 0],
q=95),
np.percentile(
seg_border_dist[self.ref+self.seg > 0], q=95)])
return hausdorff_distance, average_distance, hausdorff_distance_95
def measured_average_distance(self):
"""
This function returns only the average distance when calculating the
distances between segmentation and reference
:return:
"""
return self.measured_distance()[1]
def measured_hausdorff_distance(self):
"""
This function returns only the Hausdorff distance when calculating the
distances between segmentation and reference
:return:
"""
return self.measured_distance()[0]
def measured_hausdorff_distance_95(self):
return self.measured_distance()[2]
# def average_distance(self):
# pairwise_dist = self._boundaries_dist_mat()
# return (np.sum(np.min(pairwise_dist, 0)) + \
# np.sum(np.min(pairwise_dist, 1))) / \
# (np.sum(self.ref + self.seg))
#
# def hausdorff_distance(self):
# pairwise_dist = self._boundaries_dist_mat()
# return np.max((np.max(np.min(pairwise_dist, 0)),
# np.max(np.min(pairwise_dist, 1))))
def header_str(self):
result_str = [self.m_dict[key][1] for key in self.measures]
result_str = ',' + ','.join(result_str)
return result_str
def fill_value(self):
for key in self.m_dict:
result = self.m_dict[key][0]()
if not isinstance(result, (list, tuple, set, np.ndarray)):
self.m_dict_result[key] = result
else:
for d in range(len(result)):
key_new = key + '_' + str(d)
self.m_dict_result[key_new] = result[d]
def to_string(self, fmt='{:.4f}'):
result_str = ""
list_space = ['com_ref', 'com_seg', 'list_labels']
for key in self.measures:
result = self.m_dict[key][0]()
if key in list_space:
result_str += ' '.join(fmt.format(x) for x in result) \
if isinstance(result, tuple) else fmt.format(result)
else:
result_str += ','.join(fmt.format(x) for x in result) \
if isinstance(result, tuple) else fmt.format(result)
result_str += ','
return result_str[:-1] # trim the last comma
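# Illustrative sketch (hypothetical arrays, not part of the original module):
# two overlapping binary masks give n_intersection = 1 and n_union = 3, hence
# an IoU of 1/3, and an overlap of 100 * sum(ref) / sum(seg) = 100.
def _pairwise_measures_demo():
    ref = np.array([[1, 1, 0, 0]])
    seg = np.array([[0, 1, 1, 0]])
    pm = PairwiseMeasures(seg, ref)
    return pm.intersection_over_union(), pm.overlap()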
class RegionProperties(object):
def __init__(self, seg, img=None, measures=None,
num_neighbors=18, threshold=0, pixdim=None):
if pixdim is None:
pixdim = [1, 1, 1]
self.seg = seg
self.seg_bin = (self.seg > 0).astype(float)
self.order = [1, 0, 2]
self.voxel_size = np.prod(pixdim)
self.pixdim = pixdim
if img is None:
self.img = seg
else:
self.img = img
self.img_channels = self.img.shape[4] if self.img.ndim >= 5 else 1  # channel axis only exists for 5-D input
for i in range(self.img.ndim, 5):
self.img = np.expand_dims(self.img, -1)
self.threshold = threshold
if self.seg is not None:
self.masked_img, self.masked_seg = self.__compute_mask()
else:
print("no mask")
self.neigh = num_neighbors
self.connect = MorphologyOps(self.seg_bin,
self.neigh).connect()
self.dilate = MorphologyOps(self.seg_bin, self.neigh).dilate()
self.erode = MorphologyOps(self.seg_bin, self.neigh).erode()
# self.glszm = self.grey_level_size_matrix()
self.m_dict = {
'centre of mass': (self.centre_of_mass, ['CoMx',
'CoMy',
'CoMz']),
'centre_abs': (self.centre_abs, ['Truex, Truey, Truez']),
'volume': (self.volume,
['NVoxels', 'NVolume']),
'fragmentation': (self.fragmentation, ['Fragmentation']),
'mean_intensity': (self.mean_int, ['MeanIntensity']),
'surface': (self.surface, ['NSurface', 'Nfaces_surf',
'NSurf_ext', 'Nfaces_ext']),
'surface_dil': (self.surface_dil, ['surf_dil', 'surf_ero']),
'surface volume ratio': (self.sav, ['sav_dil', 'sav_ero']),
'compactness': (self.compactness, ['CompactNumbDil'
]),
'eigen': (self.eigen, ['eigenvalues']),
'std': (self.std_values, ['std']),
'quantiles': (self.quantile_values, ['quantiles']),
'bounds': (self.bounds, ['bounds']),
'cc': (self.connect_cc, ['N_CC']),
'cc_dist': (self.dist_cc, ['MeanDistCC']),
'cc_size': (self.cc_size, ['MinSize', 'MaxSize', 'MeanSize']),
'max_extent': (self.max_extent, ['MaxExtent']),
'shape_factor': (self.shape_factor, ['ShapeFactor',
'shapefactor_surfcount']),
'skeleton_length': (self.skeleton_length, ['SkeletonLength'])
}
self.measures = measures if measures is not None else self.m_dict
self.m_dict_result = {}
def binarise(self):
binary_img = np.where(self.seg > 0, np.ones_like(self.seg),
np.zeros_like(self.seg))
return binary_img
def __compute_mask(self):
# TODO: check whether this works for probabilities type
foreground_selector = np.where((self.seg > 0).reshape(-1))[0]
probs = self.seg.reshape(-1)[foreground_selector]
regions = np.zeros((foreground_selector.shape[0], self.img_channels))
for i in np.arange(self.img_channels):
regions[:, i] = self.img[..., 0, i].reshape(-1)[foreground_selector]
return regions, probs
def shape_factor(self):
binarised = self.seg_bin
vol = np.sum(binarised)
if vol == 0:
return 0, 0, 0
radius = (vol * 0.75 / np.pi) ** (1.0/3.0)  # radius of the sphere with the same volume
surf_sphere = 4 * np.pi * radius * radius
surf_map, count_surf, _, _ = MorphologyOps(binarised, 6).border_surface_measures()
count_fin = np.where(count_surf > 0, 6-count_surf, count_surf)
count_final_surf = np.sum(count_fin)
vol_change = np.pi ** (1/3) * (6*vol) ** (2/3)
return surf_sphere / np.sum(surf_map), surf_sphere / \
count_final_surf, vol_change/np.sum(surf_map)
def skeleton_length(self):
return np.sum(MorphologyOps(self.seg_bin, 6).skeleton_map())
def centre_of_mass(self):
return list(np.mean(np.argwhere(self.seg > self.threshold), 0))
def centre_abs(self):
mean_centre = np.mean(np.argwhere(self.seg > self.threshold), 0)
mean_centre_new = mean_centre[self.order]
return list(mean_centre_new * self.pixdim)
# def grey_level_size_matrix(self):
def volume(self):
# numb_seg = np.sum(self.seg)
numb_seg_bin = np.sum(self.seg > 0)
return numb_seg_bin, numb_seg_bin*self.voxel_size
def surface(self):
border_seg, count_surf, border_ext, count_surf_ext = MorphologyOps(
self.seg_bin, self.neigh, pixdim=self.pixdim).border_surface_measures()
# numb_border_seg = np.sum(border_seg)
# count_surfaces = np.where(count_surf > 0, 6-count_surf, count_surf)
# numb_border_ext = np.sum(border_ext)
# count_surfaces_ext = np.where(count_surf_ext > 0, 6 - count_surf_ext,
# count_surf_ext)
return border_seg, count_surf, border_ext, \
count_surf_ext
def surface_dil(self):
return np.sum(self.dilate -
self.seg_bin), \
np.sum(self.seg_bin - self.erode)
def cc_size(self):
if self.connect is None:
self.connect = MorphologyOps(self.seg_bin,
self.neigh).connect()
min_size = 100000
max_size = 0
nf = np.max(self.connect)
for l in range(1, nf+1):
bin_label = np.where(self.connect == l, np.ones_like(self.connect),
np.zeros_like(self.connect))
if np.sum(bin_label) > max_size:
max_size = np.sum(bin_label)
if np.sum(bin_label) < min_size:
min_size = np.sum(bin_label)
return min_size, max_size, np.sum(self.binarise())/nf
def connect_cc(self):
if self.connect is None:
self.connect = MorphologyOps(self.seg_bin,
self.neigh).connect()
return np.max(self.connect)
def fragmentation(self):
if self.connect is None:
self.connect = MorphologyOps(self.seg_bin,
self.neigh).connect()
return 1 - 1.0/(np.max(self.connect)+0.000001)
def dist_cc(self):
if self.connect is None:
self.connect = MorphologyOps(self.seg_bin,
self.neigh).connect()
connected, numb_frac = self.connect, np.max(self.connect)
if numb_frac == 1:
return 0
else:
dist_array = []
size_array = []
for label in range(numb_frac):
indices_l = np.asarray(np.where(connected == label+1)).T
for j in range(label+1, numb_frac):
indices_j = np.asarray(np.where(connected == j + 1)).T
size_array.append(indices_j.shape[0] + indices_l.shape[0])
dist_array.append(np.mean(dist.cdist(indices_l,
indices_j,
'wminkowski',
p= 2,
w=self.pixdim)))
return np.sum(np.asarray(dist_array) *
|
np.asarray(size_array)
|
numpy.asarray
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# <NAME>, <NAME>, <NAME>, and <NAME>.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
clustering frontend --- :mod:`MDAnalysis.analysis.encore.clustering.ClusteringMethod`
=====================================================================================
The module defines classes for interfacing to various clustering algorithms.
One has been implemented natively, and will always be available, while
others are available only if scikit-learn is installed
:Author: <NAME>, <NAME>, <NAME>
.. versionadded:: 0.16.0
"""
from __future__ import absolute_import
import numpy as np
import warnings
import logging
# Import native affinity propagation implementation
from . import affinityprop
# Attempt to import scikit-learn clustering algorithms
try:
import sklearn.cluster
except ImportError:
sklearn = None
msg = "sklearn.cluster could not be imported: some functionality will " \
"not be available in encore.fit_clusters()"
warnings.warn(msg, category=ImportWarning)
logging.warning(msg)
del msg
def encode_centroid_info(clusters, cluster_centers_indices):
"""
Adjust cluster indices to include centroid information
as described in documentation for ClusterCollection
"""
values, indices = np.unique(clusters, return_inverse=True)
for c_center in cluster_centers_indices:
if clusters[c_center] != c_center:
values[indices[c_center]] = c_center
return values[indices]
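# Illustrative worked example (not part of the original module): with cluster
# labels [0, 0, 1, 1] and centroid frame indices [1, 2], the relabelled output
# is [1, 1, 2, 2], i.e. every element carries the index of its cluster centroid.
def _encode_centroid_info_demo():
    clusters = np.array([0, 0, 1, 1])
    cluster_centers_indices = np.array([1, 2])
    return encode_centroid_info(clusters, cluster_centers_indices)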
class ClusteringMethod (object):
"""
Base class for any Clustering Method
"""
# Whether the method accepts a distance matrix
accepts_distance_matrix=True
def __call__(self, x):
"""
Parameters
----------
x
either trajectory coordinate data (np.array) or an
encore.utils.TriangularMatrix, encoding the conformational
distance matrix
Returns
-------
numpy.array
list of cluster indices
"""
raise NotImplementedError("Class {0} doesn't implement __call__()"
.format(self.__class__.__name__))
class AffinityPropagationNative(ClusteringMethod):
"""
Interface to the natively implemented Affinity propagation procedure.
"""
def __init__(self,
damping=0.9, preference=-1.0,
max_iter=500, convergence_iter=50,
add_noise=True):
"""
Parameters
----------
damping : float, optional
Damping factor (default is 0.9). Parameter for the Affinity
Propagation for clustering.
preference : float, optional
Preference parameter used in the Affinity Propagation algorithm for
clustering (default -1.0). A high preference value results in
many clusters; a low preference results in fewer clusters.
max_iter : int, optional
Maximum number of iterations for affinity propagation (default is
500).
convergence_iter : int, optional
Minimum number of unchanging iterations to achieve convergence
(default is 50). Parameter in the Affinity Propagation for
clustering.
add_noise : bool, optional
Apply noise to similarity matrix before running clustering
(default is True)
"""
self.damping = damping
self.preference = preference
self.max_iter = max_iter
self.convergence_iter = convergence_iter
self.add_noise = add_noise
def __call__(self, distance_matrix):
"""
Parameters
----------
distance_matrix : encore.utils.TriangularMatrix
conformational distance matrix
Returns
-------
numpy.array
list of cluster indices
"""
clusters = affinityprop.AffinityPropagation(
s=distance_matrix * -1., # invert sign
preference=self.preference,
lam=self.damping,
max_iterations = self.max_iter,
convergence = self.convergence_iter,
noise=int(self.add_noise))
details = {}
return clusters, details
if sklearn:
class AffinityPropagation(ClusteringMethod):
"""
Interface to the Affinity propagation clustering procedure implemented
in sklearn.
"""
def __init__(self,
damping=0.9, preference=-1.0,
max_iter=500, convergence_iter=50,
**kwargs):
"""
Parameters
----------
damping : float, optional
Damping factor (default is 0.9). Parameter for the Affinity
Propagation for clustering.
preference : float, optional
Preference parameter used in the Affinity Propagation algorithm
for clustering (default -1.0). A high preference value results
in many clusters; a low preference results in fewer clusters.
max_iter : int, optional
Maximum number of iterations for affinity propagation (default
is 500).
convergence_iter : int, optional
Minimum number of unchanging iterations to achieve convergence
(default is 50). Parameter in the Affinity Propagation for
clustering.
"""
self.ap = \
sklearn.cluster.AffinityPropagation(
damping=damping,
preference=preference,
max_iter=max_iter,
convergence_iter=convergence_iter,
affinity="precomputed",
**kwargs)
def __call__(self, distance_matrix):
"""
Parameters
----------
distance_matrix : encore.utils.TriangularMatrix
conformational distance matrix
Returns
-------
numpy.array
list of cluster indices
"""
logging.info("Starting Affinity Propagation: {0}".format
(self.ap.get_params()))
# Convert from distance matrix to similarity matrix
similarity_matrix = distance_matrix.as_array() * -1
clusters = self.ap.fit_predict(similarity_matrix)
clusters = encode_centroid_info(clusters,
self.ap.cluster_centers_indices_)
details = {}
return clusters, details
class DBSCAN(ClusteringMethod):
"""
Interface to the DBSCAN clustering procedure implemented in sklearn.
"""
def __init__(self,
eps=0.5,
min_samples=5,
algorithm="auto",
leaf_size=30,
**kwargs):
"""
Parameters
----------
eps : float, optional (default = 0.5)
The maximum distance between two samples for them to be
considered as in the same neighborhood.
min_samples : int, optional (default = 5)
The number of samples (or total weight) in a neighborhood for
a point to be considered as a core point. This includes the
point itself.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends
on the nature of the problem.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at
least ``min_samples`` is by itself a core sample; a sample with
negative weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
self.dbscan = sklearn.cluster.DBSCAN(eps=eps,
min_samples = min_samples,
algorithm=algorithm,
leaf_size = leaf_size,
metric="precomputed",
**kwargs)
def __call__(self, distance_matrix):
"""
Parameters
----------
distance_matrix : encore.utils.TriangularMatrix
conformational distance matrix
Returns
-------
numpy.array
list of cluster indices
"""
logging.info("Starting DBSCAN: {0}".format(
self.dbscan.get_params()))
clusters = self.dbscan.fit_predict(distance_matrix.as_array())
if
|
np.min(clusters == -1)
|
numpy.min
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 23 14:38:10 2021
@author: oscar
Script for training-validation process, getting BRs for each BP, S, number of
encoders and histogram size combination. In this version, we do not sort the
validation data histograms. See article XX for details.
"""
# We run it for 10 CVs, for each BP, and for different numbers of S
from functions_1 import *
import numpy as np
import pickle
import copy
import re
################### SELECT OPTION, PARAMETERS ############################
# Parameters
samples_per_channel_for_histogram_vector = pow(2,np.array([2,3,4,5,6,7,8,9,10])) # size of on-implant histogram
train_percentage = 50 # half of channels are for training, half for validation, for each cross-validation (CV) split
how_many_channels_Sabes = 2000 # we limit the number of Sabes channels since there are only 960 Flint channels; this prevents the results from overfitting to the Sabes data
nb_CV_iterations = 30
# Specify root directory (where directories.txt file is located)
root_directory = r'D:\Dropbox (Imperial NGNI)\NGNI Share\Workspace\Oscar\Work\MUA compression\Upload code'
##########################################################################
# Read directories.txt file
with open(root_directory + '\\directories.txt') as f:
lines = f.readlines()
# Get path to Formatted data
for path in lines:
if path.startswith('Formatted_data_path'):
pattern = "'(.*?)'"
data_directory = re.search(pattern, path).group(1)
# Get results directory
for path in lines:
if path.startswith('BR_no_sort_results'):
pattern = "'(.*?)'"
results_directory = re.search(pattern, path).group(1)
# Get SCLV directory
for path in lines:
if path.startswith('SCLV_path'):
pattern = "'(.*?)'"
SCLV_directory = re.search(pattern, path).group(1)
# Load binned MUA data
file_name = data_directory + '\\all_binned_data_train.pkl'
with open(file_name, 'rb') as file:
results = pickle.load(file)
all_binned_data = results['all_binned_data']
bin_vector = results['bin_vector']
datasets = results['datasets']
results = [] # clear variable
# Iterate through cross-validation iterations
for CV_iteration in np.arange(1,nb_CV_iterations,1):
# Iterate through different BPs
for BP_counter, bin_resolution in enumerate(bin_vector):
all_data = all_binned_data[BP_counter] # index the data binned at the desired BP
# Split channels into train and test, with shuffling
MUA_binned_train = []
MUA_binned_validation = []
# Iterate through Flint and Sabes datasets, stored separately in 'all_data' variable
for dataset_count, data in enumerate(all_data):
# Shuffle channels
shuffled_channels = np.random.permutation(len(data))
data = [data[i] for i in shuffled_channels]
# Limit the Sabes data number of channels
if dataset_count == 1: # Sabes
data = data[:how_many_channels_Sabes]
# Index below which channels are used for training and above which for validation
train_cutoff = int(np.round(train_percentage * len(data) / 100))
# Assign train and validation channels
MUA_binned_train.extend(data[:train_cutoff])
MUA_binned_validation.extend(data[train_cutoff:])
nb_train_channels = len(MUA_binned_train)
nb_val_channels = len(MUA_binned_validation)
nb_channels = nb_train_channels + nb_val_channels
all_data = []
data = []
# Iterate through S values
for S in np.arange(2,11,1):
# Work on copies of the data, keeps the starting point the same for each S
MUA_binned_train_copy = copy.deepcopy(MUA_binned_train)
MUA_binned_validation_copy = copy.deepcopy(MUA_binned_validation)
print('BP: ' + str(bin_resolution) + '; S: ' + str(int(S)))
# Set the max firing rate for each BP, according to S
max_firing_rate = int(S-1)
# Load all possible SCLVs
# NOTE: if these have not been produced, they should be produced prior to running this script.
# To produce them, see the "Compressing data\Produce SCLVs" directory
file_name = 'Stored_SCLVs_S_'+str(int(S))+'.pkl'
try:
with open(SCLV_directory + '\\' + file_name, 'rb') as file:
SCLVs = pickle.load(file)
nb_SCLVs = len(SCLVs)
SCLVs = np.array(SCLVs,dtype=object)
except:
print('If SCLV files have not been produced, use "produce_all_SCLVs_given_S" script to produce them.')
if len(SCLVs[0,:]) != max_firing_rate + 1:
input('We have a problem, S not equal to SCLV length')
# Get training data histogram for each channel
# Note: We only work on the histograms:
# it saves time in that it's much more computationally efficient to
# do the sorted histogram and SCLV dot product than to encode the
# data and check its length.
histograms = np.zeros((max_firing_rate+1,nb_train_channels))
symbol_list_bin_limits = np.arange(-0.5,max_firing_rate+1.5,1)
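# Note (added for clarity): with bin edges [-0.5, 0.5, ..., max_firing_rate + 0.5],
# np.histogram counts each integer symbol 0..max_firing_rate in its own bin, so
# each column of 'histograms' ends up holding one channel's symbol counts,
# sorted in descending order of frequency.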
for chan_index, train_data in enumerate(MUA_binned_train_copy): # iterate through training data channels
# Saturate dynamic range at S
train_data[train_data> max_firing_rate] = max_firing_rate
# Get histogram
temp_hist_train = np.histogram(train_data, symbol_list_bin_limits)
histograms[:,chan_index] = np.flip(np.sort(temp_hist_train[0]))
# Validation data histograms
val_histograms_memory = []
val_histograms_post_memory = []
sample_counter_cutoff = np.zeros((nb_val_channels,len(samples_per_channel_for_histogram_vector))) # cutoff index after which we use the validation data for measuring compression (before cutoff is for assignment)
end_cutoff = np.zeros((nb_val_channels,len(samples_per_channel_for_histogram_vector))) # cutoff index after which the validation data is no longer used
# Get val data histogram, depending on hist memory size
for hist_counter, sample_val_cutoff in enumerate(samples_per_channel_for_histogram_vector): # iterate through histogram sizes
val_histograms = np.zeros((max_firing_rate+1,nb_val_channels))
val_histograms_post = np.zeros((max_firing_rate+1,nb_val_channels))
skipped_val = 0 # used to test the code
for channel, val_data in enumerate(MUA_binned_validation_copy): # iterate through validation data channels
# Saturate dynamic range at S
val_data[val_data> max_firing_rate] = max_firing_rate
# Get sample cutoff for the given histogram size
temp_dict, sample_counter_cutoff[channel,hist_counter] = online_histogram_w_sat_based_nb_of_samples(val_data,sample_val_cutoff,max_firing_rate)
# Get validation histogram for assignment, counted up to when
# histogram is saturated
temp_hist_val = np.histogram(val_data[:int(sample_counter_cutoff[channel,hist_counter])], symbol_list_bin_limits)[0]
# KEY FEATURE: No sorting of validation data in this version
val_histograms[:,channel] = temp_hist_val
# Get validation histogram after assignment (all the data used to
# measure BR)
end_cutoff[channel,hist_counter] = int(sample_counter_cutoff[channel,hist_counter]) + int(len(val_data)/2)
# If not enough data (i.e. more than half the data was used for assignment), we skip, as we want each histogram size to use the same number of samples for compression (shows up as NaN in the BR)
if end_cutoff[channel,hist_counter] > len(val_data):
skipped_val += 1
continue
# Get the histogram of the to-be-compressed validation data (post-assignment data)
temp_hist_val_post = np.histogram(val_data[int(sample_counter_cutoff[channel,hist_counter]):int(end_cutoff[channel,hist_counter])], symbol_list_bin_limits)
# We need to ensure that the sorting of the histogram is done
# according to the sorting given during assignment
# KEY FEATURE: However, no mapping in this version
val_histograms_post[:,channel] = temp_hist_val_post[0]
# Code Test: make sure nb of columns of zeros in val_histograms_post
# matches the amount of skipped histograms
temp_test = np.sum(np.sum(val_histograms_post,axis=0)==0)
if temp_test != skipped_val:
try:
raise ValueError("Check code: the amount of skipped histograms should match the number of columns of zeros in the post-assignment validation histograms")
except Exception as error:
print('Caught this error: ' + repr(error))
# Store validation assignment and post-assignment histograms, one of each for each histogram size
val_histograms_memory.append(val_histograms)
val_histograms_post_memory.append(val_histograms_post)
# Get the proportion of post-assignment to total validation data, just so we know, relatively, how much of the data was used for compression
stored_val_BR_data_proportion = (end_cutoff.astype(int) - sample_counter_cutoff.astype(int)) / end_cutoff.astype(int)
# Iterate through rounds of training, reduce the number of SCLVs as we go by removing
# the least popular ones. With each round, we find the subset of most popular SCLVs.
stored_SCLVs = []
stored_all_var_BRs = [] # Store CRs
stored_hist_SCLVs = [] # to see which channels were assigned to which SCLVs
counter_round = 0
SCLV_index = np.zeros((nb_train_channels,nb_SCLVs))
while nb_SCLVs != 0: # while there is an SCLV left
all_var_BRs = []
# Store the SCLVs in this round (each round has a reduced amount, we remove
# the least popular ones)
stored_SCLVs.append(SCLVs)
# Get dot product of SCLVs and training data histograms, a proxy for CR
dot_prod = np.matmul(np.transpose(histograms),
|
np.transpose(SCLVs)
|
numpy.transpose
|
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import random
def generate_heat_equation_data(amplitude=1.0, n=1, phase=0):
"""
@param amplitude: maximum temperature of initial condition
@param n: [int] no of harmonics of wave of initial condition
@param phase: phase shift of the initial condition
@return: numpy array
"""
L=1 # length of rod in cm
max_t = 2 # maximum time steps in seconds
c = 0.03 # intrinsic property of metal used - fixed for same type of metal
exponential_const = (c * n * np.pi / L) ** 2
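# Assumed analytic form being sampled (added for clarity; an assumption about
# the intended model): the separable solution of the 1D heat equation,
# u(x, t) = amplitude * sin(n*pi*x/L + phase) * exp(-((c*n*pi/L)**2) * t),
# so exponential_const above is the temporal decay rate of the n-th harmonic.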
t =
|
np.linspace(0, max_t, 200)
|
numpy.linspace
|
import func
import numpy as np
from time import sleep
# X1 = np.array([[0.], [1.], [2.], [3.], [4.]])
# theta1 = np.array([[2.], [4.]])
# Y1 = np.array([[2.], [7.], [12.], [17.], [22.]])
# X3 = np.array([[0.2, 2., 20.], [0.4, 4., 40.], [0.6, 6., 60.], [0.8, 8.,80.]])
# X2 = np.array([[1], [2], [3], [5], [8]])
# theta2 = np.array([[2.]])
# theta3 = np.array([[0.05], [1.], [1.], [1.]])
X1 = np.array([[0.], [1.], [2.], [3.], [4.]])
theta1 = np.array([[1.], [1.]])
Y1 = np.array([[2.], [6.], [10.], [14.], [18.]])
def predict__(theta, X):
print(theta.shape)
print(X.shape)
if (X.shape[1], 1) != theta.shape:
print("incompatible dimension match between X and theta")
return None
return func.mat_vec_prod(X,theta)
# print(predict__(theta3,X3))
def cost_elem__(theta, X, Y):
y_pred = predict__(theta, X)
print(y_pred.shape)
print(Y.shape)
M = X.shape[0]
return 0.5 / M * (y_pred - Y) ** 2  # element-wise cost (1/(2M)) * (y_pred - y)^2
def cost__(theta1, X1, Y1):
y_pred = predict__(theta1, X1)
return func.vec_mse(y_pred, Y1) * 0.5
# print(cost_elem__(theta1, X1, Y1))
# print(cost__(theta1, X1, Y1))
# def ft_vec_gradient(x, y, theta):
# M = x.shape[0]
# N = x.shape[1]
# add = np.ones((x.shape[0],1))
# fix = np.concatenate((add, x),axis=1)
# # print(theta.shape)
# # print(x.shape)
# # print(y.shape)
# hp = predict__(theta, fix) - y
# if (M,1) != y.shape or (N,1) != theta.shape:
# print("incompatible")
# return None
# y = func.reshape(y)
# theta = func.reshape(theta)
# return func.dot(x, hp) / x.shape[0]
def fit__(theta, X, Y,alpha=0.01,n_cycle=2000):
M = X.shape[0]
add = np.ones((X.shape[0],1))
fix =
|
np.concatenate((add, X),axis=1)
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
u"""
Beta regression for modeling rates and proportions.
References
----------
Grün, Bettina, <NAME>, and <NAME>. Extended beta regression
in R: Shaken, stirred, mixed, and partitioned. No. 2011-22. Working Papers in
Economics and Statistics, 2011.
Smithson, Michael, and <NAME>. "A better lemon squeezer?
Maximum-likelihood regression with beta-distributed dependent variables."
Psychological methods 11.1 (2006): 54.
"""
import numpy as np
from scipy.special import gammaln as lgamma
import patsy
import statsmodels.base.wrapper as wrap
import statsmodels.regression.linear_model as lm
from statsmodels.tools.decorators import cache_readonly
from statsmodels.base.model import (
GenericLikelihoodModel, GenericLikelihoodModelResults, _LLRMixin)
from statsmodels.genmod import families
_init_example = """
Beta regression with default of logit-link for exog and log-link
for precision.
>>> mod = BetaModel(endog, exog)
>>> rslt = mod.fit()
>>> print(rslt.summary())
We can also specify a formula and a specific structure and use the
identity-link for precision.
>>> from sm.families.links import identity
>>> Z = patsy.dmatrix('~ temp', dat, return_type='dataframe')
>>> mod = BetaModel.from_formula('iyield ~ C(batch, Treatment(10)) + temp',
... dat, exog_precision=Z,
... link_precision=identity())
In the case of proportion-data, we may think that the precision depends on
the number of measurements. E.g for sequence data, on the number of
sequence reads covering a site:
>>> Z = patsy.dmatrix('~ coverage', df)
>>> formula = 'methylation ~ disease + age + gender + coverage'
>>> mod = BetaModel.from_formula(formula, df, Z)
>>> rslt = mod.fit()
"""
class BetaModel(GenericLikelihoodModel):
"""Beta Regression.
The Model is parameterized by mean and precision. Both can depend on
explanatory variables through link functions.
Parameters
----------
endog : array_like
1d array of endogenous response variable.
exog : array_like
A nobs x k array where `nobs` is the number of observations and `k`
is the number of regressors. An intercept is not included by default
and should be added by the user (models specified using a formula
include an intercept by default). See `statsmodels.tools.add_constant`.
exog_precision : array_like
2d array of variables for the precision.
link : link
Any link in sm.families.links for mean, should have range in
interval [0, 1]. Default is logit-link.
link_precision : link
Any link in sm.families.links for precision, should have
range in positive line. Default is log-link.
**kwds : extra keywords
Keyword options that will be handled by super classes.
Not all general keywords will be supported in this class.
Notes
-----
Status: experimental, new in 0.13.
Core results are verified, but api can change and some extra results
specific to Beta regression are missing.
Examples
--------
{example}
See Also
--------
:ref:`links`
""".format(example=_init_example)
def __init__(self, endog, exog, exog_precision=None,
link=families.links.Logit(),
link_precision=families.links.Log(), **kwds):
etmp = np.array(endog)
assert np.all((0 < etmp) & (etmp < 1))
if exog_precision is None:
extra_names = ['precision']
exog_precision = np.ones((len(endog), 1), dtype='f')
else:
extra_names = ['precision-%s' % zc for zc in
(exog_precision.columns
if hasattr(exog_precision, 'columns')
else range(1, exog_precision.shape[1] + 1))]
kwds['extra_params_names'] = extra_names
super(BetaModel, self).__init__(endog, exog,
exog_precision=exog_precision,
**kwds)
self.link = link
self.link_precision = link_precision
# not needed, handled by super:
# self.exog_precision = exog_precision
# inherited df do not account for precision params
self.nobs = self.endog.shape[0]
self.df_model = self.nparams - 1
self.df_resid = self.nobs - self.nparams
assert len(self.exog_precision) == len(self.endog)
self.hess_type = "oim"
if 'exog_precision' not in self._init_keys:
self._init_keys.extend(['exog_precision'])
self._init_keys.extend(['link', 'link_precision'])
self._null_drop_keys = ['exog_precision']
self.results_class = BetaResults
self.results_class_wrapper = BetaResultsWrapper
@classmethod
def from_formula(cls, formula, data, exog_precision_formula=None,
*args, **kwargs):
if exog_precision_formula is not None:
if 'subset' in kwargs:
d = data.ix[kwargs['subset']]
Z = patsy.dmatrix(exog_precision_formula, d)
else:
Z = patsy.dmatrix(exog_precision_formula, data)
kwargs['exog_precision'] = Z
return super(BetaModel, cls).from_formula(formula, data, *args,
**kwargs)
def predict(self, params, exog=None, exog_precision=None, which="mean"):
"""Predict values for mean or precision
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
which : str
- "mean" : mean, conditional expectation E(endog | exog)
- "precision" : predicted precision
- "linpred" : linear predictor for the mean function
- "linpred_precision" : linear predictor for the precision function
Returns
-------
ndarray, predicted values
"""
k_mean = self.exog.shape[1]
if which in ["mean", "linpred"]:
if exog is None:
exog = self.exog
params_mean = params[:k_mean]
# Zparams = params[k_mean:]
linpred = np.dot(exog, params_mean)
if which == "mean":
mu = self.link.inverse(linpred)
return mu
else:
return linpred
elif which in ["precision", "linpred_precision"]:
if exog_precision is None:
exog_precision = self.exog_precision
params_prec = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_prec)
if which == "precision":
phi = self.link_precision.inverse(linpred_prec)
return phi
else:
return linpred_prec
def predict_precision(self, params, exog_precision=None):
"""Predict values for precision function for given exog_precision.
Parameters
----------
params : array_like
The model parameters.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted precision.
"""
if exog_precision is None:
exog_precision = self.exog_precision
k_mean = self.exog.shape[1]
params_precision = params[k_mean:]
linpred_prec = np.dot(exog_precision, params_precision)
phi = self.link_precision.inverse(linpred_prec)
return phi
def predict_var(self, params, exog=None, exog_precision=None):
"""predict values for conditional variance V(endog | exog)
Parameters
----------
params : array_like
The model parameters.
exog : array_like
Array of predictor variables for mean.
exog_precision : array_like
Array of predictor variables for precision.
Returns
-------
Predicted conditional variance.
"""
mean = self.predict(params, exog=exog)
precision = self.predict_precision(params,
exog_precision=exog_precision)
var_endog = mean * (1 - mean) / (1 + precision)
return var_endog
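# Worked numeric check (added for clarity): with mean mu = 0.25 and precision
# phi = 3, the implied beta parameters are alpha = mu * phi = 0.75 and
# beta = (1 - mu) * phi = 2.25, and the conditional variance is
# mu * (1 - mu) / (1 + phi) = 0.25 * 0.75 / 4 = 0.046875.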
def loglikeobs(self, params):
"""
Loglikelihood for observations of the Beta regression model.
Parameters
----------
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
return self._llobs(self.endog, self.exog, self.exog_precision, params)
def _llobs(self, endog, exog, exog_precision, params):
"""
Loglikelihood for observations with data arguments.
Parameters
----------
endog : ndarray
1d array of endogenous variable.
exog : ndarray
2d array of explanatory variables.
exog_precision : ndarray
2d array of explanatory variables for precision.
params : ndarray
The parameters of the model, coefficients for linear predictors
of the mean and of the precision function.
Returns
-------
loglike : ndarray
The log likelihood for each observation of the model evaluated
at `params`.
"""
y, X, Z = endog, exog, exog_precision
nz = Z.shape[1]
params_mean = params[:-nz]
params_prec = params[-nz:]
linpred = np.dot(X, params_mean)
linpred_prec = np.dot(Z, params_prec)
mu = self.link.inverse(linpred)
phi = self.link_precision.inverse(linpred_prec)
eps_lb = 1e-200
alpha = np.clip(mu * phi, eps_lb, np.inf)
beta = np.clip((1 - mu) * phi, eps_lb, np.inf)
ll = (lgamma(phi) - lgamma(alpha)
- lgamma(beta)
+ (mu * phi - 1) *
|
np.log(y)
|
numpy.log
|
#!/usr/bin/env python
"""
Morris' Method of Elementary Effects.
Module includes optimised sampling of trajectories including optional groups as
well as calculation of the Morris measures mu, stddev and mu*.
<NAME>., <NAME>., & <NAME>. (2000). Sensitivity Analysis. Wiley
Series in Probability and Statistics, John Wiley & Sons, New York, 1-504,
pages 68ff
Provided functions are:
morris_sampling - Sample trajectories in parameter space
elementary_effects - Calculate Elementary Effects from model
output on trajectories
Note that the functions morris_sampling and elementary_effects are wrappers for
the functions Optimized_Groups and Morris_Measure_Groups of <NAME> and
<NAME> ported to Python by <NAME>.
This module was originally written by <NAME> as a translation
from an original Matlab code of <NAME> and <NAME>, JRC -
IPSC Ispra, Varese, IT. It was adapted by <NAME> while at
Department of Computational Hydrosystems, Helmholtz Centre for
Environmental Research - UFZ, Leipzig, Germany, and continued while at
Institut National de Recherche pour l'Agriculture, l'Alimentation et
l'Environnement (INRAE), Nancy, France.
:copyright: Copyright 2012-2022 <NAME>, <NAME>, see AUTHORS.rst
for details.
:license: MIT License, see LICENSE for details.
.. moduleauthor:: <NAME>
The following wrappers are provided
.. autosummary::
morris_sampling
elementary_effects
History
* Written in Matlab by <NAME>, <NAME>,
JRC - IPSC Ispra, Varese, IT.
Last Update 15 November 2005 by <NAME>:
http://sensitivity-analysis.jrc.ec.europa.eu/software/index.htm
now at: https://ec.europa.eu/jrc/en/samo/simlab
* Translated to Python in May 2012 by <NAME>.
* Adapted to Python 3, etc., Oct 2013, <NAME>
* Went from exponential time increase with number of trajectories to linear
increase by using in Optimised_Groups one call to cdist from
scipy.spatial.distance and removed one loop in a loop over total number
of trajectories.
Several further little improvements on speed, Dec 2017, <NAME>
* Allow single trajectories, Dec 2017, <NAME>
* Catch degenerated case where lower bound==upper bound, return 0,
Feb 2018, <NAME>
* Use integer division operator // instead of / for trajectory length r,
Jul 2018, <NAME>
* Distance matrix is not done for all trajectories at once because of very
large memory requirement, Aug 2018, <NAME> & <NAME>
* Changed to Sphinx docstring and numpydoc, Dec 2019, <NAME>
* Distinguish iterable and array_like parameter types,
Jan 2020, <NAME>
* Remove np.matrix in Sampling_Function_2, called in Optimized_Groups to
remove numpy deprecation warnings, Jan 2020, <NAME>
* Plot diagnostic figures in png files if matplotlib installed, Feb 2020,
<NAME>
* Renamed file to morris_method.py, Feb 2020, <NAME>
* Adjusted argument and keyword argument names to be consistent with
pyeee, Feb 2020, <NAME>
* Make number of final trajectories an argument instead of a keyword
argument and sample default of 10*final trajectories,
Feb 2020, <NAME>
* Sample not only from uniform distribution but allow all distributions of
scipy.stats, Mar 2020, <NAME>
* More consistent docstrings, Jan 2022, <NAME>
"""
from __future__ import division, absolute_import, print_function
import numpy as np
__all__ = ['morris_sampling', 'elementary_effects']
def Sampling_Function_2(p, k, r, lb, ub, GroupMat=np.array([])):
"""
Morris sampling function
Definition
----------
def Sampling_Function_2(p, k, r, lb, ub, GroupMat=np.array([])):
Input
-----
p : int
number of intervals considered in [0,1]
k : int
number of factors examined (``sizea=k``).
In case groups are chosen the number of factors is stored in
``NumFact`` and ``sizea`` becomes the number of created groups
(``sizea=GroupMat.shape[1]``).
r : int
sample size
lb : array-like of size (``sizea``,)
Lower Bound for each factor in list or array
ub : array-like of size (``sizea``,)
Upper Bound for each factor in list or array
Optional Input
--------------
GroupMat : ndarray with size (``NumFact``, ``GroupNumber``)
Array which describes the chosen groups. (default: ``np.array([])``)
Each column represents a group and its elements are set to 1
in correspondence of the factors that belong to the fixed group. All
the other elements are zero.
Output
------
list
[``Outmatrix``, ``OutFact``]
``Outmatrix(sizeb*r, sizea)`` for the entire sample size computed
``In(i,j)`` matrices
``OutFact(sizea*r)`` for the entire sample size computed ``Fact(i,1)``
vectors
Notes
-----
Local Variables
NumFact : number of factors examined in the case when groups are chosen
GroupNumber : Number of groups (eventually 0)
sizeb : sizea+1
randmult(sizea) : vector of random +1 and -1
perm_e(sizea) : vector of sizea random permutated indeces
fact(sizea) : vector containing the factor varied within each traj
DDo(sizea,sizea) : D* in Morris, 1991
A(sizeb,sizea) : Jk+1,k in Morris, 1991
B(sizeb,sizea) : B in Morris, 1991
Po(sizea,sizea) : P* in Morris, 1991
Bo(sizeb,sizea) : B* in Morris, 1991
Ao(sizeb) : Jk+1 in Morris, 1991
xo(sizea) : x* in Morris, 1991 (starting point for the trajectory)
In(sizeb,sizea) : for each loop orientation matrix. It corresponds to
a trajectory of k steps in the parameter space and it provides a
single elementary effect per factor
Fact(sizea) : For each loop vector indicating which factor or group of
factors has been changed in each step of the trajectory
AuxMat(sizeb,sizea) : Delta*0.5*((2*B - A) * DD0 + A) in Morris, 1991.
The AuxMat is used as in Morris design for single factor analysis,
while it constitutes an intermediate step for the group analysis.
Note: B0 is constructed as in Morris design when groups are not considered.
When groups are considered the routine follows the following steps:
1. Creation of P0 and DD0 matrices defined in Morris for the
groups. This means that the dimensions of these 2 matrices are
(GroupNumber,GroupNumber).
2. Creation of AuxMat matrix with (GroupNumber+1,GroupNumber)
elements.
3. Definition of GroupB0 starting from AuxMat, GroupMat and P0.
4. The final B0 for groups is obtained as [ones(sizeb,1)*x0' +
GroupB0]. The P0 permutation is present in GroupB0 and it's
not necessary to permute the matrix (ones(sizeb,1)*x0')
because it's already randomly created.
References
----------
<NAME>., <NAME>., & <NAME>. (2000). Sensitivity Analysis.
Wiley Series in Probability and Statistics, <NAME> & Sons,
New York, 1-504. - on page 68ff
History
-------
Written original Matlab code by <NAME>, <NAME>,
JRC - IPSC Ispra, Varese, IT
Last Update: 15 November 2005 by J.Cariboni
http://sensitivity-analysis.jrc.ec.europa.eu/software/index.htm
now at: https://ec.europa.eu/jrc/en/samo/simlab
Modified, <NAME>, May 2012 - ported to Python
<NAME>, Oct 2013
- adapted to JAMS Python package and ported to Python 3
<NAME>, Jan 2020 - remove np.matrix
"""
# Parameters and initialisation of the output matrix
sizea = k
Delta = p / (2. * (p - 1.))
NumFact = sizea
if GroupMat.shape[0] == GroupMat.size:
Groupnumber = 0
else:
Groupnumber = GroupMat.shape[1] # size(GroupMat,2)
sizea = GroupMat.shape[1]
sizeb = sizea + 1
Outmatrix = np.zeros(((sizea + 1) * r, NumFact))
OutFact = np.zeros(((sizea + 1) * r, 1))
# For each i generate a trajectory
for i in range(r):
Fact = np.zeros(sizea + 1)
# Construct DD0
DD0 = np.diagflat(np.sign(np.random.random(k) * 2 - 1))
# Construct B (lower triangular)
B = np.tri((sizeb), sizea, k=-1, dtype=int)
# Construct A0, A
A0 = np.ones((sizeb, 1))
A = np.ones((sizeb, NumFact))
# Construct the permutation matrix P0. In each column of P0 one
# randomly chosen element equals 1 while all the others equal zero. P0
# tells the order in which factors are changed in each trajectory. Note
# that P0 is then used reading it by rows.
I = np.eye(sizea)
P0 = I[:, np.random.permutation(sizea)]
# When groups are present the random permutation is done only on B. The
# effect is the same since the added part (A0*x0') is completely
# random.
if Groupnumber != 0:
B = np.dot(B, np.dot(GroupMat, P0.T).T)
# Compute AuxMat both for single factors and groups analysis. For
# Single factors analysis AuxMat is added to (A0*X0) and then
# permutated through P0. When groups are active AuxMat is used to build
# GroupB0. AuxMat is created considering DD0. If the element on DD0
# diagonal is 1 then AuxMat will start with zero and add Delta. If the
# element on DD0 diagonal is -1 then DD0 will start Delta and goes to
# zero.
AuxMat = Delta * 0.5 * (np.dot(2.*B - A, DD0) + A)
# a --> Define the random vector x0 for the factors. Note that x0 takes
# value in the hypercube
# [0,...,1-Delta]*[0,...,1-Delta]*[0,...,1-Delta]*[0,...,1-Delta]
# Original in <NAME>'s version
# xset=np.arange(0.0,1.0-Delta,1.0/(p-1))
# Jule's version from The Primer
# xset=np.arange(0.0,1.0-1.0/(p-1),1.0/(p-1))
# Matthias thinks that the difference between Python and Matlab is that
# Python is not taking the last element; therefore the following
# version
xset = np.arange(0.0, 1.00000001 - Delta, 1.0 / (p - 1))
x0 = xset.take(list(np.ceil(
np.random.random(k) * np.floor(p / 2)) - 1))
x0 = x0[np.newaxis, :]
# b --> Compute the matrix B*, here indicated as B0. Each row in B0 is
# a trajectory for Morris Calculations. The dimension of B0 is
# (Numfactors+1,Numfactors)
if Groupnumber != 0:
B0 = np.dot(A0, x0) + AuxMat
else:
B0 = np.dot(np.dot(A0, x0) + AuxMat, P0)
# c --> Compute values in the original intervals
# B0 has values x(i,j) in [0, 1/(p -1), 2/(p -1), ... , 1].
# To obtain values in the original intervals [lb, ub] we compute
# lb(j) + x(i,j)*(ub(j)-lb(j))
In = np.tile(lb, (sizeb, 1)) + B0 * np.tile((ub - lb), (sizeb, 1))
# Create the Factor vector. Each component of this vector indicate
# which factor or group of factor has been changed in each step of the
# trajectory.
for j in range(sizea):
Fact[j] = np.where(P0[j, :])[0]
# Only kept so the shape stays consistent; alternatively, Fact could be made smaller
Fact[sizea] = int(-1)
# append the created trajectory to the others
Outmatrix[i*(sizea+1):(i+1)*(sizea+1), :] = In
OutFact[i*(sizea+1):(i+1)*(sizea+1)] = Fact.reshape((sizea+1, 1))
return Outmatrix, OutFact
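# Illustrative usage sketch (added for clarity, not part of the original
# module): with p = 4 levels, k = 3 factors, r = 2 trajectories and no groups,
# the sampler returns an Outmatrix of shape ((k + 1) * r, k) = (8, 3) and an
# OutFact vector of shape (8, 1) recording which factor changed at each step
# of each trajectory.
def _sampling_function_2_demo():
    lb = np.zeros(3)
    ub = np.ones(3)
    outmatrix, outfact = Sampling_Function_2(4, 3, 2, lb, ub)
    return outmatrix.shape, outfact.shape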
def Optimized_Groups(NumFact, lb, ub, r,
p=6, N=None,
dist=None, distparam=None,
GroupMat=np.array([]), Diagnostic=0):
"""
Optimisation in the choice of trajectories for Morris experiment,
that means elementary effects
Definition
----------
def Optimized_Groups(NumFact, lb, ub, r, p=6, N=None,
GroupMat=np.array([]), Diagnostic=0):
Input
-----
NumFact
Number of factors
lb
[NumFact] Lower bound of the uniform distribution for each factor
or lower fraction of percent point function ppf if distribution given.
ub
[NumFact] Upper bound of the uniform distribution for each factor
or upper fraction of percent point function ppf if distribution given.
r
Final number of optimal trajectories
Optional Input
--------------
p
Number of levels (default: 6)
N
Total number of trajectories. If None: N=10*r (default: None)
dist
List of None or scipy.stats distribution objects for each factor
having the method ppf, Percent Point Function (Inverse of CDF)
(default: None).
If None, the uniform distribution will be sampled from lower bound lb
to upper bound ub.
If dist is scipy.stats.uniform, the ppf will be sampled from the lower
fraction given in lb and the upper fraction in ub. The sampling
interval is then given by the parameters loc=lower and
scale=interval=upper-lower in param.
distparam
List with tuples with parameters as required for dist (default: (0,1)).
All distributions of scipy.stats have location and scale parameters,
at least. loc and scale are implemented as keyword arguments in
scipy.stats. Other parameters such as the shape parameter of the gamma
distribution must hence be given first,
e.g. (shape,loc,scale) for the gamma distribution.
distparam is ignored if dist is None.
The percent point function ppf is called like this:
dist(*distparam).ppf(x)
GroupMat
[NumFact,NumGroups] Matrix describing the groups.
(default: np.array([]))
Each column represents a group and its elements are set to 1 in
correspondence of the factors that belong to the fixed group. All the
other elements are zero.
Diagnostic
1 = plot the histograms and compute the efficiency of the sampling,
0 otherwise (default)
Output
------
[OptMatrix, OptOutVec]
References
----------
<NAME>., <NAME>., & <NAME>. (2000). Sensitivity Analysis. Wiley
Series in Probability and Statistics, <NAME>, New York, 1-504. -
on page 68ff
"""
from scipy.spatial import distance
import scipy.stats as stats
if N is None:
N = 10*r
assert len(lb) == NumFact, 'Lower bound must have length NumFact.'
assert len(ub) == NumFact, 'Upper bound must have length NumFact.'
if dist is not None:
assert len(lb) == len(dist), 'scipy.stats distribution object or None has to be given for each parameter.'
for dd in dist:
if dd is not None:
if not isinstance(dd, (stats.rv_discrete, stats.rv_continuous)):
raise TypeError(str(dd)+' is not a scipy.stats distribution object.')
# np.random.seed(seed=1025)
# Sample trajectories between 0 and 1. Will be rescaled to specific
# distributions later.
lbt = np.zeros(NumFact)
ubt = np.ones(NumFact)
# Version with Groups
OutMatrix, OutFact = Sampling_Function_2(p, NumFact, N, lbt, ubt, GroupMat)
try:
Groupnumber = GroupMat.shape[1]
except:
Groupnumber = 0
if Groupnumber != 0:
sizeb = Groupnumber + 1
else:
sizeb = NumFact + 1
# Compute the distance between all pair of trajectories (sum of the
# distances between points). The distance matrix is an N*N matrix. The
# distance is defined as the sum of the distances between all pairs of
# points if the two trajectories differ, 0 otherwise
Dist = np.zeros((N, N))
Diff_Traj = np.arange(0.0, N, 1.0)
# combine all trajectories: e.g. N=3: 0&1; 0&2; 1&2 (independent of the
# sequence)
for j in range(N):
for z in range(j+1, N):
MyDist = distance.cdist(OutMatrix[sizeb*j:sizeb*(j+1), :],
OutMatrix[sizeb*z:sizeb*(z+1), :])
if np.where(MyDist == 0.)[0].size == sizeb:
# Same trajectory. If the number of zeros in Dist matrix is
# equal to (NumFact+1) then the trajectory is a replica. In
# fact (NumFact+1) is the maximum number of points that two
# trajectories can have in common
Dist[j, z] = 0.
Dist[z, j] = 0.
# Memorise the replicated trajectory
Diff_Traj[z] = -1. # the z value identifies the duplicate
else:
# Define the distance between two trajectories as
# the sum of the distances between all pairs of their points
dd = np.sum(MyDist)
Dist[j, z] = dd
Dist[z, j] = dd
# prepare array with excluded duplicates (alternative would be deleting
# rows)
iidup = np.where(Diff_Traj == -1.)[0]
dupli = iidup.size
iiind = np.where(Diff_Traj != -1.)[0]
New_N = iiind.size # N - iidup.size
New_OutMatrix = np.zeros((sizeb*New_N, NumFact))
New_OutFact = np.zeros((sizeb*New_N, 1))
# Eliminate replicated trajectories in the sampled matrix
ID = 0
for i in range(N):
if Diff_Traj[i] != -1.:
New_OutMatrix[ID*sizeb:(ID+1)*sizeb, :] = (
OutMatrix[i*sizeb:(i+1)*sizeb, :])
New_OutFact[ID*sizeb:(ID+1)*sizeb, :] = (
OutFact[i*sizeb:(i+1)*sizeb, :])
ID += 1
    # Select in the distance matrix only the rows and columns of different
    # trajectories.
    # Dist_Diff = np.delete(Dist_Diff, np.where(Diff_Traj == -1.)[0])
    # needs to be a 2D matrix... delete rows instead of keeping track
Dist_Diff = Dist[iiind, :]
Dist_Diff = Dist_Diff[:, iiind]
# Select the optimal set of trajectories
Traj_Vec = np.zeros((New_N, r), dtype=int)
OptDist = np.zeros((New_N, r))
for m in range(New_N): # each row in Traj_Vec
Traj_Vec[m, 0] = m
for z in range(1, r): # elements in columns after first
New_Dist_Diff = np.sqrt(np.sum(Dist_Diff[Traj_Vec[m, :z], :]**2,
axis=0))
ii = New_Dist_Diff.argmax()
Traj_Vec[m, z] = ii
OptDist[m, z] = New_Dist_Diff[ii]
# Construct optimal matrix
SumOptDist = np.sum(OptDist, axis=1)
# Find the maximum distance
Pluto = np.where(SumOptDist == SumOptDist.max())[0]
Opt_Traj_Vec = Traj_Vec[Pluto[0], :]
OptMatrix = np.zeros((sizeb*r, NumFact))
OptOutVec = np.zeros((sizeb*r, 1))
for k in range(r):
OptMatrix[k*sizeb:(k+1)*sizeb, :] = (
New_OutMatrix[sizeb*Opt_Traj_Vec[k]:sizeb*(Opt_Traj_Vec[k]+1), :])
OptOutVec[k*sizeb:(k+1)*sizeb, :] = (
New_OutFact[sizeb*Opt_Traj_Vec[k]:sizeb*(Opt_Traj_Vec[k]+1), :])
# ----------------------------------------------------------------------
# Compute values in the original intervals
# Optmatrix has values x(i,j) in [0, 1/(p -1), 2/(p -1), ... , 1].
# To obtain values in the original intervals [lb, ub] we compute
# lb(j) + x(i,j)*(ub(j)-lb(j))
if Diagnostic:
OptMatrix_b = OptMatrix.copy() # save for plot
if dist is None:
OptMatrix = (np.tile(lb, (sizeb*r, 1)) +
np.tile(ub-lb, (sizeb*r, 1)) * OptMatrix)
else:
for i, dd in enumerate(dist):
OptMatrix[:, i] = lb[i] + (ub[i]-lb[i]) * OptMatrix[:, i]
if dd is not None:
if distparam is None:
pars = (0., 1.)
else:
if distparam[i] is None:
pars = (0., 1.)
else:
pars = tuple([ float(k) for k in distparam[i] ])
OptMatrix[:, i] = dd(*pars).ppf(OptMatrix[:, i])
if Diagnostic:
# Clean the trajectories from repetitions and plot the histograms
hplot = np.zeros((2 * r, NumFact))
for i in range(NumFact):
for j in range(r):
# select the first value of the factor
hplot[j * 2, i] = OptMatrix_b[j * sizeb, i]
# search the second value
for ii in range(1, sizeb):
if OptMatrix_b[j*sizeb+ii, i] != OptMatrix_b[j*sizeb, i]:
kk = 1
hplot[j*2+kk, i] = OptMatrix_b[j*sizeb+ii, i]
try: # pragma: no cover
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
mpl.rcParams['font.family'] = 'sans-serif'
mpl.rcParams['font.sans-serif'] = 'Arial' # Arial, Verdana
mpl.rc('savefig', dpi=300, format='png')
mpl.rc('font', size=9)
fig = plt.figure()
fig.suptitle('New Strategy')
DimPlots = NumFact // 2 + (1 if NumFact % 2 > 0 else 0)
for i in range(NumFact):
ax = fig.add_subplot(DimPlots, 2, i + 1)
ax.hist(hplot[:, i], p)
fig.savefig('morris_diag_new_strategy.png', transparent=False,
bbox_inches='tight', pad_inches=0.035)
plt.close(fig)
except ImportError: # pragma: no cover
pass
# Plot the histogram for the original sampling strategy
# Select the matrix
OrigSample = OutMatrix[:r*(sizeb), :]
Orihplot = np.zeros((2*r, NumFact))
for i in range(NumFact):
for j in range(r):
# select the first value of the factor
Orihplot[j * 2, i] = OrigSample[j * sizeb, i]
# search the second value
for ii in range(1, sizeb):
if OrigSample[j * sizeb + ii, i] != OrigSample[j *
sizeb, i]:
kk = 1
Orihplot[j * 2 + kk, i] = OrigSample[j * sizeb + ii, i]
try: # pragma: no cover
fig = plt.figure()
fig.suptitle('Old Strategy')
DimPlots = NumFact // 2 + (1 if NumFact % 2 > 0 else 0)
for i in range(NumFact):
ax = fig.add_subplot(DimPlots, 2, i + 1)
ax.hist(Orihplot[:, i], p)
# plt.title('Old Strategy')
fig.savefig('morris_diag_old_strategy.png', transparent=False,
bbox_inches='tight', pad_inches=0.035)
plt.close(fig)
except: # pragma: no cover
pass
# Measure the quality of the sampling strategy
levels = np.arange(0.0, 1.1, 1.0 / (p - 1))
NumSPoint = np.zeros((NumFact, p))
NumSOrigPoint = np.zeros((NumFact, p))
for i in range(NumFact):
for j in range(p):
# For each factor and each level count the number of times the
# factor is on the level
# This for the new and original sampling
NumSPoint[i, j] = np.where(
np.abs(hplot[:, i] - np.tile(levels[j], hplot.shape[0]))
< 1e-5)[0].size
NumSOrigPoint[i, j] = np.where(
np.abs(Orihplot[:, i] -
np.tile(levels[j], Orihplot.shape[0]))
< 1e-5)[0].size
# The optimal sampling has values uniformly distributed across the
# levels
OptSampl = 2. * r / p
QualMeasure = 0.
QualOriMeasure = 0.
for i in range(NumFact):
for j in range(p):
QualMeasure = QualMeasure + np.abs(NumSPoint[i, j] - OptSampl)
QualOriMeasure = QualOriMeasure + np.abs(
NumSOrigPoint[i, j] - OptSampl)
QualMeasure = 1. - QualMeasure / (OptSampl * p * NumFact)
QualOriMeasure = 1. - QualOriMeasure / (OptSampl * p * NumFact)
print('The quality of the sampling strategy changed from {:f} with '
'the old strategy to {:f} '
'for the optimized strategy'.format(QualOriMeasure, QualMeasure))
return OptMatrix, OptOutVec[:, 0]
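# Illustration only -- not part of the original routine. A minimal sketch,
# with made-up numbers, of the two rescalings applied above: Morris levels in
# [0, 1] are stretched linearly to [lb, ub], and, when a scipy.stats
# distribution is given, the rescaled value (presumably acting as a quantile
# here) is passed through the percent point function dist(*distparam).ppf(x).
def _rescale_sketch():
    import numpy as np
    import scipy.stats as stats
    x = np.array([0., 0.25, 0.5, 0.75, 1.])          # Morris levels (p=5)
    print(2.0 + x * (6.0 - 2.0))                     # linear rescaling to [2, 6]
    q = 0.01 + x * (0.99 - 0.01)                     # bounds taken as quantiles
    print(stats.norm(loc=4.0, scale=1.0).ppf(q))     # ppf-based rescaling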
def Morris_Measure_Groups(NumFact, Sample, OutFact, Output, p=4,
Group=[], Diagnostic=False):
"""
Given the Morris sample matrix, the output values and the group matrix
compute the Morris measures.
Definition
----------
def Morris_Measure_Groups(NumFact, Sample, OutFact, Output, p=4, Group=[],
Diagnostic=False):
Input
-----
NumFact
Number of factors
Sample
Matrix of the Morris sampled trajectories
OutFact
        Matrix with the factor changes as specified in the Morris sampling
    Output
        Matrix of the output value(s) at each point of each trajectory
Optional Input
--------------
p
Number of levels
Group
[NumFact, NumGroups] Matrix describing the groups.
        Each column represents one group. The elements of a column are zero
        if the factor is not in the group, and 1 otherwise.
Diagnostic
True: print out diagnostics
False: otherwise (default)
Output
------
SA, OutMatrix
        SA(NumFact*Output.shape[1], r) individual sensitivity measures
OutMatrix(NumFact*Output.shape[1], 3) = [Mu*, Mu, StDev]
Morris Measures
It gives the three measures of each factor for each output.
References
----------
<NAME>., <NAME>., & <NAME>. (2000). Sensitivity Analysis. Wiley
Series in Probability and Statistics, <NAME> & Sons, New York, 1-504. -
on page 68ff
"""
try:
NumGroups = Group.shape[1]
if Diagnostic:
print('{:d} Groups are used'.format(NumGroups))
except:
NumGroups = 0
if Diagnostic:
print('No Groups are used')
Delt = p / (2. * (p - 1.))
if NumGroups != 0:
sizea = NumGroups
GroupMat = Group
GroupMat = GroupMat.transpose()
if Diagnostic:
print('NumGroups', NumGroups)
else:
sizea = NumFact
sizeb = sizea + 1
# r = Sample.shape[0]/sizeb
r = Sample.shape[0] // sizeb
try:
NumOutp = Output.shape[1]
except:
NumOutp = 1
Output = Output.reshape((Output.size, 1))
# For each Output
if NumGroups == 0:
# for every output: every factor is a line, columns are mu*, mu and std
OutMatrix = np.zeros((NumOutp * NumFact, 3))
else:
# for every output: every factor is a line, column is mu*
OutMatrix = np.zeros((NumOutp * NumFact, 1))
SAmeas_out = np.zeros((NumOutp * NumFact, r))
for k in range(NumOutp):
OutValues = Output[:, k]
# For each trajectory
        # make the shape depend on whether groups are used or not
SAmeas = np.zeros((NumFact, r))
for i in range(r):
# For each step j in the trajectory
# Read the orientation matrix fact for the r-th sampling
# Read the corresponding output values
# Read the line of changing factors
Single_Sample = Sample[i * sizeb:(i + 1) * sizeb, :]
Single_OutValues = OutValues[i * sizeb:(i + 1) * sizeb]
# gives factor in change (or group)
Single_Facts = np.array(OutFact[i * sizeb:(i + 1) * sizeb],
dtype=int)
A = (Single_Sample[1:sizeb, :] -
Single_Sample[:sizea, :]).transpose()
            Delta = A[np.where(A)]  # to be adjusted?
            # If upper bound == lower bound then A == 0 in all trajectories.
            # Delta then does not have the right dimensions because the zeros
            # are filtered out by np.where. Fill in Delta == 0 for these cases.
ii = np.where(np.sum(A, axis=0) == 0.)[0]
if ii.size > 0:
Delta =
|
np.insert(Delta, ii, 0.)
|
numpy.insert
|
import numpy as np
def IntensityDataAcquire(PATH):
data = np.genfromtxt(PATH,usecols=(0,1),delimiter=",")
DATA = data[:,1].copy()
return DATA
def Power_dist_one(PATH,Threshold,exptime):
DATA = IntensityDataAcquire(PATH)
Data = DATA > Threshold
negatives = np.array([])
positives = np.array([])
counter_neg = 0
counter_pos = 0
for i in Data:
if i == False:
counter_neg += 1
positives = np.append(positives,counter_pos)
counter_pos = 0
if i == True:
negatives = np.append(negatives,counter_neg)
counter_neg = 0
counter_pos += 1
if i == False:
negatives = np.append(negatives,counter_neg)
if i == True:
positives = np.append(positives,counter_pos)
pos_val = positives[positives.nonzero()]
neg_val = negatives[negatives.nonzero()]
return pos_val,neg_val
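# Illustration only -- not part of the original module. A vectorised sketch of
# the same run-length bookkeeping done in the loop above, assuming an already
# thresholded boolean trace (the values below are invented).
def _run_length_sketch():
    trace = np.array([0, 1, 1, 1, 0, 0, 1, 0], dtype=bool)
    changes = np.flatnonzero(np.diff(trace.astype(np.int8))) + 1
    bounds = np.concatenate(([0], changes, [trace.size]))
    lengths = np.diff(bounds)               # length of every constant run
    starts_on = trace[bounds[:-1]]          # whether each run is an on-run
    print(lengths[starts_on])               # on durations  -> [3 1]
    print(lengths[~starts_on])              # off durations -> [1 2 1]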
def Power_dist(PATH,Threshold,exptime) :
pos_val,neg_val = Power_dist_one(PATH,Threshold,exptime)
max_pos = np.max(pos_val)
min_pos = np.min(pos_val)
max_neg = np.max(neg_val)
min_neg = np.min(neg_val)
pos_bin_list = np.arange(min_pos,max_pos+1,1.0)
neg_bin_list = np.arange(min_neg,max_neg+1,1.0)
pos_bins = np.array([])
pos_bin_counts = np.array([])
for bin_val in pos_bin_list:
pos_bin_counts = np.append(pos_bin_counts,np.sum(pos_val == bin_val))
pos_bins = np.append(pos_bins,bin_val)
pos_bins = np.array(pos_bins)
pos_bin_counts = np.array(pos_bin_counts)
neg_bins = np.array([])
neg_bin_counts = np.array([])
for bin_val in neg_bin_list:
neg_bin_counts = np.append(neg_bin_counts,np.sum(neg_val == bin_val))
neg_bins = np.append(neg_bins,bin_val)
neg_bins = np.array(neg_bins)
neg_bin_counts = np.array(neg_bin_counts)
index_neg = neg_bin_counts > 1;
Offtime = (neg_bins[index_neg])*exptime
countsoff_accept = neg_bin_counts[index_neg]
number_offtime = len(countsoff_accept)
total_off = np.sum(countsoff_accept)
pdf_accept_off = np.zeros((number_offtime, 1))
if number_offtime >= 5:
for i in range(0,number_offtime):
if i == 0:
delta_off = Offtime[i+1] - Offtime[i];
elif i == number_offtime-1:
delta_off = Offtime[i] - Offtime[i-1];
else:
A_off = Offtime[i] - Offtime[i-1];
B_off = Offtime[i+1] - Offtime[i];
delta_off = (A_off + B_off)/2;
pdf_accept_off[i] = countsoff_accept[i]/(total_off*delta_off);
index_pos = pos_bin_counts > 1;
Ontime = (pos_bins[index_pos])*exptime
countson_accept = pos_bin_counts[index_pos]
number_ontime = len(countson_accept)
total_on = np.sum(countson_accept)
pdf_accept_on = np.zeros((number_ontime, 1))
if number_ontime >= 5:
for i in range(0,number_ontime):
if i == 0:
delta_on = Ontime[i+1] - Ontime[i];
elif i == number_ontime-1:
delta_on = Ontime[i] - Ontime[i-1];
else:
A_on = Ontime[i] - Ontime[i-1];
B_on = Ontime[i+1] - Ontime[i];
delta_on = (A_on + B_on)/2;
pdf_accept_on[i] = countson_accept[i]/(total_on*delta_on);
return pdf_accept_on,pdf_accept_off,Offtime,Ontime
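# Illustration only -- not part of the original module. A minimal sketch, with
# hypothetical counts and times, of the normalisation used in Power_dist:
# dividing counts by (total counts * local bin width) yields a density whose
# discrete integral is one; np.gradient reproduces the per-bin widths computed
# in the loops above (one-sided at the ends, centred in between).
def _pdf_normalisation_sketch():
    counts = np.array([40., 25., 10., 5.])
    times = np.array([0.1, 0.2, 0.4, 0.8])      # non-uniform on/off times
    delta = np.gradient(times)                  # local bin widths
    pdf = counts / (counts.sum() * delta)
    print(np.sum(pdf * delta))                  # 1.0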
def Offtime_pdf(PATH,Threshold,exptime):
pdf_accept_on, pdf_accept_off, Offtime, Ontime = Power_dist(PATH,Threshold,exptime)
return pdf_accept_off,Offtime
def Ontime_pdf(PATH,Threshold,exptime):
pdf_accept_on, pdf_accept_off, Offtime, Ontime = Power_dist(PATH,Threshold,exptime)
return pdf_accept_on,Ontime
def On_Off_frac_main(PATH,Threshold,exptime):
ontime,offtime = Power_dist_one(PATH,Threshold,exptime)
TOTAL_OFFTIME = np.sum(offtime)*exptime
TOTAL_ONTIME = np.sum(ontime)*exptime
    TOTAL_TIME = TOTAL_OFFTIME + TOTAL_ONTIME
    return TOTAL_OFFTIME, TOTAL_ONTIME, TOTAL_TIME
def OnTimeFraction(PATH,Threshold,exptime):
    _, TOTAL_ONTIME, TOTAL_TIME = On_Off_frac_main(PATH,Threshold,exptime)
    On_ratio = TOTAL_ONTIME/TOTAL_TIME
    return On_ratio
def OffTimeFraction(PATH,Threshold,exptime):
    TOTAL_OFFTIME, _, TOTAL_TIME = On_Off_frac_main(PATH,Threshold,exptime)
    Off_ratio = TOTAL_OFFTIME/TOTAL_TIME
    return Off_ratio
def OnOffRatio(PATH,Threshold,exptime):
    TOTAL_OFFTIME, TOTAL_ONTIME, _ = On_Off_frac_main(PATH,Threshold,exptime)
    ON_OFF_ratio = TOTAL_ONTIME/TOTAL_OFFTIME
    return ON_OFF_ratio
def OffOnRatio(PATH,Threshold,exptime):
    TOTAL_OFFTIME, TOTAL_ONTIME, _ = On_Off_frac_main(PATH,Threshold,exptime)
    OFF_ON_ratio = TOTAL_OFFTIME/TOTAL_ONTIME
    return OFF_ON_ratio
def TotalOnTime(PATH,Threshold,exptime):
_, TOTAL_ONTIME,_ = On_Off_frac_main(PATH,Threshold,exptime)
return TOTAL_ONTIME
def TotalOffTime(PATH,Threshold,exptime):
TOTAL_OFFTIME,_,_ = On_Off_frac_main(PATH,Threshold,exptime)
return TOTAL_OFFTIME
def AverageIntensity(PATH):
DATA = IntensityDataAcquire(PATH)
return
|
np.mean(DATA)
|
numpy.mean
|
import json
import numpy as np
class NACStandardScaler:
def __init__(self):
self.x_mean =
|
np.zeros((1, 1, 1))
|
numpy.zeros
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as ani
###############################################
def CalculatePosition(radius,velocity,time,dt):
"""Calculates position of an object around a circle after some time with interval dt."""
# Initial conditions
theta = 0
xini = radius * np.cos(theta)
yini = radius * np.sin(theta)
t = 0
# Store positions and time
xposition = [xini]
yposition = [yini]
storedtime = [t]
# Calculate positions
while t < time:
t += dt
x = radius * np.cos((velocity/radius)*t)
y = radius * np.sin((velocity/radius)*t)
xposition.append(x)
yposition.append(y)
storedtime.append(t)
# Make an array
xposition = np.array(xposition)
yposition = np.array(yposition)
storedtime = np.array(storedtime)
return xposition,yposition,storedtime
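###############################################
# Usage sketch only -- not part of the original module; the numbers are chosen
# for illustration. After a quarter of the period (t = (pi/2) * r / v) the
# object should sit close to (0, r).
def _position_sketch():
    r, v = 2.0, 1.0
    x, y, t = CalculatePosition(r, v, 0.5 * np.pi * r / v, dt=1e-3)
    print(x[-1], y[-1])   # approximately (0.0, 2.0)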
###############################################
def MultiplePositions(radius,velocity,time,dt):
"""Calculate positions of multiple objects around a circle."""
# Stop the calculation when the outermost point takes a whole revolution
# Outermost point position
outerposition = radius[len(radius)-1]
# Calculate the positions of outermost point:
xouter = CalculatePosition(radius[len(radius)-1],velocity[len(radius)-1],time,dt)[0]
# Circumference of the outer circle
circouter = 2*np.pi*outerposition
# Distance the outer object traveled
distance = 0
istop = 0
# New time
storedtime = CalculatePosition(radius[0],velocity[0],time,dt)[2]
for t in storedtime:
if distance < circouter:
distance = velocity[len(radius)-1]*t
else:
istop = np.where(storedtime == t)[0] # find index of the numpy array
break
istop = int(istop)
newstoredtime = storedtime[:istop-1]
xmultiple = []
ymultiple = []
for i in range(len(radius)):
x = CalculatePosition(radius[i],velocity[i],time,dt)[0]
y = CalculatePosition(radius[i],velocity[i],time,dt)[1]
xmultiple.append(x)
ymultiple.append(y)
xmultiple = np.array(xmultiple)
ymultiple = np.array(ymultiple)
return xmultiple, ymultiple, newstoredtime
###############################################
def PlotRotationCurve(radius,velocity,title,
xlabel='Radius (km)',ylabel='Velocity (km/s)',
xlim=1,
ylim=0.1):
# Convert title to string
title = str(title)
# Plot
fig = plt.figure(figsize=(6,6))
ax = plt.axes()
fig.patch.set_facecolor('black')
ax.set_facecolor('black')
ax.set_xlabel(xlabel,color='white')
ax.set_ylabel(ylabel,color='white')
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['left'].set_color('white')
ax.spines['right'].set_color('white')
ax.xaxis.label.set_color('white')
ax.yaxis.label.set_color('white')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
plt.title(title,color='white',fontsize='14')
plt.scatter(radius,velocity,color='khaki')
plt.plot(radius,velocity,color='white')
plt.xlim(0,np.max(radius)+xlim)
plt.ylim(0,np.max(velocity)+ylim)
plt.show()
###############################################
def MakeAnimation(radius,velocity,time,dt,filename,title,
xlim=1,ylim=1,
size=False,masses=None):
# Extract x and y positions, and time
xpositions = MultiplePositions(radius,velocity,time,dt)[0]
ypositions = MultiplePositions(radius,velocity,time,dt)[1]
#storedtimes = CalculatePosition(radius,velocity,time,dt)[2]
storedtimes = MultiplePositions(radius,velocity,time,dt)[2]
# Sizes of dots based on masses
if size == True: # use an array of masses as an input for sizes
area = [s * 5e-25 for s in masses]
if size == False: # use a default size
area = 100
# Create a movie write object, set frame rate
writer = ani.FFMpegWriter(fps=25)
# Create a figure, 8"x8" in size
fig = plt.figure(figsize=(8,8))
# Change background color of the plot
fig.patch.set_facecolor('black')
ax = plt.axes()
ax.set_facecolor('black')
ax.set_facecolor('black')
ax.set_xlabel('x (km)',color='white')
ax.set_ylabel('y (km)',color='white')
ax.spines['bottom'].set_color('white')
ax.spines['top'].set_color('white')
ax.spines['left'].set_color('white')
ax.spines['right'].set_color('white')
# Convert filename and title to string
filename = str(filename)
title = str(title)
# Set things up to save frames to a movie:
# fig = the figure the writer will record from
with writer.saving(fig, filename, 100):
# Loop
i = 0 # start counter
for t in storedtimes:
plt.cla()
for r in radius:
circle = plt.Circle((0, 0), r, color='white', fill=False)
plt.gca().add_patch(circle)
plt.scatter(xpositions[:,i],ypositions[:,i],s=area,color='khaki')
plt.suptitle(title,color='white',fontsize='18')
plt.title('{:.1e} seconds'.format(t),color='white')
plt.xlim(-np.max(radius)-xlim,
|
np.max(radius)
|
numpy.max
|
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Vector of Counts cardinality estimator.
This is a new, open source method that has lots of nice properties. Namely,
because you have to deduplicate users before constructing the Sketch, it is
not, strictly-speaking, a "Cardinality Estimator" as outlined in the paper
titled "Cardinality Estimators do not Preserve Privacy":
https://arxiv.org/pdf/1808.05879.pdf. So you can construct a vector with
differential privacy, and send it to a third party with a differential privacy
guarantee, but also not worry about losing accuracy as many of the structures
are aggregated together to get an estimate for the cardinality.
"""
import copy
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.common.hash_function import HashFunction
from wfa_cardinality_estimation_evaluation_framework.estimators.base import EstimatorBase
from wfa_cardinality_estimation_evaluation_framework.estimators.base import SketchNoiserBase
from wfa_cardinality_estimation_evaluation_framework.estimators.base import SketchBase
class IdentityNoiser(SketchNoiserBase):
"""Does not add noise to a VectorOfCounts."""
def __call__(self, sketch):
"""Return an identical copy of the incoming sketch."""
return copy.deepcopy(sketch)
class LaplaceNoiser(SketchNoiserBase):
"""This class adds noise to a VectorOfCounts."""
def __init__(self, epsilon=np.log(3), random_state=np.random.RandomState()):
"""Creates a VectorOfCountsNoiser which can add noise to a VectorOfCounts.
Args:
epsilon: the privacy parameter.
random_state: a numpy.random.RandomState used to draw random numbers.
"""
self.epsilon = epsilon
self.random_state = random_state
def __call__(self, sketch):
"""Returns a copy of VectorOfCounts with noise.
Args:
sketch: a VectorOfCounts object.
Returns:
A new VectorOfCounts object with noise added to the sketch.
"""
noised_sketch = copy.deepcopy(sketch)
noise = self.random_state.laplace(loc=0, scale=1.0/self.epsilon,
size=noised_sketch.num_buckets)
noised_sketch.stats = noised_sketch.stats + noise
return noised_sketch
class VectorOfCounts(SketchBase):
"""A vector of counts sketch."""
@classmethod
def get_sketch_factory(cls, num_buckets, *args, **kwargs):
def f(random_seed):
return cls(num_buckets, random_seed)
return f
def __init__(self, num_buckets, random_seed):
"""Creates a vector of counts sketch.
Args:
num_buckets: the number of buckets of the VectorOfCounts.
random_seed: a random seed for the hash function.
"""
SketchBase.__init__(self)
self._num_buckets = num_buckets
self.stats = np.zeros(num_buckets)
self.hash_function = HashFunction(random_seed, num_buckets)
self._vectorized_hash_function = np.vectorize(self.hash_function)
self._ids_added = False
@property
def num_buckets(self):
return self._num_buckets
def add_ids(self, ids):
"""Add IDs to the sketch.
This method should be called only once. Otherwise, will raise error.
The reason is that a vector of counts does not support sequentially adding
ids without additional layers. Let's imagine the following scenario:
voc = VectorOfCounts(very_large_num_buckets_so_there_is_no_hash_collision)
voc.add_ids([1, 2])
voc.add_ids([1, 3])
Ideally, we want voc.estimate_cardinality() == 3. However, this may be hard,
unless the vector of counts has a way to tell what ids have been added to
the bucket so far.
Args:
ids: a list of raw ids.
Returns:
self.
Raises:
AssertionError: If this method is called a second time.
"""
assert not self._ids_added, 'Can only add ids to Vector of Counts once.'
hashed_ids = self._vectorized_hash_function(ids)
self.stats = np.bincount(hashed_ids, minlength=self.num_buckets)
self._ids_added = True
return self
def cardinality(self):
return
|
np.sum(self.stats)
|
numpy.sum
|
#!/d/users/turner/tools/anaconda3/bin/python
"""
big 'ole python script that (hopefully) goes through the entire science procedure for give band 4 and band 7 fits files
notes are given in science.md
**python3**
to run:
ipython
exec(open('science.py').read())
"""
import numpy as np
import matplotlib.pyplot as plt
import astropy
from astropy.io import fits
import os, sys, subprocess
from astropy import constants as const
# add scripts directory to the path
sys.path.insert(0, '/uwpa2/turner/legus-alma/scripts/')
from ds9_regions_from_sextractor import get_regions
from get_sextractor_in_footprint import in_footprint
from overlapping_regions import overlap
from mcmc_error_bars import mcmc_error
from nearest_cluster import nearest_cluster
"""
if starting with new band 4 and band 7 fits files, you need to:
1. make sure to have pbcoverage files
2. take global.deg.reg and make new pixel files for band 4 and band 7
data1 --> results for sextractor with 5 pixels > 2 sigma
data2 --> results for sextractor with 5 pixels > 2.5 sigma
data3 --> results for sextractor with 5 pixels > 3.0 sigma
data4 --> retry sextractor 5pix > 2 sigma
data5 --> retry sextractor 5pix > 2.5 sigma
data6 --> retry sextractor 5pix > 3.0 sigma
data7 --> sextractor 2pix > 2 sigma
data8 --> sextractor 2pix > 3 sigma
data_oct23 --> using band4.ilsang.pbcor.fits & band7.ilsang.pbcor.fits | sextractor with 5 pixels > 2 sigma
data_oct23_2 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 2 sigma
data_oct23_3 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 3 sigma
data_nov13 --> redo data_oct23 but with much closer overlapping criteria
data_jan31 --> double check if fluxes are being correctly calculated from sextractor
               50 contiguous pixels > 2 sigma (50 pixels ~ 1/5 beam size)
data_feb5 --> same as before just now outputs the source fluxes [W/m2] in a separate file
data_feb7 --> last one
data_feb8 --> nevermind this is the last one since overlapping_regions was set to 2.2 arcsec for the separation
but want to have just 1.1 arcsec (beam size)
data_feb12 --> just kidding, this is the last one. fixed beam sizes in fluxes section
"""
# decide what you want:
global_phot = True # perform global photometry?
regions = True # use sextractor to get dust regions and whatnot?
fluxes = True # calculate flux in dust regions and get slopes?
create_legus_region_files = False # create ds9 region files from legus cluster catalog? (probably not necessary since those files are already on the github repo)
closest_clusters = True # find closest stellar clusters to dust regions?
plot = True # do some plotting?
backup = True # backup files
backup_dir = 'data_feb12'
main_dir = '/uwpa2/turner/legus-alma/'
os.chdir(main_dir + 'science')
# define the band 4 and band 7 fits files to use
b4_fits = 'band4.ilsang.pbcor.fits'
b7_fits = 'band7.ilsang.pbcor.fits'
# define the other fits files needed
b4_pbcoverage = 'band4.ilsang.pb.fits'
b7_pbcoverage = 'band7.ilsang.pb.fits'
# defining some functions
def slope(x,y):
"""simple function to return the slope of a log line connecting two points
"""
return (np.log10(y[1]) - np.log10(y[0])) / (
|
np.log10(x[1])
|
numpy.log10
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the GNU-License license.
"""
"""
import uqra
import numpy as np, os, sys
import scipy.stats as stats
from tqdm import tqdm
import itertools, copy, math, collections
import multiprocessing as mp
import random
# warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
sys.stdout = uqra.utilities.classes.Logger()
def observation_error(y, mu=0, cov=0.03, random_state=100):
e = stats.norm(0, cov * abs(y)).rvs(size=len(y), random_state=random_state)
return e
def isOverfitting(cv_err):
    if len(cv_err) < 3:
        return False
    if cv_err[-1] > cv_err[-2] and cv_err[-2] > cv_err[0]:
        print('WARNING: Overfitting')
        return True
    return False
def threshold_converge(y, threshold=0.95):
y = np.array(y)
status = True if y[-1]> threshold else False
return status, threshold
def relative_converge(y, err=0.05):
"""
    check if y has converged in relative error
    return: (status, error)
            status: Boolean for converging or not
            error: relative error
"""
y = np.array(y)
if len(y) < 2:
res = (False, np.nan)
else:
error = abs((y[-2]-y[-1])/ y[-1])
res = (error < err, error)
return res
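# Usage sketch only (made-up values), not part of the original script: two
# successive estimates that differ by about 3% pass the default 5% relative
# tolerance.
def _relative_converge_sketch():
    status, error = relative_converge([1.00, 0.97], err=0.05)
    print(status, error)   # True, ~0.031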
def absolute_converge(y, err=1e-4):
"""
    check if y has converged in absolute error
    return: (status, error)
            status: Boolean for converging or not
error: absolute error
"""
y = np.array(y)
if len(y) < 2:
res = (False, np.nan)
else:
error = abs(y[-2]-y[-1])
res = (error < err, error)
return res
def main(model_params, doe_params, solver, r=0, random_state=None):
random.seed(random_state)
ndim_deg_cases = np.array(list(itertools.product([model_params.ndim,], model_params.degs)))
main_res = []
### object contain all training samples
data_train = uqra.Data()
data_train.xi_index = []
data_train.xi = np.empty((model_params.ndim, 0))
data_train.x = np.empty((model_params.ndim, 0))
data_train.y = np.empty((0,))
sparsity = 6
for i, (ndim, deg) in enumerate(ndim_deg_cases):
print('\n==================================================================================')
print(' <<<< Exploration iteration No. {:d}: ndim={:d}, p={:d} >>>>'.format(i+1, ndim, deg))
print('==================================================================================\n')
## ------------------------ UQRA Surrogate model----------------- ###
orth_poly = uqra.poly.orthogonal(ndim, deg, model_params.basis)
pce_model = uqra.PCE(orth_poly)
dist_u = model_params.dist_u
dist_xi = orth_poly.weight
dist_x = solver.distributions
pce_model.info()
## ------------------------ Updating DoE parameters ----------------- ###
idoe_params = copy.deepcopy(doe_params)
idoe_params.ndim = ndim
idoe_params.deg = int(deg)
### Specify candidate data filename template function
### e.g. filename_template= lambda r: r'DoE_Ball5pt6E5R{:d}.npy'.format(r)
### if not specified, default values will be used
idoe_params.update_filenames(filename_template=None)
filename_cand = idoe_params.fname_cand(r)
# filename_design = idoe_params.fname_design(r)
print(' - {:<23s} : {}'.format(' Candidate filename' , filename_cand ))
if filename_cand:
data_cand = np.load(os.path.join(data_dir_cand, filename_cand))
data_cand = data_cand[:ndim,random.sample(range(data_cand.shape[1]), k=idoe_params.num_cand)]
print(' {:<23s} : {}'.format(' shape', data_cand.shape))
else:
data_cand = None
print(' {:<23s} : {}'.format(' shape', data_cand))
idoe_sampling = idoe_params.doe_sampling.lower()
idoe_nickname = idoe_params.doe_nickname()
ioptimality = idoe_params.optimality
print(' - {:<23s} : {}'.format(' UQRA DoE ' , idoe_nickname))
### data object containing results from intermedia steps
data_ideg = uqra.Data()
data_ideg.ndim = ndim
data_ideg.deg = deg
data_ideg.y0_hat_ = []
data_ideg.cv_err_ = []
data_ideg.rmse_y_ = []
data_ideg.model_ = []
data_ideg.score_ = []
data_ideg.yhat_ecdf_= []
data_ideg.xi_train_ = []
data_ideg.x_train_ = []
data_ideg.y_train_ = []
data_ideg.DoI_candidate_ = []
### ------------------------ #1: Obtain exploration optimal samples ----------------- ###
print(' ------------------------------------------------------------')
print(' > Adding exploration samples in global domain... ')
print(' 1. optimal samples based on FULL basis: {:s}'.format(idoe_nickname))
# if deg == model_params.degs[0]:
# n_samples = math.ceil(len(active_index) * model_params.alpha)
# else:
# n_samples = len(active_index)
## obtain exploration optimal samples
n_samples = max(sparsity, math.ceil(0.8*pce_model.num_basis))
xi_train_, idx_optimal = idoe_params.get_samples(data_cand, orth_poly, n_samples, x0=[],
active_index=None, initialization='RRQR', return_index=True)
x_train_ = solver.map_domain(xi_train_, dist_xi)
y_train_ = solver.run(x_train_)
data_ideg.xi_train_.append(xi_train_)
data_ideg.x_train_.append (x_train_)
data_ideg.y_train_.append (y_train_)
n_samples_deg = len(np.concatenate(data_ideg.y_train_, axis=0))
data_train.xi = np.concatenate([data_train.xi, xi_train_], axis=1)
data_train.x = np.concatenate([data_train.x , x_train_ ], axis=1)
data_train.y = np.concatenate([data_train.y , y_train_ ], axis=0)
data_train.xi_index = uqra.list_union(data_train.xi_index, idx_optimal)
print(' - {:<32s} : {:d}'.format('Adding exploration optimal samples', n_samples))
print(' - {:<32s} : {:d}'.format('No. optimal samples [p='+str(deg)+']', n_samples_deg))
print(' - {:<32s} : {:.2f}'.format('Local oversampling [p='+str(deg)+']', n_samples_deg/pce_model.num_basis))
print(' - {:<32s} : {:d}'.format('Total number of samples', len(data_train.y)))
print(' 2. Training with {} '.format(model_params.fitting))
weight = doe_params.sampling_weight() ## weight function
pce_model.fit(model_params.fitting, data_train.xi, data_train.y, w=weight,
n_jobs=model_params.n_jobs) #, n_splits=model_params.n_splits
sparsity = len(pce_model.active_index)
print(' - {:<32s} : ({},{}), Alpha: {:.2f}'.format('X train', data_train.x.shape[1], pce_model.num_basis,
data_train.x.shape[1]/pce_model.num_basis))
print(' - {:<32s} : {}'.format('Y train' , data_train.y.shape))
print(' - {:<32s} : {}'.format('Sparsity' , sparsity))
print(' 3. Prediction with {} samples '.format(xi_test.shape))
y_test_hat = pce_model.predict(xi_test, n_jobs=model_params.n_jobs)
data_ideg.model_.append(pce_model)
data_ideg.rmse_y_.append(uqra.metrics.mean_squared_error(y_test, y_test_hat, squared=False))
data_ideg.y0_hat_.append(uqra.metrics.mquantiles(y_test_hat, 1-model_params.pf))
data_ideg.score_.append(pce_model.score)
data_ideg.cv_err_.append(pce_model.cv_error)
data_ideg.yhat_ecdf_.append(uqra.ECDF(y_test_hat, model_params.pf, compress=True))
print(' - {:<32s} : {:.4e}'.format('y0 test [ PCE ]', np.array(data_ideg.y0_hat_[-1])))
print(' - {:<32s} : {:.4e}'.format('y0 test [TRUE ]', y0_test))
i_iteration = 1
while True:
print(' ------------------------------')
print(' < Iteration No. {:d} >'.format(i_iteration))
print(' ------------------------------')
print(' ------------------------------------------------------------')
print(' > Adding exploration optimal samples in global domain ... ')
print(' 1-1. optimal samples based on SIGNIFICANT basis in global domain ... ')
####-------------------------------------------------------------------------------- ####
n_samples = min(5, max(3,sparsity)) #min(sparsity, model_params.alpha *pce_model.num_basis - n_samples_deg, 5)
# n_samples = min(10, sparsity) #len(active_index)
xi_train_, idx_optimal = idoe_params.get_samples(data_cand, orth_poly, n_samples, x0=data_train.xi_index,
active_index=pce_model.active_index, initialization='RRQR', return_index=True)
## obtain exploration optimal samples
x_train_ = solver.map_domain(xi_train_, dist_xi)
y_train_ = solver.run(x_train_)
data_ideg.xi_train_.append(xi_train_)
data_ideg.x_train_.append (x_train_)
data_ideg.y_train_.append (y_train_)
n_samples_deg = len(np.concatenate(data_ideg.y_train_, axis=0))
data_train.xi = np.concatenate([data_train.xi, xi_train_], axis=1)
data_train.x = np.concatenate([data_train.x , x_train_ ], axis=1)
data_train.y = np.concatenate([data_train.y , y_train_ ], axis=0)
data_train.xi_index = uqra.list_union(data_train.xi_index, idx_optimal)
print(' - {:<32s} : {:d}'.format('Adding exploration optimal samples', n_samples))
print(' - {:<32s} : {:d}'.format('No. optimal samples [p='+str(deg)+']', n_samples_deg))
print(' - {:<32s} : {:.2f}'.format('Local oversampling [p='+str(deg)+']', n_samples_deg/pce_model.num_basis))
print(' - {:<32s} : {:d}'.format('Total number of samples', len(data_train.y)))
print(' 1-2. optimal samples based on SIGNIFICANT basis in domain of interest... ')
## obtain DoI candidate samples
data_cand_DoI, idx_data_cand_DoI = idoe_params.samples_nearby(data_ideg.y0_hat_[-1], xi_test, y_test_hat, data_cand
, deg, n0=20, epsilon=0.1, return_index=True)
data_cand_xi_DoI = deg**0.5 * data_cand_DoI if idoe_params.doe_sampling in ['CLS4', 'CLS5'] else data_cand_DoI
data_ideg.DoI_candidate_.append(solver.map_domain(data_cand_xi_DoI, dist_xi))
## obtain DoI optimal samples
xi_train_, idx_optimal_DoI = idoe_params.get_samples(data_cand_DoI, orth_poly, n_samples, x0=[],
active_index=pce_model.active_index, initialization='RRQR', return_index=True)
assert xi_train_.shape[1] == n_samples ## make sure return number of samples required
x_train_ = solver.map_domain(xi_train_, dist_xi)
y_train_ = np.array(solver.run(x_train_), ndmin=1)
data_ideg.xi_train_.append(xi_train_)
data_ideg.x_train_.append (x_train_)
data_ideg.y_train_.append (y_train_)
n_samples_deg = len(np.concatenate(data_ideg.y_train_, axis=0))
## put all training samples together, up to current step
data_train.xi = np.concatenate([data_train.xi, xi_train_], axis=1)
data_train.x = np.concatenate([data_train.x , x_train_ ], axis=1)
data_train.y = np.concatenate([data_train.y , y_train_ ], axis=0)
data_train.xi_index = uqra.list_union(data_train.xi_index, idx_optimal_DoI)
print(' - {:<32s} : {} '.format('DoI candidate samples', data_cand_DoI.shape ))
print(' - {:<32s} : {:d}'.format('Adding DoI optimal samples', n_samples))
print(' - {:<32s} : {:d}'.format('No. samples [p='+str(deg)+']', n_samples_deg))
print(' - {:<32s} : {:.2f}'.format('Local oversampling [p='+str(deg)+']', n_samples_deg/pce_model.num_basis))
print(' - {:<32s} : {:d}'.format('Total number of samples', len(data_train.y)))
print(' 2. Training with {} '.format(model_params.fitting))
weight = doe_params.sampling_weight() ## weight function
pce_model.fit(model_params.fitting, data_train.xi, data_train.y, w=weight,
n_jobs=model_params.n_jobs) #, n_splits=model_params.n_splits
sparsity = len(pce_model.active_index)
print(' - {:<32s} : ({},{}), Alpha: {:.2f}'.format('X train', data_train.x.shape[1], pce_model.num_basis,
data_train.x.shape[1]/pce_model.num_basis))
print(' - {:<32s} : {}'.format('Y train' , data_train.y.shape))
print(' - {:<32s} : {}'.format('Sparsity' , len(pce_model.active_index)))
print(' 3. Prediction with {} samples '.format(xi_test.shape))
y_test_hat = pce_model.predict(xi_test, n_jobs=model_params.n_jobs)
data_ideg.model_.append(pce_model)
data_ideg.rmse_y_.append(uqra.metrics.mean_squared_error(y_test, y_test_hat, squared=False))
data_ideg.y0_hat_.append(uqra.metrics.mquantiles(y_test_hat, 1-model_params.pf))
data_ideg.score_.append(pce_model.score)
data_ideg.cv_err_.append(pce_model.cv_error)
data_ideg.yhat_ecdf_.append(uqra.ECDF(y_test_hat, model_params.pf, compress=True))
# isOverfitting(data_ideg.cv_err) ## check Overfitting
print(' - {:<32s} : {:.4e}'.format('y0 test [ PCE ]', np.array(data_ideg.y0_hat_[-1])))
print(' - {:<32s} : {:.4e}'.format('y0 test [TRUE ]', y0_test))
isConverge, error_converge = relative_converge(data_ideg.y0_hat_, err=model_params.rel_err)
# isConverge, error_converge = absolute_converge(data_ideg.y0_hat, err=model_params.abs_err)
print(' 4. Converge check ...')
print(' - Value : {} [Ref: {:e}]'.format(np.array(data_ideg.y0_hat_), y0_test))
print(' - Error : {} % [{}]'.format(np.around(error_converge, 4)*100,isConverge))
print(' ------------------------------------------------------------')
i_iteration +=1
if np.all(isConverge):
print(' !< Model converge for order {:d} >!'.format(deg))
break
if n_samples_deg > model_params.alpha*orth_poly.num_basis:
# if len(data_train.y)> model_params.alpha*orth_poly.num_basis:
print(' !< Number of samples exceeding {:.2f}P >!'.format(model_params.alpha))
break
data_ideg.y0_hat = data_ideg.y0_hat_[-1]
data_ideg.cv_err = data_ideg.cv_err_[-1]
# data_ideg.kappa = data_ideg.kappa_[-1]
data_ideg.model = data_ideg.model_[-1]
data_ideg.score = data_ideg.score_[-1]
data_ideg.yhat_ecdf = data_ideg.yhat_ecdf_[-1]
data_ideg.rmse_y = data_ideg.rmse_y_[-1]
if len(data_ideg.DoI_candidate_) == 0:
data_ideg.DoI_candidate = []
else:
data_ideg.DoI_candidate = np.concatenate(data_ideg.DoI_candidate_, axis=1)
data_ideg.xi_train =
|
np.concatenate(data_ideg.xi_train_, axis=1)
|
numpy.concatenate
|
import pandas as pd
import rsw
import matplotlib.pyplot as plt
import numpy as np
def get_dummies(df, column, prefix=None, columns=None):
dummies = pd.get_dummies(df[column], prefix=prefix, columns=columns)
dummies[df[column].isna()] = np.nan
return dummies
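# Illustration only -- toy data, not BRFSS. The point of this wrapper is that
# plain pd.get_dummies would encode a missing value as an all-zero row, while
# here the whole dummy row is set to NaN so missingness is preserved.
def _get_dummies_sketch():
    toy = pd.DataFrame({"color": ["red", np.nan, "blue"]})
    print(get_dummies(toy, "color", prefix="color"))
    # the middle row is NaN in every color_* column instead of all zeros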
def get_brfss():
raw_df = pd.read_sas("data/LLCP2018.XPT ", format='xport')
states = pd.read_csv("data/us_states.csv")
states = states.set_index("post_code")
columns = ["_STATE", "_AGE_G", "SEX1",
"EDUCA", "INCOME2", "HTM4", "GENHLTH"]
df = raw_df[columns]
df._STATE.replace(dict(zip(states.fips, states.index)), inplace=True)
df._STATE.replace({
66: np.nan
}, inplace=True)
df._AGE_G.replace({
1: "18_24",
2: "25_34",
3: "35_44",
4: "45_54",
5: "55_65",
6: "65+"
}, inplace=True)
df.SEX1.replace({
1: "M",
2: "F",
7: np.nan,
9: np.nan
}, inplace=True)
df.EDUCA.replace({
1: np.nan,
2: "Elementary",
3: np.nan,
4: "High school",
5: "Some college",
6: "College",
9: np.nan
}, inplace=True)
df.INCOME2.replace({
1: "<10k",
2: "<15k",
3: "<20k",
4: "<25k",
5: "<35k",
6: "<50k",
7: "<75k",
8: ">75k",
77: np.nan,
99: np.nan
}, inplace=True)
df.GENHLTH.replace({
1: "Excellent",
2: "Very good",
3: "Good",
4: "Fair",
5: "Poor",
7: np.nan,
9: np.nan
}, inplace=True)
df.loc[:, "State Age"] = df._STATE + " " + df._AGE_G
df.loc[:, "Education Income"] = df.EDUCA + " " + df.INCOME2
df_processed = pd.concat([
get_dummies(df, "State Age", "state_age"),
get_dummies(df, "SEX1", "sex"),
get_dummies(df, "Education Income", "education_income"),
get_dummies(df, "GENHLTH", "health")
], axis=1)
return raw_df, df, df_processed
# get data
print("Getting data.")
raw_df, df, df_processed = get_brfss()
# compute sample averages
print("Computing sample averages.")
fdes = df_processed.mean()
state_age_mean = fdes[filter(lambda x: "state_age" in x, df_processed.columns)]
sex_mean = fdes[filter(lambda x: "sex" in x, df_processed.columns)]
education_income_mean = fdes[
filter(lambda x: "education_income" in x, df_processed.columns)]
health_mean = fdes[filter(lambda x: "health" in x, df_processed.columns)]
# construct skewed sample
print("Constructing skewed sample.")
df_for_sampling = df_processed.fillna(df_processed.mean())
np.random.seed(0)
n = 10000
while True:
c = np.random.randn(df_processed.shape[1]) / 2
pi = np.exp(df_for_sampling@c) / np.sum(np.exp(df_for_sampling@c))
idx = np.random.choice(df_for_sampling.shape[
0], p=pi, size=n, replace=False)
df_small = df_processed.loc[idx]
if (df_small.mean() > 0).all():
break
# Maximum entropy weighting
print("\n\n Maximum entropy weighting")
losses = [
rsw.EqualityLoss(np.array(state_age_mean).flatten()),
rsw.EqualityLoss(np.array(sex_mean).flatten()),
rsw.EqualityLoss(np.array(education_income_mean).flatten()),
rsw.EqualityLoss(np.array(health_mean).flatten())
]
regularizer = rsw.EntropyRegularizer(limit=100)
w, out, sol = rsw.rsw(df_small, None, losses, regularizer,
1, verbose=True, rho=75, eps_abs=1e-6, eps_rel=1e-6)
w = np.clip(w, 1 / (100 * n), 100 / n)
w /= np.sum(w)
w_maxent = w
n = w.size
t = df[:n]
t["weight"] = w
print(t.loc[t.weight.argmax()])
# print(t[t["State Age"] == "NJ 45_54"])
print(-np.sum(w * np.log(w)), -np.sum(np.ones(n) / n * np.log(np.ones(n) / n)))
print(w.min(), w.max())
plt.hist(w, bins=100, color='black')
plt.xlabel("$w_i$")
plt.ylabel("count")
plt.savefig("figs/hist_w.pdf")
plt.close()
x = np.array(raw_df['HTIN4'].iloc[idx])
x = x[~np.isnan(x)]
hist_unweighted, vals = np.histogram(x, bins=1000)
cdf_unweighted = np.cumsum(hist_unweighted) / np.sum(hist_unweighted)
plt.plot(vals[1:], cdf_unweighted, label='unweighted', c='grey', linewidth=1)
x = np.array(raw_df['HTIN4'].iloc[idx])
w_adjusted = w * 1 / (1 - w[np.isnan(x)].sum())
w_adjusted[np.isnan(x)] = 0
hist_weighted, _ = np.histogram(x, bins=vals, weights=w_adjusted)
cdf_weighted = np.cumsum(hist_weighted) / np.sum(hist_weighted)
plt.plot(vals[1:], cdf_weighted, label='weighted', c='black', linewidth=1)
x = np.array(raw_df['HTIN4'])
x = x[~np.isnan(x)]
hist_true, _ = np.histogram(x, bins=vals)
cdf_true = np.cumsum(hist_true) / np.sum(hist_true)
plt.plot(vals[1:], cdf_true, '--', label='true', c='black', linewidth=1)
plt.xlim(55, 80)
plt.xlabel("Height (inches)")
plt.ylabel("CDF")
plt.legend()
plt.savefig("figs/height.pdf")
plt.close()
print("%3.3f, %3.3f" % (np.abs(cdf_weighted - cdf_true).max(),
np.abs(cdf_unweighted - cdf_true).max()))
x = np.array(raw_df['WTKG3'].iloc[idx]) / 100
x = x[~np.isnan(x)]
hist_unweighted, vals = np.histogram(x, bins=1000)
cdf_unweighted = np.cumsum(hist_unweighted) / np.sum(hist_unweighted)
plt.plot(vals[1:], cdf_unweighted, label='unweighted', c='grey', linewidth=1)
x = np.array(raw_df['WTKG3'].iloc[idx]) / 100
w_adjusted = w * 1 / (1 - w[
|
np.isnan(x)
|
numpy.isnan
|
import os
import math
import time
import pprint
from tqdm import tqdm
import dill as pickle
import numpy as np
from numpy import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_pretrained_bert.tokenization import BertTokenizer
from utils import constant, tile, text_input2bert_input, top_k_top_p_filtering
from models import RNNEncoder, RNNDecoder
class RLSeq(nn.Module):
def __init__(self, encoder, decoder, vocab):
super(RLSeq, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.use_attn = True if constant.attn != "none" else False
self.use_beam = constant.beam
self.use_cuda = constant.USE_CUDA
self.embeddings_cpu = constant.embeddings_cpu
self.V = self.encoder.V
self.H = self.encoder.H
self.L = self.encoder.L
self.D = self.encoder.D
self.vocab = vocab
if constant.bi == 'bi':
self.reduce_state = nn.Linear(self.H * 2, self.H)
def init_multitask(self):
self.clf = nn.Linear(self.H, 1)
# For Reward shaping
def init_aux_reward(self, reward_model):
self.aux_reward = reward_model
# For REINFORCE
def init_baseline_reward(self):
self.baseline_reward = nn.Linear(self.H, 1)
# For REINFORCE
def init_reward(self, reward_model):
self.reward_tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
self.reward = reward_model
# For REINFORCE
def init_user_model(self, user_model):
self.user_model = user_model
# For Curiosity Driven
def init_intrinsic_curosity_module(self):
# receive h_t and transform to \phi_t
self.curiosity_encoder = nn.Sequential(
nn.Linear(self.H, self.H),
nn.SELU(),
nn.Dropout(constant.dropout),
nn.Linear(self.H, constant.CD),
nn.SELU()
)
# receive h_t and h_{t+1} to predict a_t
self.curiosity_inverse = nn.Linear(constant.CD * 2, self.V)
# receive h_t and embedding(a_t) to predict h_{t+1}
self.curiosity_forward = nn.Sequential(
nn.Linear(constant.CD + self.D, self.H),
nn.SELU(),
nn.Dropout(constant.dropout),
nn.Linear(self.H, constant.CD),
nn.SELU(),
)
def encode(self, seqs, lens):
src_h, src_h_0 = self.encoder(seqs, lens)
# Init decoder hidden with encoder final hidden w/o attn
dec_h_t = src_h_0.unsqueeze(0)
if constant.bi == 'bi':
src_h = self.reduce_state(src_h)
dec_h_t = self.reduce_state(dec_h_t)
return src_h, dec_h_t
def decode(self, t, x_t, dec_h_t, src_h, targets, tau=1.0, sample=False, use_mle=False, min_dec_len=1):
# y_t: B x V, dec_h_t: 1 x B x H
y_t, dec_h_t = self.decoder(x_t, dec_h_t, src_h, self.use_attn)
if use_mle:
x_t = targets[:,t] # Next input is current target
elif sample: # torch.multinomial sample vector B with input_dist y_t: B x V
y_t[:, constant.unk_idx] = -float('Inf')
# prevent empty string by setting min decoding length
if t < min_dec_len:
y_t[:, constant.eou_idx] = -float('Inf')
if constant.topk:
filtered = top_k_top_p_filtering(y_t.data / tau, top_k=constant.topk_size)
x_t = torch.multinomial(F.softmax(filtered, dim=1), 1).long().squeeze() # x_t: B
else:
x_t = torch.multinomial(F.softmax(y_t.data / tau, dim=1), 1).long().squeeze() # x_t: B
else:
y_t[:, constant.unk_idx] = -float('Inf')
# prevent empty string by setting min decoding length
if t < min_dec_len:
y_t[:, constant.eou_idx] = -float('Inf')
if constant.topk:
filtered = top_k_top_p_filtering(y_t.data / tau, top_k=constant.topk_size)
x_t = torch.multinomial(F.softmax(filtered, dim=1), 1).long().squeeze() # x_t: B
else:
_, topi = y_t.data.topk(1)
if self.use_cuda:
x_t = torch.cuda.LongTensor(topi.view(-1)) # Chosen word is next input
else:
x_t = torch.LongTensor(topi.view(-1)) # Chosen word is next input
# if self.use_cuda:
# x_t = x_t.cuda()
return y_t, dec_h_t, x_t
def forward(self, seqs, lens, targets, sentiments=None, test=False, use_mle=False):
B, T = targets.shape
src_h, dec_h_t = self.encode(seqs, lens)
# clf
if not test and constant.use_sentiment:
clf_logits = self.clf(dec_h_t).squeeze()
if dec_h_t.shape[0] == 1 and len(dec_h_t.shape) < 3:
dec_h_t = dec_h_t.unsqueeze(0)
x_t = torch.LongTensor([constant.sou_idx] * B) # Trigger Generation with SOS Token
xs = torch.zeros(T, B).long()
probs = torch.zeros(T, B, self.V)
if not use_mle:
            # xs = torch.zeros(T, B).long() # decoded words
rs = torch.zeros(T, B).float() # baseline rewards
if constant.use_curiosity:
# curiosity_forward_criterion = nn.MSELoss()
curiosity_inverse_criterion = nn.CrossEntropyLoss(ignore_index=constant.eou_idx)
curiosity_features = torch.zeros(T, B, constant.CD).float()
curiosity_forwards = torch.zeros(T, B, constant.CD).float()
curiosity_actions = torch.zeros(T, B, self.V).float()
R_c = torch.zeros(T, B).float()
step_mask = torch.ones(B).float()
step_masks = []
step_losses = []
if self.use_cuda:
x_t = x_t.cuda()
xs = xs.cuda()
probs = probs.cuda()
if not use_mle:
rs = rs.cuda()
step_mask = step_mask.cuda()
if constant.use_curiosity:
curiosity_features = curiosity_features.cuda()
curiosity_forwards = curiosity_forwards.cuda()
curiosity_actions = curiosity_actions.cuda()
R_c = R_c.cuda()
dec_h_0, x_0 = dec_h_t, x_t
prev_curiosity_feature = None
# Run through decoder one time step at a time
for t in range(T):
y_t, dec_h_t, x_t = self.decode(t, x_t, dec_h_t, src_h, targets, tau=constant.tau, use_mle=False if test else use_mle, sample=False if test else (not use_mle))
probs[t] = y_t
xs[t] = x_t
if not use_mle:
# save the decoded sentence
gold_probs = torch.gather(y_t, 1, x_t.unsqueeze(1)).squeeze()
step_loss = -torch.log(gold_probs)
step_loss = step_loss * step_mask
step_losses.append(step_loss)
step_masks.append(step_mask)
step_mask = torch.clamp(step_mask - (x_t == constant.eou_idx).float(), min=0.0)
# calculate baseline rewards for each timestep (only update regression model - detach dec_h_t)
if not test and constant.use_baseline:
rs[t] = self.baseline_reward(dec_h_t.squeeze().detach()).squeeze() * step_mask
# curiosity features and forward model
if not test and constant.use_curiosity:
curiosity_feature = self.curiosity_encoder(dec_h_t.squeeze().detach()) * step_mask.unsqueeze(1)
curiosity_features[t] = curiosity_feature
curiosity_forward = self.curiosity_forward(torch.cat([curiosity_feature, self.decoder.embedding(xs[t]).detach()], dim=-1)) * step_mask.unsqueeze(1)
curiosity_forwards[t] = curiosity_forward
# curiosity inverse model <- from a_1 to a_t-1
if t > 0:
curiosity_actions[t-1] = self.curiosity_inverse(torch.cat((prev_curiosity_feature, curiosity_feature), dim=-1)) * step_mask.unsqueeze(1)
R_c[t-1] = 0.5 * torch.pow((curiosity_feature - curiosity_forward).norm(p=2, dim=-1), 2) * step_mask
prev_curiosity_feature = curiosity_feature
else:
prev_curiosity_feature = curiosity_feature
# curiosity reward is MSE loss of || \phi_t || given h_{t-1} <- from a_1 to a_t-1
if not use_mle and not test and constant.use_curiosity:
# R_c = curiosity_forward_criterion(R_c, curiosity_forwards[:, 1:]) # don't predict first state
# L_i = curiosity_inverse_criterion(curiosity_actions.transpose(0, 1).contiguous().view(B*(T-1), -1), xs.transpose(0, 1).contiguous()[:, :-1].contiguous().view(B*(T-1)) ) # don't predict last action
# curiosity_actions[T-1] = self.curiosity_inverse(torch.cat((curiosity_features[-1], torch.zeros(B, 128)), dim=-1)) * step_mask.unsqueeze(1)
last_action = torch.zeros(B, self.V).float().cuda() if constant.USE_CUDA else torch.zeros(B, self.V).float()
# eou_idx = torch.LongTensor([constant.eou_idx] * B)
# if constant.USE_CUDA:
# last_action = last_action.cuda()
# eou_idx = eou_idx.cuda()
# last_action.scatter_(1, eou_idx.unsqueeze(1), float('Inf'))
# last_action *= step_mask.unsqueeze(1)
curiosity_actions[T-1] = last_action
L_i = curiosity_inverse_criterion(curiosity_actions.transpose(0, 1).contiguous().view(B*T, -1), xs.transpose(0, 1).contiguous().view(B*T) ) # don't predict last action
# generate from the decoded sentence
xs = xs.cpu().data.numpy().T # B, T
# iter(callable, sentinel) break loop when sentinel is hit
sentences = [" ".join([self.vocab.index2word[x_t] for x_t in iter(lambda x=iter(gens): next(x), constant.eou_idx)]) for gens in xs]
if use_mle:
return probs, sentences
else:
if constant.use_bert:
if constant.use_user:
contexts = [" ".join([self.vocab.index2word[x_t] for x_t in iter(lambda x=iter(seq): next(x), constant.pad_idx)]) for seq in seqs.cpu().data.numpy()]
sents = [context + ' ' + sent for context, sent in zip(contexts, sentences)]
sents = [self.vocab.transform_one(sent) for sent in sents]
lens = [len(sentence) for sentence in sents]
sort = np.argsort(lens)[::-1].tolist()
unsort = np.argsort(sort).tolist()
sents = np.array(sents, dtype='object')[sort].tolist()
lens = np.array(lens)[sort]#.tolist()
# Pad dialogs and targets to their respective max batch lens
B = len(sentences)
L = lens[0]
padded_sentences = torch.ones((B, L)) * constant.pad_idx
for b in range(B):
padded_sentences[b, :lens[b]] = torch.from_numpy(np.array(sents[b]))
padded_sentences = padded_sentences.long()
if constant.USE_CUDA:
padded_sentences = padded_sentences.cuda()
user_sents = np.array(self.user_model.predict_batch(padded_sentences, lens, np.zeros((B, L))))[unsort].tolist()
R = self.get_reward(user_sents)
else:
R = self.get_reward(sentences)
else:
sents = sentences
if constant.use_context:
contexts = [" ".join([self.vocab.index2word[x_t] for x_t in iter(lambda x=iter(seq): next(x), constant.pad_idx)]) for seq in seqs.cpu().data.numpy()]
sents = [context + ' ' + sent for context, sent in zip(contexts, sents)]
sents = [self.vocab.transform_one(sent) for sent in sents]
lens = [len(sentence) for sentence in sents]
sort = np.argsort(lens)[::-1].tolist()
unsort = np.argsort(sort).tolist()
sents = np.array(sents, dtype='object')[sort].tolist()
lens = np.array(lens)[sort]#.tolist()
# Pad dialogs and targets to their respective max batch lens
B = len(sentences)
L = lens[0]
padded_sentences = torch.ones((B, L)) * constant.pad_idx
for b in range(B):
padded_sentences[b, :lens[b]] = torch.from_numpy(
|
np.array(sents[b])
|
numpy.array
|
import numbers
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import matplotlib.pyplot as plt
llf_factor = 10**6 # factor for avoiding a too flat likelihood function
def exp_intensity(beta, sigma, gamma, n, history, sum_less_equal=True):
"""
Calculate the (exponential) intensity of a HawkesN process.
Parameters
----------
beta : float
Parameter beta of the corresponding SEIR model.
sigma : float or None
Parameter sigma of the corresponding SEIR model. If None,
`sigma`==`gamma` is assumed.
gamma : float
Parameter gamma of the corresponding SEIR model.
n : float or int
The size of the population.
history : np.array
Array containing the process' jump times.
sum_less_equal : bool, default: True
If True, we sum over all event times <= time t. Otherwise, we sum
over all event times < time t.
Returns
-------
exp_intensity_ : function
A function of the time (expecting a float or np.array as argument).
"""
def exp_intensity_(t):
"""
Parameters
----------
t : float or np.array
If `t` is a float, then it represents a point in time. If it is
a 1-dimensional array, then it represents an array of points in
time.
Returns
-------
result : float or np.array
If `t` is a float, then the intensity of the HawkesN process at
time `t` is returned. If `t` is a 1-dimensional array, then an
array is returned. Each entry of it represents the intensity of
the HawkesN process at the time that was specified by the
corresponding entry in `t`.
"""
nonlocal sigma
if sigma is None:
sigma = gamma
#if np.isnan([beta, sigma, gamma, n]).any():
# raise RuntimeError("One of the arguments to exp_intensity is nan: "
# "beta" + str(beta) + ", sigma" + str(sigma) +
# ", gamma" + str(gamma) + ", n" + str(n) +
# ", history" + str(history))
if isinstance(t, numbers.Number):
t = np.array([t])
result = np.empty(t.shape)
for index, time in enumerate(t):
if sum_less_equal:
history_until_t = history[history <= time]
else:
history_until_t = history[history < time]
if sigma != gamma:
result[index] = np.sum(
np.exp(-sigma * (time - history_until_t))
-
np.exp(-gamma * (time - history_until_t))
)
else:
result[index] = np.sum(
(time - history_until_t) *
np.exp(-gamma * (time - history_until_t))
)
result[index] *= (1 - np.count_nonzero(history <= time)/n)
# print("intensity at index", index, "is", result[index]*scale*decay)
if sigma != gamma:
result *= beta * sigma / (gamma - sigma)
else:
result *= beta * gamma
return result
return exp_intensity_
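# Usage sketch only -- not part of the original module; the parameter values
# are invented. Build the intensity for a three-event history and evaluate it
# on a small grid; passing sigma=None selects the sigma == gamma branch.
def _exp_intensity_sketch():
    history = np.array([0.0, 1.0, 2.5])
    lam = exp_intensity(beta=2.0, sigma=None, gamma=1.0, n=100, history=history)
    print(lam(np.array([0.5, 1.5, 3.0])))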
def plot_exp_intensity(t_max, intensity=None, beta=None, sigma=None,
gamma=None, n=None, history=None, width=5.51, height=4,
n_xticks=6, step=.01, fname=None, **kwargs):
"""
Plot (or save the plot of) the exponential intensity function from t=0
until t=t_max.
Parameters
----------
t_max : float
Define the time horizon of the plot. The time axis will contain
values from 0 to t_max.
intensity : function or None
If None, then the arguments scale, decay, and n must be provided in
order to compute an intensity function. If exp_intensity is already
a function (taking the time as only argument) then scale, decay, n,
and history are ignored.
beta : float
See corresponding argument in :func:`exp_intensity`. Ignored if
exp_intensity is provided as argument.
sigma : float
See corresponding argument in :func:`exp_intensity`. Ignored if
exp_intensity is provided as argument.
gamma : float
See corresponding argument in :func:`exp_intensity`. Ignored if
exp_intensity is provided as argument.
n : int or None
Population size. Ignored if exp_intensity is provided as argument.
history : np.array or None
One dimensional array containing the event times. The event times
must be sorted in ascending order. Ignored if exp_intensity is
provided as argument.
width : float, default: 5.51
Width of the plot.
height : float, default: 4
Height of the plot.
n_xticks : int (must be non-negative)
Number of ticks on the time axis.
step : float
Step size for drawing the function graph.
fname : str or None
Name (without extension) of the file the plot is saved to. If
`None`, the plot is not saved.
"""
if intensity is None:
intensity = exp_intensity(beta, sigma, gamma, n, history, **kwargs)
t = np.arange(0, t_max, step)
plt.figure(dpi=300, figsize=(width, height))
plt.plot(t, intensity(t))
plt.xlabel("$t$")
plt.xlim(0, t_max)
plt.xticks(np.linspace(0, t_max, n_xticks))
plt.ylabel("Intensity")
plt.grid()
title = "Intensity of a SEIR-related HawkesN process"
    if history is not None and beta is not None and sigma is not None \
            and gamma is not None and n is not None:
title += " with\nevent history \{" \
+ ",".join(str(i) for i in history[:4]) \
+ (", ..." if len(history) > 4 else "") \
+ "\} and parameters: $\\beta=" + str(beta) \
+ "$, $\\sigma=" + str(sigma) + "$, $\\gamma=" + str(gamma) + \
"$, $N$=" + str(n)
title += "."
plt.title(title)
if fname is not None:
plt.savefig(fname + ".pdf")
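# Illustrative call sketch (editor addition): plotting the intensity of a toy
# HawkesN process over [0, 5]. Left commented out so that importing this module
# does not open a figure; the parameter values are arbitrary.
# plot_exp_intensity(t_max=5.0, beta=1.0, sigma=0.6, gamma=0.3, n=10,
#                    history=np.array([0.0, 0.5, 1.2, 2.0]))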
def llf_sigma_neq_gamma(beta, sigma, gamma, n, history, sum_less_equal=True):
"""
Parameters
----------
beta : float
See corresponding argument in :func:`exp_intensity`.
sigma : float
See corresponding argument in :func:`exp_intensity`.
gamma : float
See corresponding argument in :func:`exp_intensity`.
n : float
See corresponding argument in :func:`exp_intensity`.
history : np.array
See corresponding argument in :func:`exp_intensity`.
sum_less_equal : bool, default: True
See corresponding argument in :func:`exp_intensity`.
Returns
-------
llf : numpy.float64
The log-likelihood for the parameters passed as arguments.
"""
intensity = exp_intensity(beta, sigma, gamma, n, history,
sum_less_equal=sum_less_equal)
sum_log_arg = intensity(history[history > 0])
    if sum_log_arg[0] <= 0:
sum_log_arg = sum_log_arg[1:]
sum_part = np.sum(np.log(sum_log_arg))
int_part = 0
for i in range(len(history) - 1):
int_part += (n - (i + 1)) / n * np.sum(
(
np.exp(-sigma * (history[i] - history[:i + 1]))
-
np.exp(-sigma * (history[i + 1] - history[:i + 1]))
) / sigma
-
(
np.exp(-gamma * (history[i] - history[:i + 1]))
-
np.exp(-gamma * (history[i + 1] - history[:i + 1]))
) / gamma
)
int_part *= (beta * sigma / (gamma-sigma))
# print("sum:", sum_part)
# print("integral:", int_part)
# print("*** llf:", sum_part - int_part)
return sum_part - int_part
def llf_sigma_eq_gamma(beta, gamma, n, history, sum_less_equal=True):
"""
Parameters
----------
beta : float
See corresponding argument in :func:`exp_intensity`.
gamma : float
See corresponding argument in :func:`exp_intensity`.
n : float
See corresponding argument in :func:`exp_intensity`.
history : np.array
See corresponding argument in :func:`exp_intensity`.
sum_less_equal : bool, default: True
See corresponding argument in :func:`exp_intensity`.
Returns
-------
llf : numpy.float64
The log-likelihood for the parameters passed as arguments.
"""
intensity = exp_intensity(beta, gamma, gamma, n, history,
sum_less_equal=sum_less_equal)
sum_log_arg = intensity(history[history > 0])
    if sum_log_arg[0] <= 0:
        sum_log_arg = sum_log_arg[1:]
    sum_part = np.sum(np.log(sum_log_arg))
int_part = 0
for i in range(len(history) - 1):
int_part += (n - (i + 1)) / n * np.sum(
np.exp(-gamma * (history[i] - history[:i + 1]))
* (gamma * (history[i] - history[:i + 1]) + 1)
-
np.exp(-gamma * (history[i + 1] - history[:i + 1]))
* (gamma * (history[i + 1] - history[:i + 1]) + 1)
) / gamma
int_part *= beta
# print("sum:", sum_part)
# print("integral:", int_part)
# print("*** llf:", sum_part - int_part)
return sum_part - int_part
def llf(beta, sigma, gamma, n, history, sum_less_equal=True):
if sigma != gamma and sigma is not None:
return llf_sigma_neq_gamma(beta, sigma, gamma, n, history,
sum_less_equal)
else:
return llf_sigma_eq_gamma(beta, gamma, n, history, sum_less_equal)
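# Illustrative sketch (editor addition): the dispatcher above selects the
# sigma != gamma branch for these (arbitrary) parameter values; the result is
# the point-process log-likelihood, sum(log lambda(t_i)) minus the integrated
# intensity.
_demo_llf = llf(beta=1.0, sigma=0.6, gamma=0.3, n=10,
                history=np.array([0.0, 0.5, 1.2, 2.0]))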
def fit_sigma_neq_gamma(history, beta_start=None, sigma_start=None,
gamma_start=None, n_start=None, estimate_n_only=False):
"""
Parameters
----------
history : np.array
1-dimensional array containing the event times in ascending order.
beta_start : float
Starting value for the likelihood optimization.
sigma_start : float
Starting value for the likelihood optimization.
gamma_start : float
Starting value for the likelihood optimization.
n_start : float or None, default: None
Starting value for the likelihood optimization. If None, a value is
chosen based on the number of events contained in the `history`.
estimate_n_only : bool, default: False
If True, `beta`, `sigma` and `gamma` are considered to be fixed and
only :math:`N` is fitted. Otherwise, `beta`, `sigma` and `gamma` are
fitted together with :math:`N`.
References
----------
This method uses the L-BFGS algorithm (see [1]_).
.. [1] <NAME>, <NAME> and <NAME>. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
"""
if estimate_n_only and \
(beta_start is None or sigma_start is None or gamma_start is None):
raise Exception("If beta, sigma, and gamma are fixed, their values "
"must be provided!")
if n_start is None:
n_start = len(history) + .5
def negative_llf(beta_sigma_gamma_n):
"""
Parameters
----------
beta_sigma_gamma_n : np.array (shape (4))
Values for the parameters beta, sigma, gamma, and N in a single
array.
Returns
-------
neg_llf : float
The negative log-likelihood.
"""
beta, sigma, gamma, n = tuple(beta_sigma_gamma_n)
if sigma == gamma:
sigma += 1e-7
return -llf_factor * llf(beta=beta, sigma=sigma, gamma=gamma, n=n,
history=history, sum_less_equal=False)
def negative_llf_separate_params(n, beta, sigma, gamma):
"""
Same as :func:`negative_llf` but taking the parameters `n`, `beta`,
`sigma`, and `gamma` as separate arguments. This makes the function
suitable for likelihood maximization in only one parameter (`n`) with
fixed values for `beta`, `sigma`, and `gamma`.
"""
if sigma == gamma:
sigma += 1e-7
return -llf_factor * llf(beta=beta, sigma=sigma, gamma=gamma, n=n,
history=history, sum_less_equal=False)
def negative_llf_gradient(beta_sigma_gamma_n):
beta, sigma, gamma, n = tuple(beta_sigma_gamma_n)
if sigma == gamma:
sigma += 1e-7
return -llf_factor * llf_gradient(beta=beta, sigma=sigma, gamma=gamma,
n=n, history=history,
sum_less_equal=False)
def negative_llf_gradient_separate_params(n, beta, sigma, gamma):
if sigma == gamma:
sigma += 1e-7
return -llf_factor * dllf_dn_sigma_neq_gamma(beta=beta, sigma=sigma,
gamma=gamma, n=n,
history=history,
sum_less_equal=False)
eps = np.finfo(float).eps
if estimate_n_only:
return fmin_l_bfgs_b(
func=negative_llf_separate_params, # minimize this
x0=np.array([n_start]), # initial guess
args=(beta_start, sigma_start, gamma_start), # additional args to func&fprime
fprime=negative_llf_gradient_separate_params,
bounds=[(len(history), None)],
iprint=1
)
else:
return fmin_l_bfgs_b(
func=negative_llf, # minimize this
x0=np.array([beta_start,
sigma_start,
gamma_start,
n_start]), # initial guess
fprime=negative_llf_gradient,
bounds=[(eps, None),
(eps, None),
(eps, None),
(len(history), None)],
factr=10,
iprint=1
)
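# Illustrative call sketch (editor addition). Kept commented out because the
# optimizer relies on module-level helpers referenced above (llf_factor,
# llf_gradient, dllf_dn_sigma_neq_gamma) that are defined elsewhere in this
# module, and because the fit is comparatively expensive. The event times and
# starting values are arbitrary.
# _params_hat, _neg_llf_min, _info = fit_sigma_neq_gamma(
#     history=np.array([0.0, 0.5, 1.2, 2.0, 2.7, 3.1]),
#     beta_start=1.0, sigma_start=0.6, gamma_start=0.3)
# _beta_hat, _sigma_hat, _gamma_hat, _n_hat = _params_hat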
def fit_sigma_eq_gamma(history, beta_start=None, gamma_start=None,
n_start=None, estimate_n_only=False):
"""
Parameters
----------
history : np.array
1-dimensional array containing the event times in ascending order.
beta_start : float
Starting value for the likelihood optimization.
gamma_start : float
Starting value for the likelihood optimization.
n_start : float or None, default: None
Starting value for the likelihood optimization. If None, a value is
chosen based on the number of events contained in the `history`.
estimate_n_only : bool, default: False
If True, `beta` and `gamma` are considered to be fixed and only
:math:`N` is fitted. Otherwise, `beta` and `gamma` are fitted together
with :math:`N`.
References
----------
This method uses the L-BFGS algorithm (see [1]_).
.. [1] <NAME>, <NAME> and <NAME>. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
"""
if estimate_n_only and (beta_start is None or gamma_start is None):
raise Exception("If beta and gamma are fixed, their values "
"must be provided!")
if n_start is None:
n_start = len(history) + .5
def negative_llf(beta_gamma_n):
"""
Parameters
----------
        beta_gamma_n : np.array (shape (3))
            Values for the parameters beta, gamma, and N in a single array.
Returns
-------
neg_llf : float
The negative log-likelihood.
"""
beta, gamma, n = tuple(beta_gamma_n)
return -llf(beta=beta, sigma=None, gamma=gamma, n=n, history=history,
sum_less_equal=False)
def negative_llf_separate_params(n, beta, gamma):
"""
Same as :func:`negative_llf` but taking the parameters `n`, `beta`,
and `gamma` as separate arguments. This makes the function suitable for
likelihood maximization in only one parameter (`n`) with fixed values
for `beta` and `gamma`.
"""
return -llf(beta=beta, sigma=None, gamma=gamma, n=n, history=history,
sum_less_equal=False)
def negative_llf_gradient(beta_gamma_n):
beta, gamma, n = tuple(beta_gamma_n)
return -llf_gradient(beta=beta, sigma=None, gamma=gamma, n=n,
history=history, sum_less_equal=False)
def negative_llf_gradient_separate_params(n, beta, gamma):
return -dllf_dn_sigma_eq_gamma(beta=beta, gamma=gamma, n=n,
history=history, sum_less_equal=False)
eps = np.finfo(float).eps
if estimate_n_only:
return fmin_l_bfgs_b(
func=negative_llf_separate_params, # minimize this
x0=np.array([n_start]), # initial guess
args=(beta_start, gamma_start), # additional args to func&fprime
fprime=negative_llf_gradient_separate_params,
bounds=[(len(history), None)],
iprint=1
)
else:
return fmin_l_bfgs_b(
func=negative_llf, # minimize this
x0=np.array([beta_start, gamma_start, n_start]), # initial guess
fprime=negative_llf_gradient,
bounds=[(eps, None), (eps, None), (len(history), None)],
iprint=1
)
def dllf_dbeta_sigma_neq_gamma(beta, sigma, gamma, n, history,
sum_less_equal=True):
"""
Parameters
----------
beta : float
See corresponding argument in :func:`exp_intensity`.
sigma : float
See corresponding argument in :func:`exp_intensity`.
gamma : float
See corresponding argument in :func:`exp_intensity`.
n : float
See corresponding argument in :func:`exp_intensity`.
history : np.array
See corresponding argument in :func:`exp_intensity`.
sum_less_equal : bool, default: True
See corresponding argument in :func:`exp_intensity`.
Note that this argument does not affect the derivative w.r.t. the beta
parameter. Thus, the return value does not depend on it.
Returns
-------
derivative_wrt_beta : float
The derivative (w.r.t. the beta parameter) of the log-likelihood
function given the `history` and evaluated at the parameters
`beta`, `sigma`, `gammma`, and `n`.
"""
sum_part = np.count_nonzero(history) / beta
int_part = 0
for l in range(len(history) - 1):
int_part += (n - (l + 1)) / n * (
np.sum(
np.exp(-sigma * (history[l] - history[:l + 1]))
-
np.exp(-sigma * (history[l + 1] - history[:l + 1]))
) / sigma
-
np.sum(
|
np.exp(-gamma * (history[l] - history[:l + 1]))
|
numpy.exp
|
import numpy as np
from pymoab import core
from pymoab import types
from pymoab import topo_util
from PyTrilinos import Epetra, AztecOO, EpetraExt # , Amesos
import time
import math
import os
import shutil
import random
import sys
from test34 import MsClassic_mono
class MsClassic_mono_faces(MsClassic_mono):
def __init__(self, ind = False):
super().__init__(ind = ind)
def calculate_local_problem_het_faces(self, elems, lesser_dim_meshsets, support_vals_tag):
lim = 1e-9
all_elems_bound = [self.mb.get_entities_by_handle(ms) for ms in lesser_dim_meshsets]
soma = sum(
[self.mb.tag_get_data(support_vals_tag, elems, flat=True)[0]
if len(all_elems_bound) <= 2
else sum(self.mb.tag_get_data(support_vals_tag, elems, flat=True))
for elems in all_elems_bound]
)
if soma < lim:
self.mb.tag_set_data(support_vals_tag, elems, np.repeat(0.0, len(elems)))
else:
std_map = Epetra.Map(len(elems), 0, self.comm)
linear_vals = np.arange(0, len(elems))
id_map = dict(zip(elems, linear_vals))
boundary_elms = set()
lim = 1e-5
b = Epetra.Vector(std_map)
x = Epetra.Vector(std_map)
A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
for ms in lesser_dim_meshsets:
lesser_dim_elems = self.mb.get_entities_by_handle(ms)
for elem in lesser_dim_elems:
if elem in boundary_elms:
continue
boundary_elms.add(elem)
idx = id_map[elem]
A.InsertGlobalValues(idx, [1], [idx])
b[idx] = self.mb.tag_get_data(support_vals_tag, elem, flat=True)[0]
for elem in (set(elems) ^ boundary_elms):
values, ids = self.mount_lines_3(elem, id_map)
A.InsertGlobalValues(id_map[elem], values, ids)
A.FillComplete()
x = self.solve_linear_problem(A, b, len(elems))
self.mb.tag_set_data(support_vals_tag, elems,
|
np.asarray(x)
|
numpy.asarray
|
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import scipy as sc
from scipy import ndimage
import random as rand
from sklearn import preprocessing, linear_model
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.ticker import MaxNLocator
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from tabulate import tabulate
import dill
import copy
from core.controllers import PDController
from core.dynamics import ConfigurationDynamics
from koopman_core.controllers import OpenLoopController, MPCController, PerturbedController, NonlinearMPCControllerNb, BilinearMPCControllerNb
from koopman_core.dynamics import LinearLiftedDynamics, BilinearLiftedDynamics
from koopman_core.learning import Edmd, BilinearEdmd
from koopman_core.basis_functions import PlanarQuadBasis
from koopman_core.learning.utils import differentiate_vec
from koopman_core.systems import PlanarQuadrotorForceInput
class QuadrotorPdOutput(ConfigurationDynamics):
def __init__(self, dynamics, xd, t_d, n, m):
ConfigurationDynamics.__init__(self, dynamics, 1)
self.xd = xd
self.t_d = t_d
self.xd_dot = differentiate_vec(self.xd, self.t_d)
self.n = n
self.m = m
def proportional(self, x, t):
        q, q_dot = x[:int(self.n/2)], x[int(self.n/2):]
return self.y(q) - self.y_d(t)
def derivative(self, x, t):
        q, q_dot = x[:int(self.n/2)], x[int(self.n/2):]
return self.dydq(q)@q_dot - self.y_d_dot(t)
def y(self, q):
return q
def dydq(self, q):
return np.eye(int(self.n/2))
def d2ydq2(self, q):
return np.zeros((int(self.n/2), int(self.n/2), int(self.n/2)))
def y_d(self, t):
return self.desired_state_(t)[:int(self.n/2)]
def y_d_dot(self, t):
return self.desired_state_(t)[int(self.n/2):]
def y_d_ddot(self, t):
return self.desired_state_dot_(t)[int(self.n/2):]
def desired_state_(self, t):
return [np.interp(t, self.t_d.flatten(),self.xd[:,ii].flatten()) for ii in range(self.xd.shape[1])]
def desired_state_dot_(self, t):
return [np.interp(t, self.t_d.flatten(),self.xd_dot[:,ii].flatten()) for ii in range(self.xd_dot.shape[1])]
class PlanarQuadrotorForceInputDiscrete(PlanarQuadrotorForceInput):
def __init__(self, mass, inertia, prop_arm, g=9.81, dt=1e-2):
PlanarQuadrotorForceInput.__init__(self, mass, inertia, prop_arm, g=g)
self.dt=dt
def eval_dot(self, x, u, t):
return x + self.dt*self.drift(x, t) + self.dt*np.dot(self.act(x, t),u)
def get_linearization(self, x0, x1, u0, t):
m, J, b, g = self.params
A_lin = np.eye(self.n) + self.dt*np.array([[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, -(1/m)*np.cos(x0[2])*u0[0] -(1/m)*np.cos(x0[2])*u0[1], 0, 0, 0],
[0, 0, -(1/m)*np.sin(x0[2])*u0[0] -(1/m)*np.sin(x0[2])*u0[1], 0, 0, 0],
[0, 0, 0, 0, 0, 0],])
B_lin = self.dt*np.array([[0, 0],
[0, 0],
[0, 0],
[-(1/m)*np.sin(x0[2]), -(1/m)*np.sin(x0[2])],
[(1/m)*np.cos(x0[2]), (1/m)*np.cos(x0[2])],
[-b/J, b/J]])
if x1 is None:
x1 = A_lin@x0 + B_lin@u0
f_d = self.eval_dot(x0,u0,t)
r_lin = f_d - x1
return A_lin, B_lin, r_lin
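# Illustrative sketch (editor addition, hedged): linearizing the discrete-time
# model around hover. This assumes the koopman_core package is installed and
# that the parent class exposes .params and .n as used in get_linearization
# above; the numeric values mirror the script parameters defined below.
# _quad_discrete = PlanarQuadrotorForceInputDiscrete(2., 1., 0.2, g=9.81, dt=1e-2)
# _x_hover, _u_hover = np.zeros(6), np.array([2. * 9.81 / 2., 2. * 9.81 / 2.])
# _A_d, _B_d, _r_d = _quad_discrete.get_linearization(_x_hover, None, _u_hover, 0.)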
# Cart pole system parameters
mass = 2.
inertia = 1.
prop_arm = 0.2
gravity = 9.81
quadrotor = PlanarQuadrotorForceInput(mass, inertia, prop_arm, g=gravity)
# Linearized system specification:
n, m = 6, 2 # Number of states, number of control inputs
A_nom = np.array([[0., 0., 0., 1., 0., 0.], # Linearization of the true system around the origin
[0., 0., 0., 0., 1., 0.],
[0., 0., 0., 0., 0., 1.],
[0., 0., -gravity, 0., 0., 0.],
[0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0.]])
B_nom = np.array([[0., 0.], # Linearization of the true system around the origin
[0., 0.],
[0., 0.],
[0., 0.],
[1./mass, 1./mass],
[-prop_arm/inertia, prop_arm/inertia]])
hover_thrust = mass*gravity/m
# Data collection parameters:
q_dc, r_dc = 5e2, 1 # State and actuation penalty values, data collection
Q_dc = q_dc * np.identity(n) # State penalty matrix, data collection
R_dc = r_dc*np.identity(m) # Actuation penalty matrix, data collection
P_dc = sc.linalg.solve_continuous_are(A_nom, B_nom, Q_dc, R_dc) # Algebraic Ricatti equation solution, data collection
K_dc = np.linalg.inv(R_dc)@B_nom.T@P_dc # LQR feedback gain matrix, data collection
K_dc_p = K_dc[:,:int(n/2)] # Proportional control gains, data collection
K_dc_d = K_dc[:,int(n/2):] # Derivative control gains, data collection
nominal_sys = LinearLiftedDynamics(A_nom, B_nom, np.eye(n), lambda x: x)
dt = 1.0e-2 # Time step length
traj_length_dc = 2. # Trajectory length, data collection
n_pred_dc = int(traj_length_dc/dt) # Number of time steps, data collection
t_eval = dt * np.arange(n_pred_dc + 1) # Simulation time points
n_traj_dc = 100 # Number of trajectories to execute, data collection
noise_var = 5. # Exploration noise to perturb controller, data collection
xmax = np.array([2, 2, np.pi/3, 2.,2.,2.]) # State constraints, trajectory generation
xmin = -xmax
umax =
|
np.array([2*hover_thrust, 2*hover_thrust])
|
numpy.array
|
# icsdll.py
# Copyright (c) 2016-2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Interface to the image correlation spectroscopy library ICSx64.dll.
ICSdll is a Python ctypes interface to the Image Correlation Spectroscopy
Dynamic Link Library (ICSx64.dll) developed at the Laboratory for Fluorescence
Dynamics (LFD) for the Globals for Images SimFCS software.
ICSx64.dll is implemented in C++ using the Intel(r) Math Kernel Library and
OpenMP. It provides functions and classes for the analysis of fluorescence
time series data:
* 1D, 2D, and 3D auto- and cross-correlation
* Image pair correlation function (ipCF)
* Airy detector pair correlation function (apCF)
* Image mean square displacement (iMSD)
* Line spatio-temporal image correlation spectroscopy (lSTICS)
* Fit 1D pair correlation functions to the results of ipCF analysis
* Subtract immobile fractions
* Correct photo-bleaching
* 1D DFTs of image stack
* Richardson Lucy deconvolution (WIP)
:Author:
`<NAME> <https://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics. University of California, Irvine
:License: BSD 3-Clause
:Version: 2021.3.2
Requirements
------------
* `CPython >= 3.7, 64-bit <https://www.python.org>`_
* `Numpy 1.19.5 <https://pypi.org/project/numpy/>`_
* `Intel(r) Math Kernel Library <https://software.intel.com/en-us/mkl>`_
(build)
* `Visual Studio 2019 C++ compiler <https://visualstudio.microsoft.com/>`_
(build)
Revisions
---------
2021.3.2
Rebuild package.
2019.11.22
Wrap yxt_dft functions.
Upgrade to ICSx64.DLL version 2019.11.22.
2019.7.10
Pass 22 tests.
Wrap apcf, imsd, and lstics functions.
Raise IcsError in case of DLL function errors.
Use ICSx64.DLL version 2019.7.10.
2019.5.22
Initial release based on ICSx64.DLL version 2018.7.18.
Notes
-----
ICSdll is currently developed, built, and tested on 64-bit Windows only.
The API is not stable and might change between revisions.
Refer to the C++ header file and source code for function signatures.
References
----------
1. `ipcf.ipynb <https://www.lfd.uci.edu/~gohlke/ipcf/>`_
Pair correlation function analysis of fluorescence fluctuations in
big image time series using Python.
2. `Globals for Images SimFCS <https://www.lfd.uci.edu/globals/>`_,
Software for fluorescence image acquisition, analysis, and simulation.
3. `Globals for Airyscan <https://www.lfd.uci.edu/globals/>`_,
Image correlation analysis for the ZEISS(tm) LSM 880 Airyscan detector.
"""
__version__ = '2021.3.2'
__all__ = (
'API',
'IcsError',
'rfftnd',
'xyt',
'nlsp',
'yxt_ipcf',
'yxt_apcf',
'yxt_imsd',
'yxt_lstics',
'yxt_subtract_immobile',
'yxt_correct_bleaching',
'yxt_dft',
'zyx_deconv',
'ipcf_nlsp_1dpcf',
'radial',
'circle',
'logbins',
'bins2times',
'points2distances',
'nextpow2',
'numpy_correlate',
)
import os
import math
import ctypes
import warnings
import numpy
def API(dllname=None):
"""Return ctypes interface to functions of ICSx64 DLL."""
from ctypes import (
c_int,
c_int32,
c_int64,
c_size_t,
c_ssize_t,
c_double,
c_float,
c_char_p,
POINTER,
)
c_ssize_t_p = POINTER(c_ssize_t)
c_double_p = POINTER(c_double)
handle_t = c_size_t
if dllname is None:
dllname = os.path.join(os.path.dirname(__file__), 'ICSx64.dll')
api = ctypes.CDLL(dllname)
api.VERSION = c_char_p.in_dll(api, 'ICS_VERSION').value.decode('ascii')
api.MODE_FCS = 2
api.MODE_CC = 4
api.AXIS0 = 1
api.AXIS1 = 8
api.AXIS2 = 16
api.FALSE = 0
api.TRUE = 1
api.OK = 0
api.ERROR = -1
api.VALUE_ERROR = -2
api.MEMORY_ERROR = -3
api.NOTIMPLEMENTD_ERROR = -4
api.VALUE_ERROR1 = -201
api.VALUE_ERROR2 = -202
api.VALUE_ERROR3 = -203
api.VALUE_ERROR4 = -204
api.VALUE_ERROR5 = -205
api.VALUE_ERROR6 = -206
api.VALUE_ERROR7 = -207
api.VALUE_ERROR8 = -208
api.VALUE_ERROR9 = -209
api.MODE_DEFAULT = 0
api.MODE_TIME = 1 # do not center correlation results in axis 0 (time)
api.MODE_FCS = 2 # normalize correlation results according to FCS
api.MODE_CC = 4 # allocate second buffer for cross correlation
api.AXIS0 = 1 # do not center correlation results in axis 0
api.AXIS1 = 8 # do not center correlation results in axis 1
api.AXIS2 = 16 # do not center correlation results in axis 2
api.MASK_DEFAULT = 0
api.MASK_ANY = 0 # any one value must be True
api.MASK_FIRST = 1 # first mask value must be True
api.MASK_CENTER = 2 # center mask value must be True
api.MASK_ALL = 4 # all mask values must be True
api.MASK_CLEAR = 32 # clear output if not calculated
api.RADIUS = 1
api.DIAMETER = 2
api.NLSP_ND = 1
api.NLSP_1DPCF = 100
api.ICS_DECONV_DEFAULT = 1
api.ICS_DECONV_RICHARDSON_LUCY = 1
api.ICS_DECONV_WIENER = 2
api.ICS_DECONV_NOPAD = 256
api.DTYPES = {'l': 'i', 'i': 'i', 'h': 'h', 'H': 'H', 'd': 'd', 'f': 'f'}
def ndpointer(dtype=None, ndim=None, shape=None, flags=None, null=False):
"""Return numpy.ctypes.ndpointer type that also accepts None/NULL."""
cls = numpy.ctypeslib.ndpointer(dtype, ndim, shape, flags)
if not null:
return cls
from_param_ = cls.from_param
def from_param(cls, param):
if param is None:
return param
return from_param_(param)
cls.from_param = classmethod(from_param)
return cls
def outer(a, b, skip=tuple()):
return ((x, y) for x in a for y in b if (x, y) not in skip)
# rfft#d_ functions
for nd in (1, 2, 3):
rfftnd = f'rfft{nd}d_'
func = getattr(api, rfftnd + 'new')
setattr(func, 'argtypes', [c_ssize_t, c_int])
setattr(func, 'restype', handle_t)
func = getattr(api, rfftnd + 'del')
setattr(func, 'argtypes', [handle_t])
setattr(func, 'restype', None)
func = getattr(api, rfftnd + 'mode')
setattr(func, 'argtypes', [c_int])
setattr(func, 'restype', None)
for i, o in outer('dfihH', 'df'):
ai = ndpointer(dtype=i, ndim=nd)
ao = ndpointer(dtype=o, ndim=nd)
func = getattr(api, rfftnd + f'autocorrelate_{i}{o}')
setattr(func, 'argtypes', [handle_t, ai, ao, c_ssize_t_p])
setattr(func, 'restype', c_int)
func = getattr(api, rfftnd + f'crosscorrelate_{i}{o}')
setattr(
func,
'argtypes',
[handle_t, ai, ai, ao, c_ssize_t_p, c_ssize_t_p],
)
setattr(func, 'restype', c_int)
# nlsp class
# TODO: test nlsp_ functions
api.nlsp_new.restype = handle_t
api.nlsp_new.argtypes = [c_int, c_ssize_t_p]
api.nlsp_del.restype = None
api.nlsp_del.argtypes = [handle_t]
api.nlsp_get.restype = c_int
api.nlsp_get.argtypes = [
handle_t,
c_ssize_t_p,
c_ssize_t_p,
c_double_p,
c_double_p,
]
api.nlsp_set.restype = c_int
api.nlsp_set.argtypes = [
handle_t,
c_ssize_t,
c_ssize_t,
ndpointer(dtype='float64', shape=(6,), null=True),
c_double,
c_double,
]
for dt in 'fd':
func = getattr(api, f'nlsp_eval_{dt}')
setattr(func, 'restype', c_int)
setattr(func, 'argtypes', [handle_t, ndpointer(dtype=dt), c_ssize_t_p])
func = getattr(api, f'nlsp_solve_{dt}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
handle_t,
ndpointer(dtype=dt), # data
c_ssize_t_p, # strides
ndpointer(dtype='float64', ndim=1), # extra
ndpointer(dtype='float64', ndim=1, null=True), # guess
ndpointer(dtype='float64', ndim=1, null=True), # bounds
ndpointer(dtype='float64', ndim=1, null=True), # datasolution
],
)
# xyt class
api.yxt_new.restype = handle_t
api.yxt_new.argtypes = [c_ssize_t_p]
api.yxt_del.restype = None
api.yxt_del.argtypes = [handle_t]
api.yxt_get_buffer.restype = c_double_p
api.yxt_get_buffer.argtypes = [handle_t, c_ssize_t_p, c_ssize_t_p]
for ti, to in outer('dfihH', 'df'):
# yxt_ipcf_*
func = getattr(api, f'yxt_ipcf_{ti}{to}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
handle_t,
ndpointer(dtype=ti, ndim=3, null=True), # data
ndpointer(dtype=ti, ndim=3, null=True), # channel
c_ssize_t_p, # strides
ndpointer(dtype=to, ndim=4), # out
c_ssize_t_p, # outstrides
ndpointer(dtype='intp', ndim=1), # points
c_ssize_t, # npoints
ndpointer(dtype='intp', ndim=1), # bins
c_ssize_t, # nbins
c_double, # threshold
c_double, # filter
c_int, # nthreads
],
)
# yxt_apcf_*
func = getattr(api, f'yxt_apcf_{ti}{to}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
handle_t,
ndpointer(dtype=ti, ndim=2, null=True), # data
c_ssize_t_p, # strides
ndpointer(dtype=to, ndim=3), # out
c_ssize_t_p, # outstrides
ndpointer(dtype='intp', ndim=1), # bins
c_ssize_t, # nbins
c_int, # autocorr
c_double, # filter
c_int, # nthreads
],
)
# yxt_imsd_*
func = getattr(api, f'yxt_imsd_{ti}{to}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
handle_t,
ndpointer(dtype=ti, ndim=3, null=True), # data
c_ssize_t_p, # strides
ndpointer(dtype=ti, ndim=3, null=True), # data1
c_ssize_t_p, # strides1
ndpointer(dtype='int32', ndim=2, null=True), # mask
c_ssize_t_p, # maskstrides
c_int, # maskmode
ndpointer(dtype=to, ndim=5), # out
c_ssize_t_p, # outstrides
c_ssize_t_p, # blocks
ndpointer(dtype='intp', ndim=1, null=True), # bins
c_ssize_t, # nbins
c_double, # filter
c_int, # nthreads
],
)
# yxt_lstics_*
func = getattr(api, f'yxt_lstics_{ti}{to}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
handle_t,
ndpointer(dtype=ti, ndim=3, null=True), # data
c_ssize_t_p, # strides
ndpointer(dtype=ti, ndim=3, null=True), # data1
c_ssize_t_p, # strides1
ndpointer(dtype='int32', ndim=2, null=True), # mask
c_ssize_t_p, # maskstrides
c_int, # maskmode
ndpointer(dtype=to, ndim=5), # out
c_ssize_t_p, # outstrides
ndpointer(dtype='intp', ndim=3), # lines
c_ssize_t_p, # lineshape
c_ssize_t_p, # blocks
ndpointer(dtype='intp', ndim=1), # bins
c_ssize_t, # nbins
c_double, # filter
c_int, # nthreads
],
)
# ipcf_nlsp_1dpcf
for dt in 'f':
func = getattr(api, f'ipcf_nlsp_1dpcf_{dt}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
ndpointer(dtype=dt, ndim=4), # ipcf
c_ssize_t_p, # shape
c_ssize_t_p, # strides
ndpointer(dtype=dt, ndim=1), # times
ndpointer(dtype=dt, ndim=1), # distances
ndpointer(dtype=dt, ndim=1), # args
ndpointer(dtype=dt, ndim=1, null=True), # bounds
ndpointer(dtype=dt, ndim=4, null=True), # ix
c_ssize_t_p, # stridesx
ndpointer(dtype=dt, ndim=4, null=True), # ifx
c_ssize_t_p, # stridesfx
ndpointer(dtype=dt, ndim=4, null=True), # status
c_ssize_t_p, # stridestatus
ndpointer(dtype=dt, ndim=1, null=True), # settings
c_int, # average (bool)
c_int, # nthreads
],
)
# subtract_immobile, yxt_correct_bleaching
for dt in 'ihH':
ai = ndpointer(dtype=dt, ndim=3)
func = getattr(api, f'yxt_subtract_immobile_{dt}')
setattr(func, 'argtypes', [ai, c_ssize_t_p, c_ssize_t_p, c_int])
setattr(func, 'restype', c_int)
func = getattr(api, f'yxt_correct_bleaching_{dt}')
setattr(
func,
'argtypes',
[
ai,
c_ssize_t_p,
c_ssize_t_p,
ndpointer(dtype='double', ndim=2),
c_ssize_t_p,
c_double,
c_int,
],
)
setattr(func, 'restype', c_int)
# yxt_dft
for ti, to in outer('dfihH', 'df'):
if ti + to in 'dfd':
continue
func = getattr(api, f'yxt_dft_{ti}{to}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
ndpointer(dtype=ti, ndim=3), # data
c_ssize_t_p, # shape
c_ssize_t_p, # strides
ndpointer(dtype=to, ndim=3), # out
c_ssize_t_p, # outshape
c_ssize_t_p, # outstrides
c_int, # nthreads
],
)
# zyx_deconv
try:
for ti, to in ('ff', 'dd', 'Hf'): # outer('fH', 'f'):
func = getattr(api, f'zyx_deconv_{ti}{to}')
setattr(func, 'restype', c_int)
setattr(
func,
'argtypes',
[
ndpointer(dtype=ti, ndim=3), # image
c_ssize_t_p, # shape
c_ssize_t_p, # strides
ndpointer(dtype=ti, ndim=3), # psf
c_ssize_t_p, # shape
c_ssize_t_p, # strides
ndpointer(dtype=to, ndim=3), # out
c_ssize_t_p, # outshape
c_ssize_t_p, # outstrides
c_int, # niter
c_int, # mode
c_int, # nthreads
],
)
except AttributeError:
pass
# helper functions
api.radial.restype = c_ssize_t
api.radial.argtypes = [
ndpointer(dtype='intp', ndim=3),
c_ssize_t,
c_ssize_t,
ndpointer(dtype='float64', ndim=1),
c_int,
]
api.circle.restype = c_ssize_t
api.circle.argtypes = [
c_ssize_t,
ndpointer(dtype='intp', ndim=2, null=True),
c_ssize_t,
]
api.logbins.restype = c_ssize_t
api.logbins.argtypes = [
c_ssize_t,
c_ssize_t,
ndpointer(dtype='intp', ndim=1),
]
api.points2distances_f.restype = c_float
api.points2distances_f.argtypes = [
ndpointer(dtype='intp', ndim=2),
c_ssize_t,
c_float,
ndpointer(dtype='float32', ndim=1),
]
api.points2distances_d.restype = c_double
api.points2distances_d.argtypes = [
ndpointer(dtype='intp', ndim=1),
c_ssize_t,
c_double,
ndpointer(dtype='float64', ndim=1),
]
api.bins2times_f.restype = c_float
api.bins2times_f.argtypes = [
ndpointer(dtype='intp', ndim=1),
c_ssize_t,
c_float,
ndpointer(dtype='float32', ndim=1),
]
api.bins2times_d.restype = c_double
api.bins2times_d.argtypes = [
ndpointer(dtype='intp', ndim=1),
c_ssize_t,
c_double,
ndpointer(dtype='float64', ndim=1),
]
api.nextpow2_i.restype = c_int32
api.nextpow2_i.argtypes = [c_int32]
api.nextpow2_q.restype = c_int64
api.nextpow2_q.argtypes = [c_int64]
return api
API = API()
class IcsError(RuntimeError):
"""ICS DLL Exceptions."""
def __init__(self, func, err):
msg = {
None: 'NULL',
API.OK: 'OK',
API.ERROR: 'ERROR',
API.VALUE_ERROR: 'VALUE_ERROR',
API.MEMORY_ERROR: 'MEMORY_ERROR',
API.NOTIMPLEMENTD_ERROR: 'NOTIMPLEMENTD_ERROR',
API.VALUE_ERROR1: 'VALUE_ERROR1',
API.VALUE_ERROR2: 'VALUE_ERROR2',
API.VALUE_ERROR3: 'VALUE_ERROR3',
API.VALUE_ERROR4: 'VALUE_ERROR4',
API.VALUE_ERROR5: 'VALUE_ERROR5',
API.VALUE_ERROR6: 'VALUE_ERROR6',
API.VALUE_ERROR7: 'VALUE_ERROR7',
API.VALUE_ERROR8: 'VALUE_ERROR8',
API.VALUE_ERROR9: 'VALUE_ERROR9',
}.get(err, f'unknown error {err}')
RuntimeError.__init__(self, f'{func.__name__} returned {msg}')
class rfftnd:
"""Wrapper for rfft#d_ functions."""
def __init__(self, shape, mode):
self._rfftnd = f'rfft{len(shape)}d_'
func = getattr(API, self._rfftnd + 'new')
self._handle = func(*shape, mode)
if self._handle == 0:
raise IcsError(func, None)
self._mode = mode
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, value):
self._mode = value
func = getattr(API, self._rfftnd + 'mode')
func(value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._handle:
func = getattr(API, self._rfftnd + 'del')
func(self._handle)
self._handle = None
def autocorrelate(self, a, out):
if out is None:
out = a
func = getattr(
API,
self._rfftnd
+ 'autocorrelate_{}{}'.format(
API.DTYPES[a.dtype.char], API.DTYPES[out.dtype.char]
),
)
status = func(self._handle, a, out, a.ctypes.strides)
if status:
raise IcsError(func, status)
def crosscorrelate(self, a, b, out):
if out is None:
out = a
func = getattr(
API,
self._rfftnd
+ 'crosscorrelate_{}{}'.format(
API.DTYPES[a.dtype.char], API.DTYPES[out.dtype.char]
),
)
status = func(
self._handle, a, b, out, a.ctypes.strides, b.ctypes.strides
)
if status:
raise IcsError(func, status)
class nlsp:
"""Wrapper class for nlsp_ functions.
Solver of non-linear least squares problem with linear boundary
constraints using RCI and the Trust-Region algorithm.
Only the "1D pair correlation function" diffusion model is currently
supported.
"""
def __init__(self, shape, model='1dpcf'):
warnings.warn('the nlsp class is untested')
model = {API.NLSP_1DPCF: API.NLSP_1DPCF, '1dpcf': API.NLSP_1DPCF}[
model
]
shape = (ctypes.c_ssize_t * len(shape))(*shape)
self._handle = API.nlsp_new(model, shape)
if self._handle == 0:
raise IcsError(API.nlsp_new, None)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._handle:
API.nlsp_del(self._handle)
self._handle = None
def solve(self, data, extra, guess=None, bounds=None, solution=None):
"""Solve nonlinear least squares problem.
For the 1dpcf model, the 'extra' argument contains the xaxis values,
the w2 parameter, and the squared distance.
"""
func = getattr(API, f'nlsp_solve_{API.DTYPES[data.dtype.char]}')
status = func(self._handle, data, data.strides)
if status:
raise IcsError(func, status)
def eval(self, data):
"""Evaluate function using current solution vector."""
func = getattr(API, f'nlsp_eval_{API.DTYPES[data.dtype.char]}')
status = func(self._handle, data, data.strides)
if status:
raise IcsError(func, status)
def get(self):
"""Return solution statuses.
Return number of iterations, stop criterion, initial residual, and
final residual.
"""
it = ctypes.c_ssize_t()
st_cr = ctypes.c_ssize_t()
r1 = ctypes.c_double()
r2 = ctypes.c_double()
status = API.nlsp_get(self._handle, it, st_cr, r1, r2)
if status:
raise IcsError(API.nlsp_get, status)
return it.value, st_cr.value, r1.value, r2.value
def set(self, iter1=0, iter2=0, eps=None, eps_jac=0.0, rs=0.0):
"""Set solver parameters."""
status = API.nlsp_set(self._handle, iter1, iter2, eps, eps_jac, rs)
if status:
raise IcsError(API.nlsp_set, status)
class xyt:
"""Wrapper class for xyt_ functions."""
def __init__(self, shape):
shape = (ctypes.c_ssize_t * len(shape))(*shape)
self._handle = API.yxt_new(shape)
if self._handle == 0:
raise IcsError(API.yxt_new, None)
        # retrieve truncated shape
API.yxt_get_buffer(self._handle, shape, None)
self.shape = int(shape[0]), int(shape[1]), int(shape[2])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
if self._handle:
API.yxt_del(self._handle)
self._handle = None
def ipcf(
self,
data,
points,
bins,
channel=None,
out=None,
threshold=0.0,
smooth=0.0,
nthreads=0,
verbose=False,
):
"""Image pair correlation function."""
if (
data.ndim != 3
or data.shape[0] != self.shape[0]
or data.shape[1] != self.shape[1]
):
raise ValueError('invalid data shape')
if channel is not None and (
channel.strides != data.strides or channel.shape != data.shape
):
raise ValueError('invalid channel shape')
npoints = points.shape[0]
nbins = len(bins)
x0, y0 = points.min(axis=0)
x1, y1 = points.max(axis=0)
outshape = (
data.shape[0] - y1 + x0,
data.shape[1] - x1 + x0,
npoints,
nbins,
)
if out is None:
out = numpy.zeros(shape=outshape, dtype='float32')
if out.ndim != 4 or out.size < product(outshape):
raise ValueError()
func = getattr(
API,
'yxt_ipcf_{}{}'.format(
API.DTYPES[data.dtype.char], API.DTYPES[out.dtype.char]
),
)
if verbose:
print('data shape =', data.shape)
print('data strides =', data.strides)
print('out shape =', out.shape)
print('out strides =', out.strides)
status = func(
self._handle,
data,
channel,
data.ctypes.strides,
out,
out.ctypes.strides,
points.flatten(),
npoints,
bins,
nbins,
threshold,
smooth,
nthreads,
)
if status != 0:
raise IcsError(func, status)
return out
def apcf(
self,
data,
bins,
out=None,
autocorr=True,
smooth=0.0,
nthreads=0,
verbose=False,
):
"""Airy detector pair correlation."""
if (
data.ndim != 2
or data.shape[0] != self.shape[1]
or self.shape[0] != 1
):
raise ValueError('invalid data shape')
nbins = len(bins)
if autocorr:
outshape = data.shape[0], data.shape[0], nbins
else:
outshape = data.shape[0], data.shape[0] - 1, nbins
if out is None:
out = numpy.zeros(shape=outshape, dtype='float32')
if out.ndim != 3 or out.size < product(outshape):
raise ValueError()
func = getattr(
API,
'yxt_apcf_{}{}'.format(
API.DTYPES[data.dtype.char], API.DTYPES[out.dtype.char]
),
)
if verbose:
print('data shape =', data.shape)
print('data strides =', data.strides)
print('out shape =', out.shape)
print('out strides =', out.strides)
status = func(
self._handle,
data,
data.ctypes.strides,
out,
out.ctypes.strides,
bins,
nbins,
autocorr,
smooth,
nthreads,
)
if status != 0:
raise IcsError(func, status)
return out
def imsd(
self,
data,
block,
bins,
channel=None,
out=None,
mask=None,
mask_mode=None,
smooth=0.0,
nthreads=0,
verbose=False,
):
"""Image mean square displacement."""
if (
data.ndim != 3
or data.shape[0] != self.shape[0]
or data.shape[1] != self.shape[1]
):
raise ValueError('invalid data shape')
if channel is not None:
if channel.shape != data.shape:
raise ValueError('invalid channel shape')
channel_strides = channel.strides
else:
channel_strides = None
if mask is not None:
if mask.shape[:2] != data.shape[:2]:
raise ValueError('invalid mask shape')
mask_strides = mask.strides
else:
mask_strides = None
if mask_mode is None:
mask_mode = API.MASK_ANY | API.MASK_CLEAR
if len(block) != 4:
raise ValueError()
try:
nbins = int(bins)
bins = None
except Exception:
nbins = len(bins)
outshape = (
(data.shape[0] - block[0]) // block[2] + 1,
(data.shape[1] - block[1]) // block[3] + 1,
block[0],
block[1],
nbins,
)
if out is None:
out = numpy.zeros(shape=outshape, dtype='float32')
elif out.ndim != 5 or out.size < product(outshape):
raise ValueError('invalid out shape')
block = (ctypes.c_ssize_t * 4)(*block)
func = getattr(
API,
'yxt_imsd_{}{}'.format(
API.DTYPES[data.dtype.char], API.DTYPES[out.dtype.char]
),
)
if verbose:
print('data shape =', data.shape)
print('data strides =', data.strides)
print('out shape =', out.shape)
print('out strides =', out.strides)
status = func(
self._handle,
data,
data.ctypes.strides,
channel,
channel_strides,
mask,
mask_strides,
mask_mode,
out,
out.ctypes.strides,
block,
bins,
nbins,
smooth,
nthreads,
)
if status != 0:
raise IcsError(func, status)
return out
def lstics(
self,
data,
block,
lines,
bins,
channel=None,
out=None,
mask=None,
mask_mode=None,
smooth=0.0,
nthreads=0,
verbose=False,
):
"""Line spatio temporal image correlation spectroscopy."""
if (
data.ndim != 3
or data.shape[0] != self.shape[0]
or data.shape[1] != self.shape[1]
):
raise ValueError('invalid data shape')
if channel is not None:
if channel.shape != data.shape:
raise ValueError('invalid channel shape')
channel_strides = channel.strides
else:
channel_strides = None
if mask is not None:
if mask.shape[:2] != data.shape[:2]:
raise ValueError('invalid mask shape')
mask_strides = mask.strides
else:
mask_strides = None
if mask_mode is None:
mask_mode = API.MASK_ANY | API.MASK_CLEAR
if len(block) != 4:
raise ValueError()
nbins = len(bins)
outshape = (
(data.shape[0] - block[0]) // block[2] + 1,
(data.shape[1] - block[1]) // block[3] + 1,
lines.shape[0],
lines.shape[1],
nbins,
)
if out is None:
out = numpy.zeros(shape=outshape, dtype='float32')
elif out.ndim != 5 or out.size < product(outshape):
raise ValueError('invalid out shape')
block = (ctypes.c_ssize_t * 4)(*block)
lines_shape = (ctypes.c_ssize_t * 3)(*lines.shape)
func = getattr(
API,
'yxt_lstics_{}{}'.format(
API.DTYPES[data.dtype.char], API.DTYPES[out.dtype.char]
),
)
if verbose:
print('data shape =', data.shape)
print('data strides =', data.strides)
print('out shape =', out.shape)
print('out strides =', out.strides)
print('lines shape =', lines.shape)
status = func(
self._handle,
data,
data.ctypes.strides,
channel,
channel_strides,
mask,
mask_strides,
mask_mode,
out,
out.ctypes.strides,
lines,
lines_shape,
block,
bins,
nbins,
smooth,
nthreads,
)
if status != 0:
raise IcsError(func, status)
return out
def yxt_ipcf(data, radius=4, nbins=32, smooth=0.7, threshold=0.0, nthreads=0):
"""Simplified image pair correlation function."""
# make time axis last dimension
# if data.shape[0] > 8 * data.shape[2]:
# data = numpy.moveaxis(data, 0, -1)
height, width, ntimes = data.shape
# truncate time axis to power of two
ntimes = 2 ** int(math.log(ntimes, 2))
data = data[..., :ntimes]
bins = logbins(ntimes // 2, nbins)
# nbins = bins.shape[0]
points = circle(radius)
# npoints = points.shape[0]
with xyt(data.shape) as handle:
result = handle.ipcf(
data,
points,
bins,
smooth=smooth,
threshold=threshold,
nthreads=nthreads,
)
return result, bins, points
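# Illustrative usage sketch (editor addition). Left commented out because it
# requires ICSx64.dll and is compute heavy; the stack shape and dtype below are
# arbitrary demonstration choices (time must be the last axis).
# _stack = numpy.random.poisson(10, size=(64, 64, 256)).astype('uint16')
# _pcf, _bins, _points = yxt_ipcf(_stack, radius=4, nbins=32, smooth=0.7)
# _pcf.shape  # (height', width', npoints, nbins)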
def yxt_apcf(data, nbins=256, autocorr=True, smooth=0.7, nthreads=0):
"""Simplified airy detector pair correlation."""
width, ntimes = data.shape
# truncate time axis to power of two
ntimes = 2 ** int(math.log(ntimes, 2))
data = data[..., :ntimes]
bins = logbins(ntimes // 2, nbins)
# nbins = bins.shape[0]
with xyt((1, width, ntimes)) as handle:
result = handle.apcf(
data, bins, autocorr=autocorr, smooth=smooth, nthreads=nthreads
)
return result, bins
def yxt_imsd(data, block=(32, 32, 4, 4), bins=16, smooth=0.0, nthreads=0):
"""Simplified image mean square displacement."""
with xyt(data.shape) as handle:
result = handle.imsd(
data, block, bins, smooth=smooth, nthreads=nthreads
)
return result
def yxt_lstics(
data,
block=(16, 16, 1, 1),
nlines=16,
linelength=8,
nbins=16,
smooth=0.0,
nthreads=0,
):
"""Simplified line spatio temporal image correlation spectroscopy."""
height, width, ntimes = data.shape
# truncate time axis to power of two
ntimes = 2 ** int(math.log(ntimes, 2))
data = data[..., :ntimes]
bins = logbins(ntimes // 2, nbins)
# nbins = bins.shape[0]
lines = radial(nlines, linelength)
with xyt(data.shape) as handle:
result = handle.lstics(
data, block, lines, bins, smooth=smooth, nthreads=nthreads
)
return result, bins, lines
def yxt_subtract_immobile(a, nthreads=0):
"""Wrapper for yxt_subtract_immobile_ functions."""
if a.ndim != 3:
raise ValueError('input must be three dimensional')
func = getattr(API, f'yxt_subtract_immobile_{API.DTYPES[a.dtype.char]}')
status = func(a, a.ctypes.shape, a.ctypes.strides, nthreads)
if status:
raise IcsError(func, status)
def yxt_correct_bleaching(a, smooth, nthreads=0):
"""Wrapper for yxt_correct_bleaching_ functions."""
if a.ndim != 3:
raise ValueError('input must be three dimensional')
func = getattr(API, f'yxt_correct_bleaching_{API.DTYPES[a.dtype.char]}')
out =
|
numpy.empty(a.shape[:2], 'float64')
|
numpy.empty
|
try:
import bpy
import bmesh
except ImportError:
pass
import numpy as np
import time
# universal ---------------
def deselect(ob, sel=None, type='vert'):
"""Deselect all then select something"""
x = np.zeros(len(ob.data.vertices), dtype=np.bool)
y = np.zeros(len(ob.data.edges), dtype=np.bool)
z = np.zeros(len(ob.data.polygons), dtype=np.bool)
ob.data.vertices.foreach_set('select', x)
ob.data.edges.foreach_set('select', y)
ob.data.polygons.foreach_set('select', z)
if sel is not None:
if type == 'vert':
x[sel] = True
ob.data.vertices.foreach_set('select', x)
if type == 'edge':
y[sel] = True
ob.data.edges.foreach_set('select', y)
if type == 'face':
z[sel] = True
ob.data.polygons.foreach_set('select', z)
ob.data.update()
# universal ---------------
def eliminate_duplicate_pairs_keep_mirrors(ar):
"""Finds unique index pairs assuming left
and right side are different types:
[[1, 2], [1, 2], [2, 1]] becomes:
[[1, 2], [2, 1]]"""
a = ar
x = np.array(np.random.rand(a.shape[1]), dtype=np.float32)
y = a @ x
unique, index = np.unique(y, return_index=True)
return a[index], index
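# Tiny sanity-check sketch (editor addition): the random-projection trick above
# drops exact duplicates but keeps mirrored pairs such as [1, 2] and [2, 1].
_demo_pairs = np.array([[1, 2], [1, 2], [2, 1]])
_demo_unique_pairs, _demo_unique_index = eliminate_duplicate_pairs_keep_mirrors(_demo_pairs)
# _demo_unique_pairs holds [1, 2] once plus [2, 1]; order follows the projected values.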
# universal ---------------
def get_panel_groups(Bobj, vg=None):
"""Creates a dictionary of boolean arrays
for verts in each panel"""
obm = get_bmesh(Bobj)
count = len(obm.verts)
groups = [i.name for i in Bobj.vertex_groups if i.name.startswith('P_')]
bools = {}
for i in groups:
g_idx = Bobj.vertex_groups[i].index
obm.verts.layers.deform.verify()
dvert_lay = obm.verts.layers.deform.active
boo = np.zeros(count, dtype=np.bool)
bools[i] = boo
relevant = obm.verts
if vg is not None:
relevant = [obm.verts[v] for v in vg]
for v in relevant:
idx = v.index
dvert = v[dvert_lay]
if g_idx in dvert:
boo[idx] = True
return bools
# universal ---------------
def create_spread_key(Bobj, key="flat", margin=5.0, hide_sew_edges=False):
"""Creates a shape key that spreads out
the panels based on their bounding boxes.
" margin " is the space between the panels.
" hide_sew_edges " hides sew edges in edit mode.
Use "alt + H" to unhide."""
new_key = key + "_spread"
keys = Bobj.data.shape_keys.key_blocks
v_count = len(Bobj.data.vertices)
co = np.empty((v_count, 3), dtype=np.float32)
if new_key in keys:
Bobj.data.shape_keys.key_blocks[new_key].data.foreach_get('co', co.ravel())
return co
co = np.empty((v_count, 3), dtype=np.float32)
Bobj.data.shape_keys.key_blocks[key].data.foreach_get('co', co.ravel())
panels = get_panel_groups(Bobj, vg=None)
ls = 0.0
for k, v in panels.items():
x = np.min(co[:, 0][v])
move = ls - x
co[:, 0][v] += move
ls = np.max(co[:, 0][v]) + margin
new_key = key + "_spread"
if new_key not in keys:
Bobj.shape_key_add(name=new_key)
if hide_sew_edges:
obm = get_bmesh(Bobj)
se = [e.index for e in obm.edges if len(e.link_faces) == 0]
eboo = np.zeros(len(obm.edges), dtype=np.bool)
eboo[se] = True
Bobj.data.edges.foreach_set('hide', eboo)
Bobj.data.shape_keys.key_blocks[new_key].data.foreach_set('co', co.ravel())
Bobj.data.update()
return co
# universal ---------------
def get_bmesh(ob=None, refresh=False):
"""gets bmesh in editmode or object mode
by checking the mode"""
if ob.data.is_editmode:
return bmesh.from_edit_mesh(ob.data)
obm = bmesh.new()
obm.from_mesh(ob.data)
if refresh:
obm.verts.ensure_lookup_table()
obm.edges.ensure_lookup_table()
obm.faces.ensure_lookup_table()
return obm
# universal ---------------
def get_proxy_co_mods(ob, co=None, proxy=None, return_proxy=False, types=["SOLIDIFY"]):
"""Gets co with modifiers like cloth exculding mods in types"""
mods = [m for m in ob.modifiers]
views = [m.show_viewport for m in mods]
for m in mods:
if m.type in types:
m.show_viewport = False
if proxy is None:
dg = bpy.context.evaluated_depsgraph_get()
prox = ob.evaluated_get(dg)
proxy = prox.to_mesh()
if co is None:
vc = len(proxy.vertices)
co =
|
np.empty((vc, 3), dtype=np.float32)
|
numpy.empty
|
from idpy.LBM.SCThermo import ShanChanEquilibriumCache, ShanChen
from idpy.LBM.LBM import XIStencils, FStencils, NPT, LBMTypes
from idpy.LBM.LBM import ShanChenMultiPhase
from idpy.LBM.LBM import CheckUConvergence, CheckCenterOfMassDeltaPConvergence
from idpy.LBM.LBM import PosFromIndex
from idpy.IdpyCode import GetTenet, GetParamsClean, CheckOCLFP
from idpy.IdpyCode import CUDA_T, OCL_T, idpy_langs_sys
from idpy.IdpyCode.IdpySims import IdpySims
import sympy as sp
import numpy as np
from pathlib import Path
import os, h5py
from functools import reduce
from collections import defaultdict
from scipy import interpolate, optimize
'''
Temporary Thermodynamic Variables
'''
n, eps = sp.symbols(r'n \varepsilon')
_eps_f = sp.Rational(10, 31)
TLPsis = [sp.exp(-1/n), 1 - sp.exp(-n),
((eps/n + 1) ** (-1 / eps)).subs(eps, _eps_f)]
TLPsiCodes = {TLPsis[0]: 'exp((NType)(-1./ln))',
TLPsis[1]: '1. - exp(-(NType)ln)',
TLPsis[2]: ('pow(((NType)ln/(' + str(float(_eps_f)) + ' + (NType)ln)), '
+ str(float(1/_eps_f)) + ')')}
def AddPosPeriodic(a, b, dim_sizes):
_swap_add = tuple(map(lambda x, y: x + y, a, b))
_swap_add = tuple(map(lambda x, y: x + y, _swap_add, dim_sizes))
_swap_add = tuple(map(lambda x, y: x % y, _swap_add, dim_sizes))
return _swap_add
def PosNorm2(pos):
return reduce(lambda x, y: x + y, map(lambda x: x ** 2, pos))
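# Tiny sanity-check sketch (editor addition) for the helpers above, using an
# 8x8 periodic lattice: stepping (-1, +2) from the origin wraps to (7, 2), and
# the squared norm of (1, -2) is 5.
assert AddPosPeriodic((0, 0), (-1, 2), (8, 8)) == (7, 2)
assert PosNorm2((1, -2)) == 5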
class LatticePressureTensor:
'''
Ideally, I would need to pass the whole stencil and select the correct lengths
'''
def __init__(self, n_field = None, f_stencil = None, psi_sym = None, G = None):
if n_field is None:
raise Exception("Parameter n_field must not be None")
if f_stencil is None:
raise Exception("Parameter f_stencil must not be None")
if psi_sym is None:
raise Exception("Parameter psi_sym must not be None")
if G is None:
raise Exception("Parameter G must not be None")
self.n_field, self.f_stencil, self.psi_sym, self.G = n_field, f_stencil, psi_sym, G
self.psi_f = sp.lambdify(n, self.psi_sym)
'''
Geometric constants
'''
self.dim = len(self.n_field.shape)
self.dim_sizes = self.n_field.shape
self.dim_strides = np.array([reduce(lambda x, y: x*y, self.dim_sizes[0:i+1])
for i in range(len(self.dim_sizes) - 1)],
dtype = NPT.C[LBMTypes['SType']])
self.V = reduce(lambda x, y: x * y, self.dim_sizes)
'''
Finding the square-lengths
'''
self.l2_list = []
for _e in self.f_stencil['Es']:
_norm2 = PosNorm2(_e)
if _norm2 not in self.l2_list:
self.l2_list += [_norm2]
self.l2_list = np.array(self.l2_list, dtype = np.int32)
'''
Init the basis vectors dictionary
'''
self.e_l2 = {}
for l2 in self.l2_list:
self.e_l2[l2] = []
for _e in self.f_stencil['Es']:
_norm2 = PosNorm2(_e)
self.e_l2[_norm2] += [_e]
'''
Finding the weights
'''
self.w_list, _w_i = {}, 0
_, _swap_idx = np.unique(self.f_stencil['Ws'], return_index = True)
for l2 in self.l2_list:
self.w_list[l2] = np.array(self.f_stencil['Ws'])[np.sort(_swap_idx)][_w_i]
_w_i += 1
'''
    Index the lattice pressure tensor contributions as a function of the square lengths
'''
self.pt_groups_f = {1: self.PTLen1, 2: self.PTLen2}
def PTLen1(self, _pos, _l_psi, l2):
if l2 != 1:
raise Exception("Parameter l2 must be equal to 1!")
for _ea in self.e_l2[1]:
#print(_ea)
'''
Neighbors
'''
_n_ea = tuple(AddPosPeriodic(_pos, np.flip(_ea), self.dim_sizes))
#print(_pos, np.flip(_ea), _n_ea, self.w_list[1])
_n_psi = self.psi_field[_n_ea]
_swap_lpt = self.G * self.w_list[1] * _l_psi * _n_psi / 2
for i in range(self.dim):
for j in range(i, self.dim):
_lpt_index = (j - i) + self.lpt_dim_strides[i]
##print(j - i, i, self.lpt_dim_strides[i], _lpt_index)
self.LPT[(_lpt_index,) + _pos] += _swap_lpt * _ea[i] * _ea[j]
def PTLen2(self, _pos, _l_psi, l2):
if l2 != 2:
raise Exception("Parameter l2 must be equal to 2!")
for _ea in self.e_l2[2]:
#print(_ea)
'''
Neighbors
'''
_n_ea = tuple(AddPosPeriodic(_pos, np.flip(_ea), self.dim_sizes))
#print(_pos, np.flip(_ea), _n_ea, self.w_list[2])
_n_psi = self.psi_field[_n_ea]
_swap_lpt = self.G * self.w_list[2] * _l_psi * _n_psi / 2
for i in range(self.dim):
for j in range(i, self.dim):
_lpt_index = (j - i) + self.lpt_dim_strides[i]
self.LPT[(_lpt_index,) + _pos] += _swap_lpt * _ea[i] * _ea[j]
def GetLPT(self):
self.psi_field = self.psi_f(self.n_field)
self.n_lpt = self.dim * (self.dim + 1)//2
self.lpt_dim_sizes = [self.dim - i for i in range(self.dim - 1)]
self.lpt_dim_strides = np.array([0] + [reduce(lambda x, y: x + y, self.lpt_dim_sizes[0:i+1])
for i in range(len(self.lpt_dim_sizes))],
dtype = np.int32)
self.LPT = np.zeros([self.n_lpt] + list(self.dim_sizes))
for _pos_i in range(self.V):
_pos = PosFromIndex(_pos_i, self.dim_strides)
_l_psi = self.psi_field[_pos]
for l2 in self.l2_list:
self.pt_groups_f[l2](_pos, _l_psi, l2)
#sbreak
'''
End of interaction pressure tensor
Adding ideal contribution on diagonal
'''
for i in range(self.dim):
_lpt_index = self.lpt_dim_strides[i]
self.LPT[_lpt_index, :, :] += self.n_field/3
return self.LPT
class SurfaceOfTension:
def __init__(self, n_field = None, f_stencil = None, psi_sym = None, G = None):
if n_field is None:
raise Exception("Parameter n_field must not be None")
if f_stencil is None:
raise Exception("Parameter f_stencil must not be None")
if psi_sym is None:
raise Exception("Parameter psi_sym must not be None")
if G is None:
raise Exception("Parameter G must not be None")
self.n_field, self.f_stencil, self.psi_sym, self.G = \
n_field, f_stencil, psi_sym, G
self.dim = len(self.n_field.shape)
self.dim_center = np.array(list(map(lambda x: x//2, self.n_field.shape)))
self.dim_sizes = self.n_field.shape
self.psi_f = sp.lambdify(n, self.psi_sym)
'''
Preparing common variables
'''
_LPT_class = LatticePressureTensor(self.n_field, self.f_stencil, self.psi_sym, self.G)
self.LPT = _LPT_class.GetLPT()
self.r_range = np.arange(self.dim_sizes[2] - self.dim_center[2])
self.radial_n = self.LPT[0, self.dim_center[0], self.dim_center[1], self.dim_center[2]:]
self.radial_t = self.LPT[3, self.dim_center[0], self.dim_center[1], self.dim_center[2]:]
self.radial_profile = self.n_field[self.dim_center[0],
self.dim_center[1], self.dim_center[2]:]
def GetSurfaceTension(self, grains_fine = 2 ** 10, cutoff = 2 ** 7):
self.r_fine = np.linspace(self.r_range[0], self.r_range[-1], grains_fine)
self.radial_t_spl = \
interpolate.UnivariateSpline(self.r_range, self.radial_t, k = 5, s = 0)
self.radial_n_spl = \
interpolate.UnivariateSpline(self.r_range, self.radial_n, k = 5, s = 0)
'''
Rowlinson: 4.217
'''
def st(R):
_p_jump = \
(self.radial_n[0] - (self.radial_n[0] - self.radial_n[-1]) *
np.heaviside(self.r_fine - R, 1))
_swap_spl = \
interpolate.UnivariateSpline(self.r_fine,
(self.r_fine ** 2) *
(_p_jump - self.radial_t_spl(self.r_fine)),
k = 5, s = 0)
return _swap_spl.integral(self.r_fine[0], self.r_fine[-1]) / (R ** 2)
_swap_st = np.array([st(rr) for rr in self.r_fine[1:]])
_swap_st_spl = interpolate.UnivariateSpline(self.r_fine[1:], _swap_st, k = 5, s = 0)
_swap_rs = optimize.newton(_swap_st_spl.derivative(), x0 = 0)
_swap_smin = _swap_st_spl(_swap_rs)
return {'sigma_4.217': _swap_smin, 'Rs_4.217': _swap_rs,
'st_spl_4.217': _swap_st_spl, 'r_fine_4.217': self.r_fine[1:]}
class EquimolarRadius:
def __init__(self, mass = None, n_in_n_out = None, dim_sizes = None):
if mass is None:
raise Exception("Paramtere mass must not be None")
if n_in_n_out is None:
raise Exception("Parameter n_in_n_out must not be None")
if dim_sizes is None:
raise Exception("Parameter dim_sizes must not be None")
self.mass, self.n_in_n_out, self.dim_sizes = mass, n_in_n_out, dim_sizes
self.V = reduce(lambda x, y: x * y, self.dim_sizes)
def GetEquimolarRadius(self):
_r_swap = \
((3 / (4 * np.pi)) * (self.mass - self.n_in_n_out[1] * self.V)
/ (self.n_in_n_out[0] - self.n_in_n_out[1]))
return {'Re': _r_swap ** (1 / 3)}
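# Illustrative sketch (editor addition): the class above inverts
# mass = n_in * V_drop + n_out * (V - V_drop) with V_drop = (4/3) * pi * Re^3,
# so a known radius is recovered from made-up densities on a 64^3 box.
_demo_Re, _demo_n_in, _demo_n_out = 20.0, 0.3, 0.05
_demo_V_drop = (4. / 3.) * np.pi * _demo_Re ** 3
_demo_mass = _demo_n_in * _demo_V_drop + _demo_n_out * (64 ** 3 - _demo_V_drop)
_demo_Re_est = EquimolarRadius(mass=_demo_mass,
                               n_in_n_out=(_demo_n_in, _demo_n_out),
                               dim_sizes=(64, 64, 64)).GetEquimolarRadius()['Re']
# _demo_Re_est is approximately 20.0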
class TolmanSimulations:
def __init__(self, *args, **kwargs):
self.InitClass(*args, **kwargs)
self.DumpName()
'''
Check if dump exists
'''
self.is_there_dump = os.path.isfile(self.dump_name)
if self.is_there_dump:
self.full_kwargs = {**self.full_kwargs, **{'empty_sim': True}}
self.mp_sim = ShanChenMultiPhase(**self.full_kwargs)
'''
Get/Compute Equilibrium values from/and store in cache
'''
_sc_eq_cache = \
ShanChanEquilibriumCache(stencil = self.params_dict['EqStencil'],
psi_f = self.params_dict['psi_sym'],
G = self.params_dict['SC_G'],
c2 = self.params_dict['xi_stencil']['c2'])
self.eq_params = _sc_eq_cache.GetFromCache()
def End(self):
self.mp_sim.End()
del self.mp_sim
def GetDensityField(self):
_swap_class_name = self.mp_sim.__class__.__name__
if self.is_there_dump:
_n_swap = \
self.mp_sim.ReadSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/idpy_memory/n')
else:
_n_swap = \
self.mp_sim.sims_idpy_memory['n'].D2H()
_n_swap = _n_swap.reshape(np.flip(self.mp_sim.sims_vars['dim_sizes']))
return _n_swap
def GetDensityStrip(self, direction = 0):
_n_swap = self.GetDensityField()
_dim_center = self.mp_sim.sims_vars['dim_center']
'''
I will need to get a strip that is as thick as the largest forcing vector(y) (x2)
'''
_delta = 1
if len(self.params_dict['dim_sizes']) == 2:
_n_swap = _n_swap[_dim_center[1] - _delta:_dim_center[1] + _delta + 1,:]
if len(self.params_dict['dim_sizes']) == 3:
_n_swap = _n_swap[_dim_center[2] - _delta:_dim_center[2] + _delta + 1,
_dim_center[1] - _delta:_dim_center[1] + _delta + 1,:]
return _n_swap
def GetDataEquimolar(self):
_swap_class_name = self.mp_sim.__class__.__name__
if self.is_there_dump:
_mass_swap = \
self.mp_sim.ReadSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/vars/mass')
_n_in_n_out = \
self.mp_sim.ReadSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/vars/n_in_n_out')
_dim_sizes = \
self.mp_sim.ReadSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/vars/dim_sizes')
else:
_mass_swap = \
self.mp_sim.sims_vars['mass']
_n_in_n_out = \
self.mp_sim.sims_vars['n_in_n_out']
_dim_sizes = \
self.mp_sim.sims_vars['dim_sizes']
_output = {'mass': _mass_swap, 'n_in_n_out': _n_in_n_out, 'dim_sizes': _dim_sizes}
return _output
def GetDataEquimolarIntegral(self):
return {'n_field': self.GetDensityStrip(),
'n_in': (self.eq_params['n_l']
if self.params_dict['full_flag'] else
self.eq_params['n_g']),
'n_out': (self.eq_params['n_g']
if self.params_dict['full_flag'] else
self.eq_params['n_l'])}
def GetDataDeltaP(self):
_swap_class_name = self.mp_sim.__class__.__name__
if self.is_there_dump:
_swap_delta_p = \
self.mp_sim.ReadSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/vars/delta_p')
else:
_swap_delta_p = self.mp_sim.sims_vars['delta_p']
_output = {'delta_p': _swap_delta_p[-1]}
return _output
def GetDataSurfaceOfTension(self):
_output = {'n_field': self.GetDensityStrip(),
'f_stencil': self.params_dict['force_stencil'],
'psi_sym': self.params_dict['psi_sym'],
'G': self.params_dict['SC_G']}
return _output
def Simulate(self):
if not self.is_there_dump:
'''
Perform Simulation
'''
self.mp_sim.InitRadialInterface(n_g = self.eq_params['n_g'],
n_l = self.eq_params['n_l'],
R = self.params_dict['R'],
full_flag = self.params_dict['full_flag'])
self.mp_sim.MainLoop(range(0, self.params_dict['max_steps'],
self.params_dict['delta_step']),
convergence_functions = [CheckUConvergence,
CheckCenterOfMassDeltaPConvergence])
'''
Check if the bubble/droplet has burst
'''
if abs(self.mp_sim.sims_vars['delta_p'][-1]) < 1e-9:
print("The", self.params_dict['type'], "has bursted! Dumping Empty simulation")
'''
Writing empty simulation file
'''
self.mp_sim.sims_dump_idpy_memory_flag = False
self.mp_sim.sims_vars['empty'] = 'burst'
self.mp_sim.DumpSnapshot(file_name = self.dump_name,
custom_types = self.mp_sim.custom_types)
return 'burst'
elif not self.mp_sim.sims_vars['is_centered_seq'][-1]:
print("The", self.params_dict['type'], "is not centered! Dumping Empty simulation")
'''
Writing empty simulation file
'''
self.mp_sim.sims_dump_idpy_memory_flag = False
self.mp_sim.sims_vars['empty'] = 'center'
self.mp_sim.DumpSnapshot(file_name = self.dump_name,
custom_types = self.mp_sim.custom_types)
return 'center'
else:
print("Dumping in", self.dump_name)
self.mp_sim.sims_dump_idpy_memory += ['n']
self.mp_sim.DumpSnapshot(file_name = self.dump_name,
custom_types = self.mp_sim.custom_types)
return True
else:
print("Dump file", self.dump_name, "already exists!")
_swap_class_name = self.mp_sim.__class__.__name__
if self.mp_sim.CheckSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/vars/empty'):
_swap_val = \
self.mp_sim.ReadSnapshotData(file_name = self.dump_name,
full_key =
_swap_class_name + '/vars/empty')
_swap_val = np.array(_swap_val, dtype='<U10')
print("Empty simulation! Value:", _swap_val)
return _swap_val
else:
return False
def DumpName(self):
_unique_ws = str(np.unique(self.params_dict['f_stencil']['Ws']))
self.dump_name = \
(self.__class__.__name__ + '_'
+ str(self.params_dict['dim_sizes']) + '_'
+ 'R' + str(self.params_dict['R']) + '_'
+ str(self.params_dict['type']) + '_'
+ 'G' + str(self.params_dict['SC_G']) + '_'
+ 'psi_' + str(self.params_dict['psi_sym']) + '_'
+ 'ews_' + _unique_ws)
self.dump_name = self.dump_name.replace("[","_").replace("]","").replace(" ", "_")
self.dump_name = self.dump_name.replace("/", "_").replace(".","p").replace(",","")
self.dump_name = self.dump_name.replace("\n", "").replace("(", "").replace(")", "")
self.dump_name = self.params_dict['data_dir'] / (self.dump_name + '.hdf5')
print(self.dump_name)
def InitClass(self, *args, **kwargs):
self.needed_params = ['psi_sym', 'R', 'type', 'EqStencil',
'force_stencil', 'max_steps', 'data_dir']
self.needed_params_mp = ['dim_sizes', 'xi_stencil', 'f_stencil',
'psi_code', 'SC_G',
'tau', 'optimizer_flag', 'e2_val',
'lang', 'cl_kind', 'device']
if not hasattr(self, 'params_dict'):
self.params_dict = {}
self.kwargs = GetParamsClean(kwargs, [self.params_dict],
needed_params = self.needed_params + self.needed_params_mp)
if 'max_steps' not in self.params_dict:
self.params_dict['max_steps'] = 2 ** 22
'''
Merging the dictionaries for passthrough
'''
self.params_dict['xi_stencil'] = XIStencils['D3Q19']
self.params_dict['delta_step'] = 2 ** 11
if 'data_dir' not in self.params_dict:
self.params_dict['data_dir'] = Path('data/three-dimensions')
self.params_dict['psi_code'] = TLPsiCodes[self.params_dict['psi_sym']]
if 'EqStencil' not in self.params_dict:
self.params_dict['f_stencil'] = self.params_dict['force_stencil'].PushStencil()
self.params_dict['EqStencil'] = self.params_dict['force_stencil']
else:
self.params_dict['f_stencil'] = self.params_dict['force_stencil']
self.params_dict['full_flag'] = \
True if self.params_dict['type'] == 'droplet' else False
self.full_kwargs = {**self.kwargs, **self.params_dict}
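# Minimal usage sketch (illustrative values; the psi_sym, force_stencil, lang,
# cl_kind and device objects are assumed to come from the surrounding idpy setup):
# sim = TolmanSimulations(dim_sizes=(127, 127, 127), R=20, type='droplet', SC_G=-3.6,
#                         psi_sym=..., force_stencil=..., tau=1.0,
#                         lang=..., cl_kind=..., device=...)
# status = sim.Simulate()  # True/'burst'/'center', or the cached result if a dump exists
# delta_p = sim.GetDataDeltaP()['delta_p']
# sim.End()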
class FlatAnalysis:
def __init__(self, n_field = None, f_stencil = None, psi_sym = None, G = None):
if n_field is None:
raise Exception("Parameter n_field must not be None")
if f_stencil is None:
raise Exception("Parameter f_stencil must not be None")
if psi_sym is None:
raise Exception("Parameter psi_sym must not be None")
if G is None:
raise Exception("Parameter G must not be None")
self.n_field, self.f_stencil, self.psi_sym, self.G = \
n_field, f_stencil, psi_sym, G
self.dim_sizes = self.n_field.shape
self.dim_center = np.array(list(map(lambda x: x//2, self.dim_sizes)))
self.dim = len(self.dim_sizes)
if self.dim == 2:
self.n_line = self.n_field[self.dim_center[0], self.dim_center[1]:]
self.z_range =
|
np.arange(self.dim_sizes[1] - self.dim_center[1])
|
numpy.arange
|
import statsmodels.api as sm
import numpy as np
y = [1,2,3,4,3,4,5,3,5,5,4,5,4,5,4,5,6,0,6,3,1,3,1]
X = [[0,2,4,1,5,4,5,9,9,9,3,7,8,8,6,6,5,5,5,6,6,5,5],
[4,1,2,3,4,5,6,7,5,8,7,8,7,8,7,8,6,8,9,2,1,5,6],
[4,1,2,5,6,7,8,9,7,8,7,8,7,4,3,1,2,3,4,1,3,9,7]]
def reg_m(y, x):
ones = np.ones(len(x[0]))
X = sm.add_constant(np.column_stack((x[0], ones)))
for ele in x[1:]:
X = sm.add_constant(
|
np.column_stack((ele, X))
|
numpy.column_stack
|
from hddm.simulators import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import seaborn as sns
import pymc as pm
import os
import warnings
import hddm
import pandas as pd
from kabuki.analyze import _post_pred_generate, _parents_to_random_posterior_sample
from statsmodels.distributions.empirical_distribution import ECDF
from hddm.model_config import model_config
# Basic utility
def prettier_tag(tag):
len_tag = len(tag)
if len_tag == 1:
return tag[0]
else:
return "(" + ", ".join([str(t) for t in tag]) + ")"
# Plot Composer Functions
def plot_posterior_pair(
model,
plot_func=None,
save=False,
path=None,
figsize=(8, 6),
format="png",
samples=100,
parameter_recovery_mode=False,
**kwargs
):
"""Generate posterior pair plots for each observed node.
Arguments:
model: kabuki.Hierarchical
The (constructed and sampled) kabuki hierarchical model to
create the posterior predictive from.
Optional:
samples: int <default=100>
How many posterior samples to use.
columns: int <default=3>
How many columns to use for plotting the subjects.
bins: int <default=100>
How many bins to compute the data histogram over.
figsize: (int, int) <default=(8, 6)>
save: bool <default=False>
Whether to save the figure to a file.
path: str <default=None>
Save figure into directory prefix
format: str or list of strings <default='png'>
Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
parameter_recovery_mode: bool <default=False>
If the data attached to the model supplied under the model argument
has the format expected of the simulator_h_c() function from the simulators.hddm_dataset_generators
module, then parameter_recovery_mode = True can be used to supply ground truth parameterizations to the
plot_func argument described below.
plot_func: function <default=_plot_posterior_pdf_node>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
if hasattr(model, "model"):
kwargs["model_"] = model.model
else:
kwargs["model_"] = "ddm_vanilla"
if plot_func is None:
plot_func = _plot_func_pair
observeds = model.get_observeds()
kwargs["figsize"] = figsize
kwargs["n_samples"] = samples
# Plot different conditions (new figure for each)
for tag, nodes in observeds.groupby("tag"):
# Plot individual subjects (if present)
for subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):
if "subj_idx" in bottom_node:
if str(node_name) == "wfpt":
kwargs["title"] = str(subj_i)
else:
kwargs["title"] = str(node_name)
if parameter_recovery_mode:
kwargs["node_data"] = model.data.loc[bottom_node["node"].value.index]
g = plot_func(bottom_node["node"], **kwargs)
plt.show()
# Save figure if necessary
if save:
print("passing_print")
if len(tag) == 0:
fname = "ppq_subject_" + str(subj_i)
else:
fname = "ppq_" + ".".join(tag) + "_subject_" + str(subj_i)
if path is None:
path = "."
if isinstance(format, str):
format = [format]
print(["%s.%s" % (os.path.join(path, fname), x) for x in format])
[
g.fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
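# Illustrative usage sketch (assumes a constructed and sampled HDDM model `m`,
# e.g. m = hddm.HDDM(data); m.sample(2000, burn=1000)):
# plot_posterior_pair(m, samples=50, figsize=(8, 8), save=False)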
def plot_from_data(
df,
generative_model="ddm_vanilla",
plot_func=None,
columns=None,
save=False,
path=None,
groupby="subj_idx",
figsize=(8, 6),
format="png",
**kwargs
):
"""Plot data from a hddm ready DataFrame.
Arguments:
df : pd.DataFrame
HDDM ready dataframe.
value_range : numpy.ndarray
Array to evaluate the likelihood over.
Optional:
columns : int <default=3>
How many columns to use for plotting the subjects.
bins : int <default=100>
How many bins to compute the data histogram over.
figsize : (int, int) <default=(8, 6)>
save : bool <default=False>
Whether to save the figure to a file.
path : str <default=None>
Save figure into directory prefix
format : str or list of strings
Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
plot_func : function <default=_plot_func_posterior_pdf_node_nn>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
kwargs["model_"] = generative_model
title_ = kwargs.pop("title", "")
ax_title_size = kwargs.pop("ax_title_fontsize", 10)
if type(groupby) == str:
groupby = [groupby]
if plot_func is None:
plot_func = _plot_func_posterior_pdf_node_nn
if columns is None:
# If there are less than 3 items to plot per figure,
# only use as many columns as there are items.
max_items = max([len(i[1]) for i in df.groupby(groupby).groups.items()])
columns = min(3, max_items)
n_plots = len(df.groupby(groupby))
# Plot different conditions (new figure for each)
fig = plt.figure(figsize=figsize)
fig.suptitle(title_, fontsize=12)
fig.subplots_adjust(top=0.9, hspace=0.4, wspace=0.3)
i = 1
for group_id, df_tmp in df.groupby(groupby):
nrows = int(np.ceil(n_plots / columns))
# Plot individual subjects (if present)
ax = fig.add_subplot(nrows, columns, i)
# Allow kwargs to pass to the plot_func, whether this is the first plot
# (useful to generate legends only for the first subplot)
if i == 1:
kwargs["add_legend"] = True
else:
kwargs["add_legend"] = False
# Make axis title
tag = ""
for j in range(len(groupby)):
tag += groupby[j] + "(" + str(group_id[j]) + ")"
if j < (len(groupby) - 1):
tag += "_"
print(tag)
ax.set_title(tag, fontsize=ax_title_size)
# Call plot function on ax
# This function should manipulate the ax object, and is expected to not return anything.
plot_func(df_tmp, ax, **kwargs)
i += 1
# Save figure if necessary
if save:
fname = "ppq_" + tag
if path is None:
path = "."
if isinstance(format, str):
format = [format]
[
fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
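# Illustrative usage sketch (assumes a HDDM-ready DataFrame `df` with 'rt',
# 'response' and 'subj_idx' columns; the axis manipulator and value_range are
# passed through explicitly):
# plot_from_data(df, generative_model="ddm_vanilla", groupby="subj_idx",
#                plot_func=_plot_func_posterior_node_from_sim,
#                value_range=np.array([-4, 4]), columns=3, figsize=(10, 6))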
def plot_posterior_predictive(
model,
plot_func=None,
required_method="pdf",
columns=None,
save=False,
path=None,
figsize=(8, 6),
format="png",
num_subjs=None,
parameter_recovery_mode=False,
**kwargs
):
"""Plot the posterior predictive distribution of a kabuki hierarchical model.
Arguments:
model : kabuki.Hierarchical
The (constructed and sampled) kabuki hierarchical model to
create the posterior predictive from.
value_range : numpy.ndarray
Array to evaluate the likelihood over.
Optional:
samples : int <default=10>
How many posterior samples to generate the posterior predictive over.
columns : int <default=3>
How many columns to use for plotting the subjects.
bins : int <default=100>
How many bins to compute the data histogram over.
figsize : (int, int) <default=(8, 6)>
save : bool <default=False>
Whether to save the figure to a file.
path : str <default=None>
Save figure into directory prefix
format : str or list of strings
Save figure to an image file of type 'format'. If more than one format is
given, multiple files are created
parameter_recovery_mode: bool <default=False>
If the data attached to the model supplied under the model argument
has the format expected of the simulator_h_c() function from the simulators.hddm_dataset_generators
module, then parameter_recovery_mode = True can be used to supply ground truth parameterizations to the
plot_func argument described below.
plot_func : function <default=_plot_func_posterior_pdf_node_nn>
Plotting function to use for each observed node
(see default function for an example).
Note:
This function changes the current value and logp of the nodes.
"""
if hasattr(model, "model"):
kwargs["model_"] = model.model
else:
kwargs["model_"] = "ddm_vanilla"
if plot_func is None:
plot_func = _plot_func_posterior_pdf_node_nn
observeds = model.get_observeds()
if columns is None:
# If there are less than 3 items to plot per figure,
# only use as many columns as there are items.
max_items = max([len(i[1]) for i in observeds.groupby("tag").groups.items()])
columns = min(3, max_items)
# Plot different conditions (new figure for each)
for tag, nodes in observeds.groupby("tag"):
fig = plt.figure(figsize=figsize) # prev utils.pretty_tag
fig.suptitle(prettier_tag(tag), fontsize=12)
fig.subplots_adjust(top=0.85, hspace=0.4, wspace=0.3)
nrows = num_subjs or np.ceil(len(nodes) / columns)
if len(nodes) - (int(nrows) * columns) > 0:
nrows += 1
# Plot individual subjects (if present)
i = 0
for subj_i, (node_name, bottom_node) in enumerate(nodes.iterrows()):
i += 1
if not hasattr(bottom_node["node"], required_method):
continue # skip nodes that do not define the required_method
ax = fig.add_subplot(int(np.ceil(nrows)), columns, subj_i + 1)
if "subj_idx" in bottom_node:
ax.set_title(str(bottom_node["subj_idx"]))
# Allow kwargs to pass to the plot_func, whether this is the first plot
# (useful to generate legends only for the first subplot)
if i == 1:
kwargs["add_legend"] = True
else:
kwargs["add_legend"] = False
if parameter_recovery_mode:
kwargs["parameter_recovery_mode"] = True
kwargs["node_data"] = model.data.loc[bottom_node["node"].value.index]
# Call plot function on ax
# This function should manipulate the ax object, and is expected to not return anything.
plot_func(bottom_node["node"], ax, **kwargs)
if i > (np.ceil(nrows) * columns):
warnings.warn("Too many nodes. Consider increasing number of columns.")
break
if num_subjs is not None and i >= num_subjs:
break
# Save figure if necessary
if save:
fname = "ppq_" + ".".join(tag)
if path is None:
path = "."
if isinstance(format, str):
format = [format]
[
fig.savefig("%s.%s" % (os.path.join(path, fname), x), format=x)
for x in format
]
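# Illustrative usage sketch (assumes a sampled model `m`; value_range is the grid
# of signed RTs over which the default plot_func evaluates the likelihood):
# plot_posterior_predictive(m, value_range=np.linspace(-4, 4, 100),
#                           samples=10, columns=3, save=False)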
# AXIS MANIPULATORS ---------------
def _plot_func_posterior_pdf_node_nn(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.2,
plot_likelihood_raw=False,
**kwargs
):
"""Calculate posterior predictives from raw likelihood values and plot it on top of a histogram of the real data.
The function does not define a figure, but manipulates an axis object.
Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
Optional:
model : str <default='ddm_vanilla'>
str that defines the generative model underlying the kabuki model from which the bottom_node
argument derives.
samples : int <default=10>
Number of posterior samples to use.
bin_size: float <default=0.2>
Size of bins for the data histogram.
plot_likelihood_raw : bool <default=False>
Whether or not to plot likelihoods sample wise.
add_legend : bool <default=True>
Whether or not to add a legend to the plot
linewidth : float <default=0.5>
Linewidth of histogram outlines.
"""
# Setup -----
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
model_ = kwargs.pop("model_", "ddm_vanilla")
add_legend = kwargs.pop("add_legend", True)
alpha_line = kwargs.pop("alpha", 0.05)
lw_ = kwargs.pop("linewidth", 0.5)
choices = model_config[model_]["choices"]
n_choices = model_config[model_]["n_choices"]
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
bins = np.arange(value_range[0], value_range[-1], bin_size)
if n_choices == 2:
like = np.empty((samples, len(value_range)), dtype=np.float32)
pdf_in = value_range
else:
like = np.empty((samples, len(value_range), n_choices), dtype=np.float32)
pdf_in = np.zeros((len(value_range), 2))
pdf_in[:, 0] = value_range
# -----
# Get posterior parameters and plot corresponding likelihoods (if desired) ---
for sample in range(samples):
# Get random posterior sample
_parents_to_random_posterior_sample(bottom_node)
# Generate likelihood for parents parameters
if n_choices == 2:
like[sample, :] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
axis.plot(
value_range,
like[sample, :],
color="black",
lw=1.0,
alpha=alpha_line,
)
else:
c_cnt = 0
for choice in choices:
pdf_in[:, 1] = choice
like[sample, :, c_cnt] = bottom_node.pdf(pdf_in)
if plot_likelihood_raw:
like[sample, :, c_cnt] = bottom_node.pdf(pdf_in)
axis.plot(
pdf_in[:, 0],
like[sample, :, c_cnt],
color=color_dict[choice],
lw=1.0,
alpha=alpha_line,
)
c_cnt += 1
# -------
# If we don't plot raw likelihoods, we generate a mean likelihood from the samples above
# and plot it as a line with uncertainty bars
if not plot_likelihood_raw:
y = like.mean(axis=0)
try:
y_std = like.std(axis=0)
except FloatingPointError:
print(
"WARNING! %s threw FloatingPointError over std computation. Setting to 0 and continuing."
% bottom_node.__name__
)
y_std = np.zeros_like(y)
if n_choices == 2:
axis.plot(value_range, y, label="post pred", color="black")
axis.fill_between(
value_range, y - y_std, y + y_std, color="black", alpha=0.5
)
else:
c_cnt = 0
for choice in choices:
axis.plot(
value_range,
y[:, c_cnt],
label="post pred",
color=color_dict[choice],
)
axis.fill_between(
value_range,
y[:, c_cnt] - y_std[:, c_cnt],
y[:, c_cnt] + y_std[:, c_cnt],
color=color_dict[choice],
alpha=0.5,
)
c_cnt += 1
# Plot data
if len(bottom_node.value) != 0:
if n_choices == 2:
rt_dat = bottom_node.value.copy()
if np.sum(rt_dat.rt < 0) == 0:
rt_dat.loc[rt_dat.response != 1, "rt"] = (-1) * rt_dat.rt[
rt_dat.response != 1
].values
axis.hist(
rt_dat.rt.values,
density=True,
color="blue",
label="data",
bins=bins,
linestyle="-",
histtype="step",
lw=lw_,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / bottom_node.value.shape[0],
reps=bottom_node.value[bottom_node.value.response == choice].shape[
0
],
)
if np.sum(bottom_node.value.response == choice) > 0:
axis.hist(
bottom_node.value.rt[bottom_node.value.response == choice],
bins=np.arange(value_range[0], value_range[-1], bin_size),
weights=weights,
color=color_dict[choice],
label="data",
linestyle="dashed",
histtype="step",
lw=lw_,
)
axis.set_ylim(bottom=0) # Likelihood and histogram can only be positive
# Add a custom legend
if add_legend:
# If two choices only --> show data in blue, posterior samples in black
if n_choices == 2:
custom_elems = []
custom_titles = []
custom_elems.append(Line2D([0], [0], color="blue", lw=1.0, linestyle="-"))
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
# If more than two choices --> more styling
else:
custom_elems = [
Line2D([0], [0], color=color_dict[choice], lw=1) for choice in choices
]
custom_titles = ["response: " + str(choice) for choice in choices]
custom_elems.append(
Line2D([0], [0], color="black", lw=1.0, linestyle="dashed")
)
custom_elems.append(Line2D([0], [0], color="black", lw=1.0, linestyle="-"))
custom_titles.append("Data")
custom_titles.append("Posterior")
axis.legend(custom_elems, custom_titles, loc="upper right")
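# Note: this axis manipulator is normally handed to plot_posterior_predictive()
# rather than called directly, and value_range must be supplied explicitly, e.g.
# (illustrative sketch):
# plot_posterior_predictive(m, plot_func=_plot_func_posterior_pdf_node_nn,
#                           value_range=np.linspace(-4, 4, 200), samples=10)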
def _plot_func_posterior_node_from_sim(
bottom_node,
axis,
value_range=None,
samples=10,
bin_size=0.1,
add_posterior_uncertainty=True,
add_posterior_mean=True,
**kwargs
):
"""Calculate posterior predictive for a certain bottom node and plot a histogram using the supplied axis element.
:Arguments:
bottom_node : pymc.stochastic
Bottom node to compute posterior over.
axis : matplotlib.axis
Axis to plot into.
value_range : numpy.ndarray
Range over which to evaluate the likelihood.
:Optional:
samples : int (default=10)
Number of posterior samples to use.
bin_size : float (default=0.1)
Size of the bins used for the RT histograms.
add_posterior_uncertainty: bool (default=True)
Plot individual posterior samples or not.
add_posterior_mean: bool (default=True)
Whether to add a mean posterior (histogram from a dataset collapsed across posterior samples)
alpha: float (default=0.05)
alpha (transparency) level for plot elements from single posterior samples.
linewidth: float (default=0.5)
linewidth used for histograms
add_legend: bool (default=True)
whether or not to add a legend to the current axis.
legend_loc: str <default='upper right'>
string defining legend position. Find the rest of the options in the matplotlib documentation.
legend_shadow: bool <default=True>
Add shadow to legend box?
legend_fontsize: float <default=12>
Fontsize of legend.
model_: str (default='lca_no_bias_4')
string that the defines generative models used (e.g. 'ddm', 'ornstein' etc.).
"""
color_dict = {
-1: "black",
0: "black",
1: "green",
2: "blue",
3: "red",
4: "orange",
5: "purple",
6: "brown",
}
if value_range is None:
# Infer from data by finding the min and max from the nodes
raise NotImplementedError("value_range keyword argument must be supplied.")
if len(value_range) == 1:
value_range = (-value_range[0], value_range[0])
else:
value_range = (value_range[0], value_range[-1])
# Extract some parameters from kwargs
bins = np.arange(value_range[0], value_range[1], bin_size)
# add_uc = kwargs.pop('add_posterior_uncertainty', True)
# add_mean = kwargs.pop('add_posterior_mean', True)
sample_hist_alpha = kwargs.pop("alpha", 0.05)
lw_ = kwargs.pop("linewidth", 0.5)
add_legend = kwargs.pop("add_legend", True)
model_ = kwargs.pop("model_", "lca_no_bias_4")
choices = model_config[model_]["choices"]
n_choices = model_config[model_]["n_choices"]
legend_loc = kwargs.pop("legend_loc", "upper right")
legend_fs = kwargs.pop("legend_fontsize", 12)
legend_shadow = kwargs.pop("legend_shadow", True)
# like = np.empty((samples, len(value_range)), dtype=np.float32)
if type(bottom_node) == pd.DataFrame:
samples = None
data_tmp = bottom_node
data_only = 1
else:
samples = _post_pred_generate(
bottom_node,
samples=samples,
data=None,
append_data=False,
add_model_parameters=False,
)
data_tmp = bottom_node.value
data_only = 0
# Go sample by sample (to show uncertainty)
if add_posterior_uncertainty and not data_only:
for sample in samples:
if n_choices == 2:
if np.sum(sample.rt < 0) == 0:
sample.loc[sample.response != 1, "rt"] = (-1) * sample.rt[
sample.response != 1
].values
axis.hist(
sample.rt,
bins=bins,
density=True,
color="black",
label="posterior",
histtype="step",
lw=lw_,
alpha=sample_hist_alpha,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / sample.shape[0],
reps=sample.loc[sample.response == choice, :].shape[0],
)
axis.hist(
sample.rt[sample.response == choice],
bins=bins,
weights=weights,
color=color_dict[choice],
label="posterior",
histtype="step",
lw=lw_,
alpha=sample_hist_alpha,
)
# Add a 'mean' line
if add_posterior_mean and not data_only:
concat_data = pd.concat(samples)
if n_choices == 2:
if np.sum(concat_data.rt < 0) == 0:
concat_data.loc[concat_data.response != 1, "rt"] = (
-1
) * concat_data.rt[concat_data.response != 1].values
axis.hist(
concat_data.rt,
bins=bins,
density=True,
color="black",
label="posterior",
histtype="step",
lw=lw_,
alpha=1.0,
)
else:
for choice in choices:
weights = np.tile(
(1 / bin_size) / concat_data.shape[0],
reps=concat_data.loc[concat_data.response == choice, :].shape[0],
)
axis.hist(
concat_data.rt[concat_data.response == choice],
bins=bins,
weights=weights,
color=color_dict[choice],
label="posterior",
histtype="step",
lw=lw_,
alpha=1.0,
)
# Plot data
if len(data_tmp) != 0:
if n_choices == 2:
rt_dat = data_tmp.copy()
if
|
np.sum(rt_dat.rt < 0)
|
numpy.sum
|
#!/usr/bin/env python
#
# Created by: <NAME>, March 2002
#
""" Test functions for linalg.basic module
"""
from __future__ import division, print_function, absolute_import
"""
Bugs:
1) solve.check_random_sym_complex fails if a is complex
and transpose(a) = conjugate(a) (a is Hermitian).
"""
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_basic.py
"""
import numpy as np
from numpy import arange, array, dot, zeros, identity, conjugate, transpose, \
float32
import numpy.linalg as linalg
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,\
solve_banded, solveh_banded, solve_triangular
from scipy.linalg._testutils import assert_no_overwrite
def random(size):
return rand(*size)
class TestSolveBanded(TestCase):
def test_real(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_complex(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2j, 1, 20, 2j],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2j, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0,1j],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_real(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_complex(self):
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
x = solve_banded((l, u), ab, b4, check_finite=False)
assert_array_almost_equal(dot(a, x), b4)
def test_bad_shape(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1,4)
assert_raises(ValueError, solve_banded, (l, u), ab, bad)
assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
# Values of (l,u) are not compatible with ab.
assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
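# For reference, solve_banded((l, u), ab, b) expects `ab` in diagonal ordered form,
# ab[u + i - j, j] == a[i, j]: for the 4x4 matrix `a` above with (l, u) = (2, 1),
# row 0 of `ab` holds the superdiagonal (padded with a leading 0), row 1 the main
# diagonal, and rows 2-3 the two subdiagonals (padded with trailing zeros).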
class TestSolveHBanded(TestCase):
def test_01_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 1D array.
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_upper(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_03_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 2D array with shape (4,1).
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0, 0.0]).reshape(-1,1))
def test_01_lower(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_lower(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
|
assert_array_almost_equal(x, expected)
|
numpy.testing.assert_array_almost_equal
|
import librosa
import numpy as np
import copy
from numpy.lib.stride_tricks import as_strided
from scipy.fftpack import dct, idct
from scipy.signal import butter, lfilter
import scipy.ndimage
import tensorflow as tf
log = False
# Most of the Spectrograms and Inversion are taken from: https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
def butter_bandpass(lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
def overlap(X, window_size, window_step):
"""
Create an overlapped version of X
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
window_step : int
Step size between windows
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError('Window size must be even!')
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
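# Illustrative sketch (assumed signal): split a 1 s, 16 kHz signal into
# 512-sample windows with a 128-sample hop:
# x = np.random.randn(16000)
# frames = overlap(x, window_size=512, window_step=128)  # shape (n_windows, 512)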
def halfoverlap(X, window_size):
"""
Create an overlapped version of X using 50% of window_size as overlap.
Parameters
----------
X : ndarray, shape=(n_samples,)
Input signal to window and overlap
window_size : int
Size of windows to take
Returns
-------
X_strided : shape=(n_windows, window_size)
2D array of overlapped X
"""
if window_size % 2 != 0:
raise ValueError('Window size must be even!')
window_step = window_size // 2
# Make sure there are an even number of windows before stridetricks
append = np.zeros((window_size - len(X) % window_size))
X = np.hstack((X, append))
num_frames = len(X) // window_step - 1
row_stride = X.itemsize * window_step
col_stride = X.itemsize
X_strided = as_strided(X, shape=(num_frames, window_size),
strides=(row_stride, col_stride))
return X_strided
def invert_halfoverlap(X_strided):
"""
Invert ``halfoverlap`` function to reconstruct X
Parameters
----------
X_strided : ndarray, shape=(n_windows, window_size)
X as overlapped windows
Returns
-------
X : ndarray, shape=(n_samples,)
Reconstructed version of X
"""
# Hardcoded 50% overlap! Can generalize later...
n_rows, n_cols = X_strided.shape
X = np.zeros((((int(n_rows // 2) + 1) * n_cols),)).astype(X_strided.dtype)
start_index = 0
end_index = n_cols
window_step = n_cols // 2
for row in range(X_strided.shape[0]):
X[start_index:end_index] += X_strided[row]
start_index += window_step
end_index += window_step
return X
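# Illustrative round-trip sketch: with a rectangular window, 50% overlap-add means
# interior samples are summed twice, so the middle of the reconstruction is ~2x
# the original signal unless a suitable window is applied to the frames first.
# frames = halfoverlap(np.arange(1024.0), window_size=256)
# x_rec = invert_halfoverlap(frames)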
def denoise(spectrogram):
denoised = np.copy(spectrogram)
print(np.mean(denoised))
print(np.min(denoised))
denoised = np.log1p(denoised)
print(np.mean(denoised))
print(np.min(denoised))
denoised[np.where(denoised < 8)] = 0
denoised = np.expm1(denoised)
print(np.mean(denoised))
print(np.min(denoised))
return denoised
def revert_stft(y, fft_size, num_iter):
if log:
y =
|
np.expm1(y)
|
numpy.expm1
|
import emcee
import numpy
import matplotlib.pyplot as pl
from corner import corner
import corner
from model_functions import forward_model
#from foward_model_for_mcmc import forward_model_old
### Data constraints:
###########################
## Surface Temp
observ_T=numpy.loadtxt('obs_T.txt',delimiter=',')
time_T=observ_T[0,:]
time_T=numpy.array(time_T.astype(int))
obs_T=observ_T[1,:]+273.15
er_T=observ_T[2,:]
#######################
## Deep ocean Temp
observ_Td=numpy.loadtxt('obs_Td.txt',delimiter=',')
time_Td=observ_Td[0,:]#time_CO2
time_Td=numpy.array(time_Td.astype(int))
obs_Td=observ_Td[1,:]+273.15
er_Td=observ_Td[2,:]
##############################
## pCO2 constraints
preinudsmod=280.0
observ_CO2=numpy.loadtxt('obs_CO2.txt',delimiter=',')
time_CO2=observ_CO2[0,:]#time_CO2
time_CO2=numpy.array(time_CO2.astype(int))
obs_CO2=observ_CO2[1,:]/preinudsmod
er_CO2=observ_CO2[2,:]/preinudsmod
########################
# ocean carbonate precipitation
obs_prec_zero=2.3e12 #need spread modifier later
obs_prec_zero=2.35e12 #need spread modifier later
time_prec=99
er_prec_zero=0.64e12 # need spread modifier later
er_prec_zero=0.75e12 # need spread modifier later
########################
## ocean saturation state
#observ_omega=numpy.loadtxt('obs_omega.txt',delimiter=',') #aragonite
observ_omega=numpy.loadtxt('obs_omega_calc.txt',delimiter=',') #calcite
time_omega=observ_omega[0,:]#time_CO2
time_omega=time_omega.astype(int)
obs_omega_o=observ_omega[1,:]
er_omega_o=observ_omega[2,:]
#########################
#pH constraints
observ_pH_saved=numpy.loadtxt('obs_pH.txt',delimiter=',')
time_pH=observ_pH_saved[0,:]#numpy.array([5,15,35,45,55])
time_pH=time_pH.astype(int)
obs_pH=observ_pH_saved[1,:]
er_pH=observ_pH_saved[2,:]
##########################
### This is the likelihood function:
def LnLike(x):
#import pdb
#pdb.set_trace()
### Each range defines the prior for unknown variables that we are trying to determine
if (x[0] > 0.5 ) or (x[0]<0.2): # exponent for pCO2-dependence continental weeathering (default)
#if (x[0] > 1.0 ) or (x[0]<0.01): # alternative exponent for plant modified weathering
#if (x[0] > 0.045 ) or (x[0]<0.025): # alternative exponent for runoff parameterization
return -numpy.inf ## likelihood is -infinity if parameter is outside prior range
if (x[1] > 50 ) or (x[1]<5): #Te parameter (K)
return -numpy.inf
if (x[2] > 1.2 ) or (x[2] < 0.2): # This is the weatherability parameter, W+1 (so W: -0.8 to 1.2)
return -numpy.inf
if (x[3] > 8.0) or (x[3] < 1.5): # climate sensitivity
return -numpy.inf
if (x[4] > 1.5) or (x[4] < 0.2): ### Volcanic outgassing parameter, V (I think this is V not V+1)
return -numpy.inf
if (x[5] > 1.5) or (x[5] < -0.9): ## Carbonate weatherability parameter
return -numpy.inf
if (x[6] > 10e12) or (x[6] < 4e12): #modern outgassing, Tmol C/yr
return -numpy.inf
if (x[7] > 14e12) or (x[7] < 7e12): #modern carbonate weathering, Tmol C/yr
return -numpy.inf
if (x[8] > 1e6) or (x[8] < 2e4): # Mixing time for pore-space, yrs. Mixing flux will be Mass_ocean / x[8]
return -numpy.inf
if (x[9] > 2.5) or (x[9] < 1.0): # Exponent for carbonate precipitation, dependence on saturation state
return -numpy.inf
if (x[10] > 1.5) or (x[10] < 0.5): # fraction of carbonate precipitation seafloor relative to alkalinity release of seafloor?
return -numpy.inf
if (x[11] > 1.4) or (x[11] < 0.8): # gradient of deep ocean temperature to surface temperature
return -numpy.inf
if (x[12] > 0.5) or (x[12] < 0.0): #dissolution exponent for seafloor weathering pH-dependence
return -numpy.inf
if (x[13] > 110000) or (x[13] < 40000): #activation energy seafloor weathering
return -numpy.inf
if (x[14] > 0.6) or (x[14] < 0.4): #pelagic carbonate fraction
return -numpy.inf
if (x[15] > 1.0) or (x[15] < 0.0): #" beta" parameter for outgassing dependence seafloor weathering
return -numpy.inf
if (x[16] > 5.0) or (x[16] < 0.0): # Paleogeography fudge factor
return -numpy.inf
# Now try to run the forward model with the parameters above.
try: #climp,tdep_weath,lfrac,cl_sens,change_out,CWF,F_outgass,F_carbw,tau_mix,n,alt_frac,deep_grad,coef_for_diss,Eact,fpel,beta,PG)
#x0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
[[out0,out1,out2,out3,time,pH_array_o,CO2_array_o,pH_array_p,CO2_array_p,Ca_array_o,Ca_array_p,CO3_array_o,CO3_array_p,HCO3_array_o,HCO3_array_p,omega_o,omega_p,Tsurf_array,Tdeep_array,Fd_array,Fs_array,Precip_ocean_array,Precip_pore_array],imbalance]=forward_model(x[8],x[6],x[9],x[0],x[1],0.45e12,x[10],0.01,x[2],x[3],x[4],x[7],x[14],x[5],x[11],x[12],x[15],x[13],x[16])
#[out0,out1,out2,out3,time,pH_array_o,CO2_array_o,pH_array_p,CO2_array_p,Ca_array_o,Ca_array_p,CO3_array_o,CO3_array_p,HCO3_array_o,HCO3_array_p,omega_o,omega_p,Tsurf_array,Tdeep_array,Fd_array,Fs_array,Precip_ocean_array,Precip_pore_array]=forward_model_old(x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10],x[11],x[12],x[13],x[14],x[15],x[16])
#print(out0[0],out00[0])
#print(pH_array_p[0],pH_array_p0[0])
#print(Fs_array[0],Fs_array0[0])
#print(Fd_array[0],Fd_array0[0])
#import pdb
#pdb.set_trace()
# W,F_outgass, n, CO2_dep,Te,mod_sea,alt_frac,Mp_frac,W_plus_1,cl_sens,change_out,F_carbw,frac_pel,CWF,deep_grad,coef_for_diss,beta,Ebas,PG
# x[8], x[6], x[9],x[0],x[1],0.45e12,x[10], 0.01, x[2], x[3], x[4], x[7], x[14], x[5], x[11], x[12], x[15], x[13], x[16]
except:
return -numpy.inf ## if it fails, throw it away
# don't allow unbounded or unphysical solutions
if numpy.isnan(pH_array_p[98]):
return -numpy.inf
if HCO3_array_p[98]<0.0:
return -numpy.inf
if out2[98]<0.0:
return -numpy.inf
spread= ( (x[6]+x[4]*x[6])/x[6] )**x[15] ## Spreading rate at 100 Myr for this particular run
## will be used to calculate model carbonate precipitation in seafloor at 100 Myr below
### Now get outputs from model at the times at which we have data to compare
model_T=Tsurf_array[time_T]
model_Td=Tdeep_array[time_Td]
model_CO2=CO2_array_o[time_CO2]/CO2_array_o[0]
model_prec=numpy.mean(Precip_pore_array[time_prec])
model_omega_o=omega_o[time_omega]
model_pH=pH_array_o[time_pH]
# Also need to use spreading rates from 100 Myr to get "observed" precipitation data
obs_prec=obs_prec_zero*spread
er_prec=er_prec_zero*spread
## Now calculate the log-likelihood i.e. compare data the model outputs
## The larger log_like, the better the fit
log_terms=numpy.sum(numpy.log(2*numpy.pi*er_T**2))+numpy.sum(numpy.log(2*numpy.pi*er_Td**2)) +numpy.sum(numpy.log(2*numpy.pi*er_omega_o**2)) +numpy.log(2*numpy.pi*er_prec**2) +numpy.sum(numpy.log(2*numpy.pi*er_pH**2)) + numpy.sum(numpy.log(2*numpy.pi*er_CO2**2))
log_like = -0.5 * ( numpy.sum((obs_T-model_T)**2/er_T**2) + numpy.sum((obs_Td-model_Td)**2/er_Td**2) + (obs_prec-model_prec)**2/er_prec**2 + numpy.sum((obs_CO2-model_CO2)**2/er_CO2**2) +
numpy.sum((obs_omega_o-model_omega_o)**2/er_omega_o**2) + log_terms + numpy.sum((obs_pH-model_pH)**2/er_pH**2) )
return log_like # return the likelihood value to the program running the inverse analysis
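# For reference: with independent Gaussian errors the expression above is the
# standard log-likelihood ln L = -1/2 * sum_i [ (obs_i - model_i)^2 / sigma_i^2
# + ln(2*pi*sigma_i^2) ], summed over all data types (surface T, deep-ocean T,
# pCO2, seafloor carbonate precipitation, saturation state and pH).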
# Set up the sampler
ndim = 17 ### This is the number of parameters we are solving for (x[0], x[1], ... x[16] = 17 parameters)
### The next two parameters control the number of walkers and the number of steps to take when solving the inverse problem
### nwalk * nsteps = total number of model runs
### To do a full model run, nwalk = 1000 and nsteps = 10000 is appropriate
### For a quick run, nwalk = 100 and nsteps = 1000 will tell you if the code is working.
nwalk = 500
nsteps = 2000
#nwalk = 200
#nsteps = 500
### Important: if you make nsteps < 1000, you will need to modify some of the plotting stuff below
### This is because, ideally, you want to throw out the first 1000 steps, as it takes a while for the Markov
### chains to converge on the posteriors. But if you are just doing a quick test run to see if the code works
### you might have <1000 steps, and so you definitely don't want to throw away your whole sample!
### Define the initial random values for the 17 variables. This is where the walkers start.
#p0 = numpy.vstack([[0.01+0.99*numpy.random.random() for i in range(nwalk)], #for plant pCO2 dependence 0-1 WEATH1
#p0 = numpy.vstack([[0.025+0.02*numpy.random.random() for i in range(nwalk)], #for runoff no pCo2 dependence WEATH2
p0 = numpy.vstack([[0.2+0.3*numpy.random.random() for i in range(nwalk)], #Default
[5+45*numpy.random.random() for i in range(nwalk)],
[0.2+1.0*numpy.random.random() for i in range(nwalk)],
[1.5+6.5*numpy.random.random() for i in range(nwalk)],
[0.2+1.3*numpy.random.random() for i in range(nwalk)],
[-0.9+2.4*numpy.random.random() for i in range(nwalk)],
[4e12+6e12*numpy.random.random() for i in range(nwalk)],
[7e12+7e12*numpy.random.random() for i in range(nwalk)],
[20000+980000.*numpy.random.random() for i in range(nwalk)],
[1.0+1.5*numpy.random.random() for i in range(nwalk)],
[0.5+1.0*numpy.random.random() for i in range(nwalk)],
[0.8+0.6*numpy.random.random() for i in range(nwalk)],
[0.+0.5*numpy.random.random() for i in range(nwalk)],
[40000.+70000.*numpy.random.random() for i in range(nwalk)],
[0.4+0.2*numpy.random.random() for i in range(nwalk)],
[1.0*numpy.random.random() for i in range(nwalk)],
[5.0*numpy.random.random() for i in range(nwalk)]]).T
## Actually do the inverse analysis:
## Note that the "threads" variable below is very important for code parallelization.
## This needs to be chosen carefully to reflect the number of cores on your computer.
## If you can max out all your cores, that would be ideal.
## But if threads > number of cores then it'll run slow.
#from multiprocessing import Pool
#with Pool() as pool:
sampler = emcee.EnsembleSampler(nwalk, ndim, LnLike,threads=14)
#args = (t, y, error),
#pool = emcee.interruptible_pool.InterruptiblePool()) #for parallel
pos, lnprob, rstate=sampler.run_mcmc(p0, nsteps)
## Save the ouputs just in case the plotting code crashes (don't want to have to redo the inverse analysis!)
numpy.save('newchain2',sampler.chain[:,:,:])
numpy.save('newln2',sampler.lnprobability)
## autocorrelation parameter (useful for checking if you did enough model runs to have valid posteriors)
try:
print ("ESS",nsteps * nwalk / numpy.nanmax(sampler.acor))
except:
print ("couldn't calculate autocorrelation")
chain=numpy.load('newchain2.npy')
lnprob=numpy.load('newln2.npy')
##### The rest is all plotting
# Plot a few individual chains
fig, ax = pl.subplots(4)
for n in range(nwalk):
ax[0].plot(chain[n,:,0])
ax[1].plot(chain[n,:,1])
ax[2].plot(chain[n,:,2])
ax[3].plot(chain[n,:,3])
#find highest likelihood run
logprob=numpy.array(lnprob)
values=chain
ii,jj = numpy.unravel_index(logprob.argmax(), logprob.shape)
print ("indeces for best",ii,jj)
print ("loglikelihood and values",logprob[ii,jj],values[ii,jj,:])
# Plot the corner plot, discarding the first 1000 steps as burn-in
production = chain[:,1000:,:]
#production = chain[:,0:,:] ## Use this if you have <1000 steps
s = production.shape
flatchain = production.reshape(s[0] * s[1], s[2])
flatchain2=numpy.copy(flatchain)
## convert some variables:
flatchain2[:,4]=flatchain2[:,4]+1 #make outgassing relative Cretaceous outgassing, V+1
## Weatherability is already Cretaceous weatherability, W+1, assuming w=1+W with plus sign
flatchain2[:,5]=flatchain2[:,5]+1 #Carbonate weathering modifier
flatchain2[:,8]=flatchain2[:,8]/1000.0 #Convert to ky
flatchain2[:,13]=flatchain2[:,13]/1000.0 #Convert to kJ/mol
flatchain2[:,6]=flatchain2[:,6]/1e12 #Convert to Tmol
flatchain2[:,7]=flatchain2[:,7]/1e12 #Convert to Tmol
from matplotlib import rc
## Plot posteriors as corner plots (compare to Fig. 6 in the paper)
corner.corner(flatchain2[:,[0,1,2,3,4]], quantiles=[0.16, 0.5, 0.84],labels=[r"CO$_2$-dependence, $\alpha$", "Temp. dep. cont.\nweathering, $T_e$ (K)", "Relative Cretaceous\nweatherability, 1+$W$","Climate sensitivity,\n${\Delta}T_{2X}$ (K)","Relative Cretaceous\noutgassing, 1+$V$"])#,truths=values[ii,jj,:])
corner.corner(flatchain2[:,[5,8,11,13,6,16]], quantiles=[0.16, 0.5, 0.84],labels=["Carbonate weath.\nmodifier, 1+$C_{WF}$",r"Circulation time, $\tau$ (kyr)","Surface-deep\ntemp. gradient, $a_{grad}$","Temp. dependence\nseafloor, $E_{bas}$ (kJ/mol)","Modern outgassing,\n$F_{out}^{mod}$ (Tmol C/yr)","Paleogeography parameter,\n${\Delta}P$ (K)"])#,truths=values[ii,jj,:])
corner.corner(flatchain2[:,[7,9,10,12,14,15]], quantiles=[0.16, 0.5, 0.84],labels=["Modern carb.\nweathering, $F_{carb}^{mod}$ (Tmol C/yr)","Carb. precip.\ncoefficient, $n$","Modern seafloor diss.\nrelative precip.","pH dependence\nseafloor, $\gamma$","Modern pelagic\nfraction",r"Spreading rate dep., $\beta$"])#,truths=values[ii,jj,:])
## Confidence intervals for unknown parameter:
ab, bc, cd,de,ef ,fg,gh,hi,ij,jk,kl,lm,mn,no,op,pq,qr= map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),zip(*numpy.percentile(flatchain, [16, 50, 84],axis=0)))
print ("median values with errors", numpy.array([ab, bc, cd,de,ef,fg,gh,hi,ij,jk,kl,lm,mn,no,op,pq,qr]))
print ("confidence intervals")
map(lambda v: (v[0], v[1], v[2]),zip(*numpy.percentile(flatchain, [5, 50, 95],axis=0)))
from plotting_everything import mc_plotter_spread,dist_plotter
## Can't remember what this does - probably not important
import pylab
pylab.figure(figsize=(30,15))
legend_counter=0
for x_ex in flatchain[numpy.random.randint(len(flatchain), size=100)]:
#print (x_ex)
#outputs=forward_model_old(x_ex[0],x_ex[1],x_ex[2],x_ex[3],x_ex[4],x_ex[5],x_ex[6],x_ex[7],x_ex[8],x_ex[9],x_ex[10],x_ex[11],x_ex[12],x_ex[13],x_ex[14],x_ex[15],x_ex[16])
[outputs,imbalance] = forward_model(x_ex[8],x_ex[6],x_ex[9],x_ex[0],x_ex[1],0.45e12,x_ex[10],0.01,x_ex[2],x_ex[3],x_ex[4],x_ex[7],x_ex[14],x_ex[5],x_ex[11],x_ex[12],x_ex[15],x_ex[13],x_ex[16])
sp=((x_ex[6]+x_ex[4]*x_ex[6])/x_ex[6])**x_ex[15]
mc_plotter_spread(outputs,"y",legend_counter,sp)
legend_counter=legend_counter+1
### This is important. This takes 1000 sets of parameter values from your posterior
### and re-runs the forward model 1000 times to get distributions for the time-evolution
### of different model parameters e.g. Fig. 5a-f
### Another script is called to actually do the plotting
mega_output=[]
spread_output=[]
carbw_factor=[]
sil_change=[]
seafloor_change=[]
for x_ex in flatchain[numpy.random.randint(len(flatchain), size=1000)]:
#print (x_ex)
#outputs=forward_model_old(x_ex[0],x_ex[1],x_ex[2],x_ex[3],x_ex[4],x_ex[5],x_ex[6],x_ex[7],x_ex[8],x_ex[9],x_ex[10],x_ex[11],x_ex[12],x_ex[13],x_ex[14],x_ex[15],x_ex[16])
[outputs,imbalance]= forward_model(x_ex[8],x_ex[6],x_ex[9],x_ex[0],x_ex[1],0.45e12,x_ex[10],0.01,x_ex[2],x_ex[3],x_ex[4],x_ex[7],x_ex[14],x_ex[5],x_ex[11],x_ex[12],x_ex[15],x_ex[13],x_ex[16])
spread_output.append( ((x_ex[6]+x_ex[4]*x_ex[6])/x_ex[6])**x_ex[15])
mega_output.append(outputs)
carbw_factor.append((1+x_ex[5])*outputs[20][99]/outputs[20][0])
sil_change.append(outputs[20][99]-outputs[20][0])
seafloor_change.append(outputs[19][99]-outputs[19][0])
mega_output=numpy.array(mega_output)
spread_output=numpy.array(spread_output)
dist_plotter(mega_output,spread_output,"y")
sil_change=numpy.array(sil_change)/1e12
seafloor_change=numpy.array(seafloor_change)/1e12
change_precip_array=mega_output[:,21,99]/mega_output[:,21,0]
print (numpy.percentile(numpy.array(change_precip_array),2.5),numpy.percentile(numpy.array(change_precip_array),50),numpy.percentile(numpy.array(change_precip_array),97.5))
print (numpy.percentile(numpy.array(change_precip_array),16),numpy.percentile(numpy.array(change_precip_array),50),numpy.percentile(numpy.array(change_precip_array),84))
## The rest has something to do with Fig. 5g-i,
#final subplot
pylab.subplot(3, 3, 9)
pylab.hist2d(sil_change, seafloor_change, range=[[-1, 6], [0, 6]],bins=30,normed=True,cmap=pylab.cm.jet)
pylab.colorbar(label='Probability density')
pylab.xlabel('Decrease in continental weathering\nsince mid Cretaceous (Tmol/yr)')
pylab.ylabel('Decrease in seafloor weathering\nsince mid Cretaceous (Tmol/yr)')
# carbonate plot:
pylab.subplot(3, 3, 7)
pylab.hist(numpy.array(carbw_factor),bins=30,color='grey',normed=True)
pylab.xlabel('Relative change carbonate weathering')
pylab.ylabel('Probability density')
print (numpy.percentile(numpy.array(carbw_factor),2.5),numpy.percentile(numpy.array(carbw_factor),50),numpy.percentile(
|
numpy.array(carbw_factor)
|
numpy.array
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : Logistic.py
@Time : 2019/05/21 20:36:26
@Author : <NAME>
@Version : 1.0
@Contact : <EMAIL>
@Desc    :   Logistic regression
@github : https://github.com/aimi-cn/AILearners
'''
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.font_manager import FontProperties
'''
@description: Test function for the gradient ascent algorithm.
Finds the maximum of f(x) = -x^2 + 4x.
@param {type}
@return:
'''
def Gradient_Ascent_test():
# derivative of f(x)
def f_prime(x_old):
return -2 * x_old + 4
# initial value: give a value smaller than x_new so the loop starts
x_old = -1
# starting value of the gradient ascent, i.e. start from (0, 0)
x_new = 0
# step size, i.e. the learning rate; controls the magnitude of each update
alpha = 0.01
# precision, i.e. the update (convergence) threshold
precision = 0.00000001
# the update formula mentioned above
while abs(x_new - x_old) > precision:
x_old = x_new
x_new = x_old + alpha * f_prime(x_old)
# print the final approximation of the maximum
print(x_new)
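# Worked check: f(x) = -x^2 + 4x has f'(x) = -2x + 4, so the update
# x_new = x_old + alpha * f_prime(x_old) climbs towards f'(x) = 0, i.e. x = 2
# (where f(2) = 4); Gradient_Ascent_test() prints a value very close to 2.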
'''
@description: Load the data.
@param {type}
@return:
dataMat - list of data
labelMat - list of labels
'''
def loadDataSet():
# create the data list
dataMat = []
# create the label list
labelMat = []
# open the file
fr = open('C:/Users/Administrator/Desktop/blog/github/AILearners/data/ml/jqxxsz/5.Logistic/testSet.txt')
# read line by line
for line in fr.readlines():
# strip the newline and split into a list
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
fr.close()
return dataMat,labelMat
'''
@description: Plot the dataset.
@param {type}
@return:
'''
def plotDataSet():
# load the dataset
dataMat, labelMat = loadDataSet()
# convert to a numpy array
dataArr =
|
np.array(dataMat)
|
numpy.array
|
import pandas as pd
import pymc3 as pm
import matplotlib.pyplot as plt
import pickle
import numpy as np
import sys
import theano.tensor as tt
data = pd.read_csv('markers.csv')
data = data.loc[data['epoch_i']<1,:] #Just do a single epoch for simplicity
# shapes and sizes
n_epochs = data['epoch_i'].max() + 1 #each epoch indexed by epoch_i
n_raters = data['rater_i'].max() + 1 #each rater indexed by rater_i
n_data = data.shape[0] #each spindle marker indexed by t
# static priors vars
trust_purcell = 0.1 #crank up to give more weight to purcell et al, 2017
purcell = np.array([0.3587, 0.6387, 0.0026, 0., 0., 0.])+(1-trust_purcell)
spindle_number_prior = purcell/purcell.sum()
max_spindles_per_epoch = len(spindle_number_prior)-1
spindle_duration_alpha = 0.975
spindle_duration_beta = 0.0899
duration_min = 0.4
duration_max = 2
spindle_refractory_mu = 8.81
spindle_refractory_lam = 14.91
spindle_refractory_prob_scale = 0.0339
expected_std_for_accuracy = 0.2
with pm.Model() as model:
# --- Spindles come from True or Contamiate locations ---- #
# Contaminates start and end can come from anywhere withing the 0-25 epoch
contaminate_spindle_start = pm.Uniform.dist(lower=0., upper=25., shape=n_data)
# True spindles in an epoch, must be ordered
tss = pm.Uniform('true_spindle_starts', lower=0., upper=25., shape=max_spindles_per_epoch,
transform=pm.distributions.transforms.Ordered(),
testval=np.array([1., 5., 10., 15., 20.]).T) # Real spindles
# The number of spindles per epoch:
num_spindles_per_epoch = pm.Categorical('num_spindles_per_epoch',
p=pm.Dirichlet('spindle_num_prior', a=spindle_number_prior),
testval=1)
# ----Tracking is a raters spindle marker is real or contaminate-----
# if the number of spindles in an epoch (z) is greater than 0, then use conf to determine if a spindle is real or not
#spindle_chance = data['conf'] # pm.math.switch(num_spindles_per_epoch[data['epoch_i']] > 0, data['conf'], 0)
spindle_chance_prior = pm.Beta('spindle_chance_prior', alpha=2, beta=1)
marker_is_from_real_spindle = pm.Bernoulli('marker_is_from_real_spindle', p=spindle_chance_prior, shape=n_data)
marker_is_from_real_spindle_stacked = tt.stack([marker_is_from_real_spindle, 1 - marker_is_from_real_spindle],
axis=1) # stack theta for use in mixture model
# ----Mapping between rater's spindles and real spindles (w)----
## Handy matrix to compare z too
compare =
|
np.arange(0, max_spindles_per_epoch + 1)
|
numpy.arange
|
import numpy as np
import pandas as pd
from gurobipy import Model as gurobi_Model, GRB, quicksum
from mosek.fusion import Model as mosek_Model, Variable, Expr, Domain, ObjectiveSense
from scipy.stats import norm, multivariate_normal
from sklearn.cluster import KMeans
from itertools import product
from IPython import embed
def breaking(p, m):
p = p - 1
# select p points out of the m points
# then minimize the piecewise linear approximation L(*)
u = np.linspace(start=-3, stop=3, endpoint=True, num=m)
cache_cdf = list(map(lambda x: norm.cdf(x), u))
cost = np.zeros((m, m))
for i in range(m):
for j in range(i + 1, m):
s = (cache_cdf[j] - cache_cdf[i]) / (u[j] - u[i])
x_star = np.sqrt(-np.log(2 * np.pi) - 2 * np.log(s))
x_star = x_star if u[i] <= x_star <= u[j] else -x_star
if u[i] <= -x_star <= u[j]:
cost1 = abs(norm.cdf(x_star) - (cache_cdf[i] + s * (x_star - u[i])))
cost2 = abs(norm.cdf(-x_star) - (cache_cdf[i] + s * (-x_star - u[i])))
cost[i, j] = max(cost1, cost2)
else:
cost[i, j] = abs(norm.cdf(x_star) - (cache_cdf[i] + s * (x_star - u[i])))
D = np.zeros((p + 1, m))
D[1, p-1:m-1] = cost[p-1:m-1, -1]
for k in range(2, p + 1):
for i in range(p - k, m - k):
min_cost = np.inf
for j in range(i + 1, m - k + 1):
temp = D[k - 1, j] + cost[i, j]
if temp < min_cost:
min_cost = temp
D[k, i] = min_cost
return D, cost
def backtracking(D, cost):
p, m = D.shape
p = p - 1
u = np.linspace(start=-3, stop=3, endpoint=True, num=m)
# backtracking
optim_points = [-1000, ]
optim_points.append(u[0])
min_cost = D[p, 0]
j = 0
for k in range(p - 1, 0, -1):
ind = np.argwhere(abs(min_cost - D[k, j + 1: m - k] - cost[j, j + 1:m - k]) < 1e-10)[0][0]
j = j + ind + 1
min_cost = D[k, j]
optim_points.append(u[j])
optim_points.append(u[-1])
optim_points.append(1000)
return optim_points
def find_break_points(p=6, m=100):
D, cost = breaking(p=p, m=m)
# print("the minimum loss of PWL is: ", D[-1, 0])
v = backtracking(D, cost)
return norm.cdf(v), v, D[-1, 0]
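# Illustrative usage sketch: approximate the standard normal CDF on [-3, 3] by a
# piecewise-linear function whose p = 6 break points are chosen from an m = 100
# point grid via the dynamic program above:
# cdf_vals, break_points, loss = find_break_points(p=6, m=100)
# `break_points` includes the -1000/1000 sentinels; `loss` is the minimised
# piecewise-linear approximation loss D[-1, 0].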
class MixIntegerGaussianMixture:
def __init__(self, data, num_component, discrepancy="KS", random_seed=None, p=10, m=100):
self.data = np.array(data)
assert len(data.shape) == 2, "Expected 2D array. Reshape your data either using array.reshape(-1, 1) " \
"if your data has a single feature or " \
"array.reshape(1, -1) if it contains a single sample."
self.num_samples, self.num_features = self.data.shape
self.num_components = num_component
self.num_directions = self.num_features ** 2
# draw projected directions randomly
rng = np.random.RandomState(random_seed)
self.projected_directions = self.sample_unit_hyperspheres(self.num_directions, self.num_features, rng)
# self.projected_data shape: (#directions, #samples)
self.projected_data = np.dot(self.projected_directions, self.data.transpose())
# need to iterate each direction to get the projected means and projected covariances.
self.projected_means = np.zeros((self.num_directions, self.num_components))
self.projected_weights =
|
np.zeros((self.num_directions, self.num_components))
|
numpy.zeros
|
import itertools
import warnings
import pickle
import pytest
import numpy as np
from numpy.testing import assert_equal
import brian2
from brian2.core.preferences import prefs
from brian2.units.fundamentalunits import (UFUNCS_DIMENSIONLESS,
UFUNCS_DIMENSIONLESS_TWOARGS,
UFUNCS_INTEGERS,
UFUNCS_LOGICAL,
Quantity,
Unit,
have_same_dimensions,
get_dimensions,
is_scalar_type,
DimensionMismatchError,
check_units,
in_unit,
get_unit,
get_or_create_dimension,
DIMENSIONLESS,
fail_for_dimension_mismatch)
from brian2.units.allunits import *
from brian2.units.stdunits import ms, mV, kHz, nS, cm, Hz, mM, nA
from brian2.tests.utils import assert_allclose
# To work around an issue in matplotlib 1.3.1 (see
# https://github.com/matplotlib/matplotlib/pull/2591), we make `ravel`
# return a unitless array and emit a warning explaining the issue.
use_matplotlib_units_fix = False
try:
import matplotlib
if matplotlib.__version__ == '1.3.1':
use_matplotlib_units_fix = True
except ImportError:
pass
def assert_quantity(q, values, unit):
assert isinstance(q, Quantity) or (have_same_dimensions(unit, 1) and
(values.shape == () or
isinstance(q, np.ndarray))), q
assert_allclose(
|
np.asarray(q)
|
numpy.asarray
|
import numpy as np
import datetime
from calculations import *
import mahotas
from georectification import *
from os import path, listdir
from scipy.signal import argrelextrema
# Explicit imports for names used further below (os.listdir/os.path, h5py files,
# uuid4 and deepcopy); some of these may also be re-exported by the star imports.
import os
import h5py
from copy import deepcopy
from uuid import uuid4
def lowestCountourSnowDepth(imglist,datetimelist,mask,settings,logger,objsize,light_threshold,sigma,bias):
try:
sigma = int(sigma)
objsize = float(objsize)
bias = float(bias)
light_threshold = float(light_threshold)
    except:
        logger.set('Parameter error. Aborting.')
        return False
if len(imglist) == 0:
return False
mask, pgs, th = mask
if (isinstance(pgs[0],list) and len(pgs) != 1) or (not isinstance(pgs[0],list) and map(sum,[pgs]) == 0.0):
logger.set('Only and only one polygon should be defined for this analysis. Aborting.')
return False
pgsx = []
pgsy = []
for i,c in enumerate(pgs):
if i%2 == 0:
pgsx.append(c)
else:
pgsy.append(c)
pbox = [min(pgsy),max(pgsy),min(pgsx),max(pgsx)]
sd = []
time = []
for i,imgf in enumerate(imglist):
try:
img = mahotas.imread(imgf,as_grey = True)
mbox = [pbox[0]*img.shape[0],pbox[1]*img.shape[0],pbox[2]*img.shape[1],pbox[3]*img.shape[1]]
mbox = map(int,map(np.rint,mbox))
# mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'1.jpg'),(img[mbox[0]:mbox[1],mbox[2]:mbox[3]]).astype(np.uint8))
if sigma != 0:
img = mahotas.gaussian_filter(img, sigma)
# mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'2.jpg'),(img[mbox[0]:mbox[1],mbox[2]:mbox[3]]).astype(np.uint8))
img = (img <= light_threshold)
# mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'3.jpg'),(img[mbox[0]:mbox[1],mbox[2]:mbox[3]]*255).astype(np.uint8))
img = img[mbox[0]:mbox[1],mbox[2]:mbox[3]]
bottom = mbox[1] - mbox[0]
# mahotas.imsave(path.join('/home/tanisc',str(datetimelist[i].day)+str(datetimelist[i].hour)+str(datetimelist[i].minute)+'4.jpg'),img.astype(np.uint8)*255)
labeled, n = mahotas.label(img)
bboxes = mahotas.labeled.bbox(labeled)
bbheig = []
if n == 0:
height = np.nan
else:
for j,bbox in enumerate(bboxes[1:]):
height = objsize - objsize*bbox[1]/float(bottom)
height += bias
height = np.round(height*100)/100.0
bbheig.append(height)
if bbheig == []:
height = np.nan
else:
height = min(bbheig)
time = np.append(time,(str(datetimelist[i])))
sd = np.append(sd,height)
logger.set('Image: |progress:4|queue:'+str(i+1)+'|total:'+str(len(imglist)))
except Exception as e:
print(e)
logger.set("Processing " + imgf + " failed.")
output = [["Snow Depth",["Time",time,"Snow Depth",sd]]]
return output
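# Hedged worked example (added for illustration, not in the original module):
# the per-blob height estimate above maps a bounding-box row of the detected
# dark region to physical units of the reference object and adds a calibration
# bias, i.e. height = objsize - objsize * bbox_row / bottom + bias. For a 2 m
# stake (objsize=2), a 400-row crop and a bounding-box row of 100 this gives
# 2 - 2 * 100 / 400.0 = 1.5 m before the bias correction.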
def salvatoriSnowDetect(img,mask,settings,logger,red,green,blue): #produces snow mask as snow=1,no-snow=0, masked=2, pass datetimelist as none if processing image handle
if not bool(float(blue)) and not bool(float(red)) and not bool(float(green)):
return (False,False)
data = histogram(img,None, mask,settings,logger,1,1,1) #output as [dn,r,g,b]
dn = data[0]
sc_img = np.zeros(img.transpose(2,0,1).shape,np.bool)
thresholds = [-1,-1,-1]
hmax = np.max(np.hstack((dn*(data[1]>0),dn*(data[2]>0),dn*(data[3]>0))))
for ch in range(3):
if bool(float([red,green,blue][ch])):
hist = data[ch+1]
hist = hist*(hist>hist.mean()*0.001) #remove floor noise
hist = hist[:hmax+1]
dn = dn[:hmax+1]
threshold = -1+(len(dn)+1)/2.0
hists = np.zeros(hist.shape)
n = 5
for i in np.arange(len(hist)):
hists[i] = hist[(i-n)*((i-n)>=0):((i+n)*((i+n)<len(hist))+(len(hist)-1)*((i+n)>=len(hist)))].mean()
for t in argrelextrema(hists, np.less)[0]:
if t >= threshold:
threshold = t
break
if threshold == 0:
threshold = -1
else:
threshold = 0
sc_img[ch] = (img.transpose(2,0,1)[ch] >= threshold)
thresholds[ch] = threshold
sc_img = (sc_img[0]*sc_img[1]*sc_img[2])
sc_img = (sc_img*(mask.transpose(2,0,1)[0] == 1)+(mask.transpose(2,0,1)[0] == 0)*2)
return (sc_img, thresholds)
#complete, 2nd output not working in storedata
def salvatoriSnowMask(imglist,datetimelist,mask,settings,logger,red,green,blue): #produces snow mask as snow=1,no-snow=0, masked=2, pass datetimelist as none if processing image handle
if len(imglist) == 0:
return False
mask, pgs, th = mask
sc = []
thr = []
thg = []
thb = []
for i,img in enumerate(imglist):
img = mahotas.imread(img)
if mask.shape != img.shape:
mask = maskers.polymask(img,pgs,logger)
sc_img,thv = salvatoriSnowDetect(img,mask*maskers.thmask(img,th),settings,logger,red,green,blue)
sc_img = np.dstack(((sc_img==0)*255,(sc_img==1)*255,(sc_img==2)*255))
sc_img = sc_img.astype('uint8')
sc.append(str(datetimelist[i])+' Snow Mask')
sc.append(sc_img[::-1])
sc.append(str(datetimelist[i])+' Image')
sc.append(img[::-1])
thr = np.append(thr,thv[0])
thg = np.append(thg,thv[1])
thb = np.append(thb,thv[2])
logger.set('Image: |progress:4|queue:'+str(i+1)+'|total:'+str(len(imglist)))
output = [["Snow Mask - 1",sc],["Snow Mask - 1 Thresholds",["Time",datetimelist,"Threshold - Red",thr,"Threshold - Green",thg,"Threshold - Blue",thb]]]
#2nd output in same list of lists not storable yet.
return output
def salvatoriSnowCover(img_imglist,datetimelist,mask,settings,logger,red,green,blue,middata,rectsw,extent,extent_proj,res,dem,C,C_proj,Cz,hd,td,vd,f,w,interpolate,flat,origin,ax,ay):
rectsw = bool(float(rectsw))
middata = bool(float(middata))
dummyImg = False
for img in img_imglist:
try:
mahotas.imread(img)
dummyImg = img
break
except:
pass
if not dummyImg:
logger.set("All images invalid.")
return False
if rectsw:
logger.set("Obtaining weight mask...")
params = map(np.copy,[extent,extent_proj,res,dem,C,C_proj,Cz,hd,td,vd,f,w,interpolate,flat,origin,ax,ay])
auxfilename = False
from definitions import AuxDir, TmpDir
readydata = False
for hdf in os.listdir(AuxDir):
if "SNOWCOV001" in hdf:
try:
auxF= h5py.File(os.path.join(AuxDir,hdf),'r')
readyfile = True
for i in range(len(params)):
attr = auxF['metadata'].attrs["param"+str(i)]
if np.prod(np.array([attr]).shape) == 1:
if (attr != params[i]):
readyfile = False
else:
if (attr != params[i]).any():
readyfile = False
if readyfile:
logger.set("Calculation has done before with same parameters, auxillary info is being read from file...")
tiles = np.copy(auxF['metadata'][...]).tolist()
for d in auxF:
if str(d) == 'metadata':
continue
varname = str(d).split()[0]
tilename = str(d).split()[1]
if len(tiles) == 1:
exec(varname +"= np.copy(auxF[d])")
else:
if varname not in locals():
exec(varname+'=None')
exec(varname + "=writeData(np.copy(auxF[d]),"+varname+",map(int,tilename.split('-')))[0]")
auxF.close()
logger.set("\tRead.")
readydata = True
auxfilename = hdf
break
auxF.close()
except:
try:
auxF.close()
except:
continue
if not readydata:
Wp = Georectify1([dummyImg],[datetimelist[0]],mask,settings,logger,extent,extent_proj,res,dem,C,C_proj,Cz,hd,td,vd,f,w,interpolate,flat,origin,ax,ay)[0][1][5]
logger.set('Writing results for next run...')
auxfilename = 'SNOWCOV001_' + str(uuid4()) + '.h5'
auxF = h5py.File(os.path.join(AuxDir,auxfilename),'w')
tiles = [[0,0,Wp.shape[0],Wp.shape[1]]]
auxF.create_dataset('metadata',data=np.array(tiles))
for i,p in enumerate(params):
auxF['metadata'].attrs.create("param"+str(i),p)
for i,tile in enumerate(tiles):
Wp_ = readData(Wp,tile)[0]
auxF.create_dataset('Wp '+str(tile).replace(', ','-').replace('[','').replace(']',''),Wp_.shape,data=Wp_)
Wp_ = None
auxF.close()
Wp = Wp[::-1]
else:
Wp = np.ones(mahotas.imread(dummyImg).shape[:2])
mask, pgs, th = mask
mask = LensCorrRadial(mask,'0',logger,origin,ax,ay,0)[0][1][1]
Wp *= (mask.transpose(2,0,1)[0]==1)
if np.mean(mask) == 1:
logger.set("Weightmask quality: " + str(np.sum(Wp[-100:,Wp.shape[1]/2-50:Wp.shape[1]/2+50] != 0)/10000))
else:
logger.set("Weightmask quality: "+ str(1 - np.sum((Wp==0)*(mask.transpose(2,0,1)[0]==1))/float(np.sum((mask.transpose(2,0,1)[0]==1)))))
logger.set("Calculating snow cover fractions...")
scr = []
ssr = []
snr = []
mar = []
scn = []
ssn = []
snn = []
man = []
time = []
thr = []
thg = []
thb = []
Wp_full = deepcopy(Wp)
for i_img,imgf in enumerate(img_imglist):
try:
snow = 0
nosnow = 0
img = mahotas.imread(imgf)
if mask.shape != img.shape:
mask = maskers.polymask(img,pgs,logger)
Wp = mahotas.imresize(Wp_full, img.shape[:2])
(img,thv) = salvatoriSnowDetect(img,mask*maskers.thmask(img,th),settings,logger,red,green,blue)
# mimg = np.dstack((img==1,img==0,img==2)).astype(np.uint8)*255
if -1 in thv:
continue
time = np.append(time,(str(datetimelist[i_img])))
img = LensCorrRadial(img,str(datetimelist[i_img]),logger,origin,ax,ay,0)[0][1][1]
snow = np.sum(((img == 1)*Wp).astype(int))
nosnow = np.sum(((img == 0)*Wp).astype(int))
masked = np.sum(((img == 2)*Wp).astype(int))
scr = np.append(scr,snow/float(snow+nosnow))
if middata:
ssr =
|
np.append(ssr,snow)
|
numpy.append
|
from collections import OrderedDict
import copy
import getpass
import itertools
import numpy as np
from scipy import signal
import time
LOCAL_MODE = getpass.getuser() == 'tom'
CONFIG = {
'halite_config_setting_divisor': 1.0,
'collect_smoothed_multiplier': 0.0,
'collect_actual_multiplier': 5.0,
'collect_less_halite_ships_multiplier_base': 0.55,
'collect_base_nearest_distance_exponent': 0.2,
'return_base_multiplier': 8.0,
'return_base_less_halite_ships_multiplier_base': 0.85,
'early_game_return_base_additional_multiplier': 0.1,
'early_game_return_boost_step': 50,
'establish_base_smoothed_multiplier': 0.0,
'establish_first_base_smoothed_multiplier_correction': 2.0,
'establish_base_dm_exponent': 1.1,
'first_base_no_4_way_camping_spot_bonus': 300*0,
'start_camp_if_not_winning': 0,
'max_camper_ship_budget': 2*1,
'relative_step_start_camping': 0.15,
'establish_base_deposit_multiplier': 1.0,
'establish_base_less_halite_ships_multiplier_base': 1.0,
'max_attackers_per_base': 3*1,
'attack_base_multiplier': 300.0,
'attack_base_less_halite_ships_multiplier_base': 0.9,
'attack_base_halite_sum_multiplier': 2.0,
'attack_base_run_opponent_multiplier': 1.0,
'attack_base_catch_opponent_multiplier': 1.0,
'collect_run_opponent_multiplier': 10.0,
'return_base_run_opponent_multiplier': 2.5,
'establish_base_run_opponent_multiplier': 2.5,
'collect_catch_opponent_multiplier': 1.0,
'return_base_catch_opponent_multiplier': 1.0,
'establish_base_catch_opponent_multiplier': 0.5,
'two_step_avoid_boxed_opponent_multiplier_base': 0.7,
'n_step_avoid_boxed_opponent_multiplier_base': 0.45,
'min_consecutive_chase_extrapolate': 6,
'chase_return_base_exponential_bonus': 2.0,
'ignore_catch_prob': 0.3,
'max_initial_ships': 60,
'max_final_ships': 60,
'max_standard_ships_decided_end_pack_hunting': 2,
'nearby_ship_halite_spawn_constant': 3.0,
'nearby_halite_spawn_constant': 5.0,
'remaining_budget_spawn_constant': 0.2,
'spawn_score_threshold': 75.0,
'boxed_in_halite_convert_divisor': 1.0,
'n_step_avoid_min_die_prob_cutoff': 0.05,
'n_step_avoid_window_size': 7,
'influence_map_base_weight': 2.0,
'influence_map_min_ship_weight': 0.0,
'influence_weights_additional_multiplier': 2.0,
'influence_weights_exponent': 8.0,
'escape_influence_prob_divisor': 3.0,
'rescue_ships_in_trouble': 1,
'target_strategic_base_distance': 8.0,
'target_strategic_num_bases_ship_divisor': 9,
'target_strategic_triangle_weight': 20.0, # initially: 20
'target_strategic_independent_base_distance_multiplier': 8.0, # initially 8.0
'target_strategic_influence_desirability_multiplier': 1.0, # initially: 1.0
'target_strategic_potential_divisor': 15.0, # initially: 15.0
'max_spawn_relative_step_divisor': 12.0,
'no_spawn_near_base_ship_limit': 100,
'avoid_cycles': 1,
'max_risk_n_step_risky': 0.5,
'max_steps_n_step_risky': 70,
'log_near_base_distance': 2,
'max_recent_considered_relevant_zero_move_count': 120,
'near_base_2_step_risky_min_count': 50,
'relative_stand_still_collect_boost': 1.5,
'initial_collect_boost_away_from_base': 2.0,
'start_hunting_season_relative_step': 0.1875,
'end_hunting_season_relative_step': 0.75,
'early_hunting_season_less_collect_relative_step': 0.375,
'max_standard_ships_early_hunting_season': 2,
'late_hunting_season_more_collect_relative_step': 0.5,
'late_hunting_season_collect_max_n_step_risk': 0.2,
'after_hunting_season_collect_max_n_step_risk': 0.5,
'late_hunting_season_standard_min_fraction': 0.7,
'max_standard_ships_late_hunting_season': 15,
'collect_on_safe_return_relative_step': 0.075,
'min_halite_to_stop_early_hunt': 15000.0,
'early_best_opponent_relative_step': 0.5,
'surrounding_ships_cycle_extrapolate_step_count': 5,
'surrounding_ships_extended_cycle_extrapolate_step_count': 7,
}
NORTH = "NORTH"
SOUTH = "SOUTH"
EAST = "EAST"
WEST = "WEST"
CONVERT = "CONVERT"
SPAWN = "SPAWN"
NOT_NONE_DIRECTIONS = [NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS = [None, NORTH, SOUTH, EAST, WEST]
MOVE_DIRECTIONS_TO_ID = {None: 0, NORTH: 1, SOUTH: 2, EAST: 3, WEST: 4}
RELATIVE_DIR_MAPPING = {None: (0, 0), NORTH: (-1, 0), SOUTH: (1, 0),
EAST: (0, 1), WEST: (0, -1)}
RELATIVE_DIR_TO_DIRECTION_MAPPING = {
v: k for k, v in RELATIVE_DIR_MAPPING.items()}
OPPOSITE_MAPPING = {None: None, NORTH: SOUTH, SOUTH: NORTH, EAST: WEST,
WEST: EAST}
RELATIVE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)]
RELATIVE_NOT_NONE_DIRECTIONS = [(-1, 0), (1, 0), (0, -1), (0, 1)]
MOVE_GATHER_OPTIONS = [(-1, 0, False), (1, 0, False), (0, -1, False),
(0, 1, False), (0, 0, True)]
TWO_STEP_THREAT_DIRECTIONS = {
(-2, 0): [(-1, 0)],
(-1, -1): [(-1, 0), (0, -1)],
(-1, 0): [(-1, 0), (0, 0)],
(-1, 1): [(-1, 0), (0, 1)],
(0, -2): [(0, -1)],
(0, -1): [(0, -1), (0, 0)],
(0, 1): [(0, 1), (0, 0)],
(0, 2): [(0, 1)],
(1, -1): [(1, 0), (0, -1)],
(1, 0): [(1, 0), (0, 0)],
(1, 1): [(1, 0),(0, 1)],
(2, 0): [(1, 0)],
}
GAUSSIAN_2D_KERNELS = {}
for dim in range(3, 20, 2):
# Modified from https://scipy-lectures.org/intro/scipy/auto_examples/solutions/plot_image_blur.html
center_distance = np.floor(np.abs(np.arange(dim) - (dim-1)/2))
horiz_distance = np.tile(center_distance, [dim, 1])
vert_distance = np.tile(np.expand_dims(center_distance, 1), [1, dim])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(dim/4))
kernel[manh_distance > dim/2] = 0
GAUSSIAN_2D_KERNELS[dim] = kernel
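# Hedged illustration (added, not part of the original bot): each kernel decays
# exponentially with Manhattan distance from the centre and is truncated beyond
# dim/2 steps, e.g. for dim=5 the centre weight is 1.0 and the corners are 0.
def _demo_gaussian_kernel(dim=5):
    kernel = GAUSSIAN_2D_KERNELS[dim]
    centre = (dim - 1) // 2
    assert kernel[centre, centre] == 1.0  # exp(0) at the centre
    assert kernel[0, 0] == 0.0            # Manhattan distance 4 exceeds dim/2
    return kernel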
DISTANCES = {}
DISTANCE_MASKS = {}
HALF_PLANES_CATCH = {}
HALF_PLANES_RUN = {}
ROW_COL_DISTANCE_MASKS = {}
ROW_COL_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_MAX_DISTANCE_MASKS = {}
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS = {}
BOX_DIR_MAX_DISTANCE = 4
BOX_DIRECTION_MASKS = {}
ROW_MASK = {}
COLUMN_MASK = {}
DISTANCE_MASK_DIM = 21
half_distance_mask_dim = int(DISTANCE_MASK_DIM/2)
for row in range(DISTANCE_MASK_DIM):
row_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=np.bool)
row_mask[row] = 1
col_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM), dtype=np.bool)
col_mask[:, row] = 1
  ROW_MASK[row] = row_mask
COLUMN_MASK[row] = col_mask
for col in range(DISTANCE_MASK_DIM):
horiz_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - col),
np.abs(np.arange(DISTANCE_MASK_DIM) - col - DISTANCE_MASK_DIM))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - col + DISTANCE_MASK_DIM))
vert_distance = np.minimum(
np.abs(np.arange(DISTANCE_MASK_DIM) - row),
np.abs(np.arange(DISTANCE_MASK_DIM) - row - DISTANCE_MASK_DIM))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(DISTANCE_MASK_DIM) - row + DISTANCE_MASK_DIM))
horiz_distance = np.tile(horiz_distance, [DISTANCE_MASK_DIM, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, DISTANCE_MASK_DIM])
manh_distance = horiz_distance + vert_distance
kernel = np.exp(-manh_distance/(DISTANCE_MASK_DIM/4))
DISTANCE_MASKS[(row, col)] = kernel
DISTANCES[(row, col)] = manh_distance
catch_distance_masks = {}
run_distance_masks = {}
for d in MOVE_DIRECTIONS:
if d is None:
catch_rows = np.array([]).astype(np.int)
catch_cols = np.array([]).astype(np.int)
if d == NORTH:
catch_rows = np.mod(row - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == SOUTH:
catch_rows = np.mod(row + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_cols = np.arange(DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == WEST:
catch_cols = np.mod(col - np.arange(half_distance_mask_dim) - 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col + np.arange(BOX_DIR_MAX_DISTANCE) + 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
if d == EAST:
catch_cols = np.mod(col + np.arange(half_distance_mask_dim) + 1,
DISTANCE_MASK_DIM)
catch_rows = np.arange(DISTANCE_MASK_DIM)
box_dir_cols = np.mod(col - np.arange(BOX_DIR_MAX_DISTANCE) - 1,
DISTANCE_MASK_DIM)
box_dir_rows = np.mod(row + np.arange(
2*(BOX_DIR_MAX_DISTANCE+1)-1) - BOX_DIR_MAX_DISTANCE,
DISTANCE_MASK_DIM)
catch_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
dtype=np.bool)
catch_mask[catch_rows[:, None], catch_cols] = 1
run_mask = np.copy(catch_mask)
run_mask[row, col] = 1
catch_distance_masks[d] = catch_mask
run_distance_masks[d] = run_mask
if d is not None:
box_dir_mask = np.zeros((DISTANCE_MASK_DIM, DISTANCE_MASK_DIM),
dtype=np.bool)
box_dir_mask[box_dir_rows[:, None], box_dir_cols] = 1
if d in [NORTH, SOUTH]:
box_dir_mask &= (horiz_distance <= vert_distance)
else:
box_dir_mask &= (horiz_distance >= vert_distance)
ROW_COL_BOX_DIR_MAX_DISTANCE_MASKS[(row, col, d)] = box_dir_mask
HALF_PLANES_CATCH[(row, col)] = catch_distance_masks
HALF_PLANES_RUN[(row, col)] = run_distance_masks
for d in range(1, DISTANCE_MASK_DIM):
ROW_COL_DISTANCE_MASKS[(row, col, d)] = manh_distance == d
for d in range(half_distance_mask_dim):
ROW_COL_MAX_DISTANCE_MASKS[(row, col, d)] = manh_distance <= d
ROW_COL_BOX_MAX_DISTANCE_MASKS[(row, col, d)] = np.logical_and(
horiz_distance <= d, vert_distance <= d)
for dist in range(2, half_distance_mask_dim+1):
dist_mask_dim = dist*2+1
row_pos = np.tile(np.expand_dims(np.arange(dist_mask_dim), 1),
[1, dist_mask_dim])
col_pos = np.tile(np.arange(dist_mask_dim), [dist_mask_dim, 1])
for direction in NOT_NONE_DIRECTIONS:
if direction == NORTH:
box_mask = (row_pos < dist) & (
np.abs(col_pos-dist) <= (dist-row_pos))
if direction == SOUTH:
box_mask = (row_pos > dist) & (
np.abs(col_pos-dist) <= (row_pos-dist))
if direction == WEST:
box_mask = (col_pos < dist) & (
np.abs(row_pos-dist) <= (dist-col_pos))
if direction == EAST:
box_mask = (col_pos > dist) & (
np.abs(row_pos-dist) <= (col_pos-dist))
BOX_DIRECTION_MASKS[(dist, direction)] = box_mask
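# Hedged illustration (added, not part of the original bot): DISTANCES holds
# toroidal Manhattan distances from each (row, col) on the 21x21 board, so the
# farthest square from any position is 10 + 10 = 20 steps away.
def _demo_distances():
    d = DISTANCES[(0, 0)]
    assert d[0, 0] == 0
    assert d[0, 1] == 1 and d[0, DISTANCE_MASK_DIM - 1] == 1  # east/west wrap
    assert d.max() == 2 * (DISTANCE_MASK_DIM // 2)            # == 20
    return d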
CONSIDERED_OTHER_DISTANCES = [13]
OTHER_DISTANCES = {}
for other_distance in CONSIDERED_OTHER_DISTANCES:
for row in range(other_distance):
for col in range(other_distance):
horiz_distance = np.minimum(
np.abs(np.arange(other_distance) - col),
np.abs(np.arange(other_distance) - col - other_distance))
horiz_distance = np.minimum(
horiz_distance,
np.abs(np.arange(other_distance) - col + other_distance))
vert_distance = np.minimum(
np.abs(np.arange(other_distance) - row),
np.abs(np.arange(other_distance) - row - other_distance))
vert_distance = np.minimum(
vert_distance,
np.abs(np.arange(other_distance) - row + other_distance))
horiz_distance = np.tile(horiz_distance, [other_distance, 1])
vert_distance = np.tile(np.expand_dims(vert_distance, 1),
[1, other_distance])
manh_distance = horiz_distance + vert_distance
OTHER_DISTANCES[(row, col, other_distance)] = manh_distance
D2_ROW_COL_SHIFTS_DISTANCES = [
(-2, 0, 2),
(-1, -1, 2), (-1, 0, 1), (-1, 1, 2),
(0, -2, 2), (0, -1, 1), (0, 1, 1), (0, 2, 2),
(1, -1, 2), (1, 0, 1), (1, 1, 2),
(2, 0, 2),
]
def row_col_from_square_grid_pos(pos, size):
col = pos % size
row = pos // size
return row, col
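# Hedged example (added for illustration): raw observation positions are flat
# row-major indices, e.g. index 44 on an assumed 21x21 board is row 2, col 2.
def _demo_row_col_from_pos(size=21):
    assert row_col_from_square_grid_pos(0, size) == (0, 0)
    assert row_col_from_square_grid_pos(44, size) == (2, 2)
    assert row_col_from_square_grid_pos(size * size - 1, size) == (size - 1, size - 1)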
def move_ship_row_col(row, col, direction, size):
if direction == "NORTH":
return (size-1 if row == 0 else row-1, col)
elif direction == "SOUTH":
return (row+1 if row < (size-1) else 0, col)
elif direction == "EAST":
return (row, col+1 if col < (size-1) else 0)
elif direction == "WEST":
return (row, size-1 if col == 0 else col-1)
elif direction is None:
return (row, col)
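# Hedged sketch (added for illustration): the board is a torus, so moves wrap
# around the edges; the expected tuples below are assumptions about intent.
def _demo_toroidal_moves(size=21):
    assert move_ship_row_col(0, 5, NORTH, size) == (size - 1, 5)
    assert move_ship_row_col(size - 1, size - 1, EAST, size) == (size - 1, 0)
    assert move_ship_row_col(7, 0, WEST, size) == (7, size - 1)
    assert move_ship_row_col(3, 4, None, size) == (3, 4)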
def get_directional_distance(r1, c1, r2, c2, size, d):
relative_pos = get_relative_position(r1, c1, r2, c2, size)
if d == NORTH:
directional_distance = -relative_pos[0]
elif d == SOUTH:
directional_distance = relative_pos[0]
elif d == EAST:
directional_distance = relative_pos[1]
elif d == WEST:
directional_distance = -relative_pos[1]
return directional_distance
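# Hedged example (added for illustration): the directional distance is the
# signed component of the toroidal relative position along a direction, e.g. a
# target two squares to the north scores +2 towards NORTH and -2 towards SOUTH.
def _demo_directional_distance(size=21):
    assert get_directional_distance(5, 5, 3, 5, size, NORTH) == 2
    assert get_directional_distance(5, 5, 3, 5, size, SOUTH) == -2
    assert get_directional_distance(5, 5, 5, 9, size, EAST) == 4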
def mirror_edges(observation, num_mirror_dim):
if num_mirror_dim > 0:
# observation = np.arange(225).reshape((15,15)) # Debugging test
assert len(observation.shape) == 2
grid_size = observation.shape[0]
new_grid_size = grid_size + 2*num_mirror_dim
mirrored_obs = np.full((new_grid_size, new_grid_size), np.nan)
# Fill in the original data
mirrored_obs[num_mirror_dim:(-num_mirror_dim),
num_mirror_dim:(-num_mirror_dim)] = observation
# Add top and bottom mirrored data
mirrored_obs[:num_mirror_dim, num_mirror_dim:(
-num_mirror_dim)] = observation[-num_mirror_dim:, :]
mirrored_obs[-num_mirror_dim:, num_mirror_dim:(
-num_mirror_dim)] = observation[:num_mirror_dim, :]
# Add left and right mirrored data
mirrored_obs[:, :num_mirror_dim] = mirrored_obs[
:, -(2*num_mirror_dim):(-num_mirror_dim)]
mirrored_obs[:, -num_mirror_dim:] = mirrored_obs[
:, num_mirror_dim:(2*num_mirror_dim)]
observation = mirrored_obs
return observation
def smooth2d(grid, smooth_kernel_dim=7, return_kernel=False):
edge_augmented = mirror_edges(grid, smooth_kernel_dim-1)
kernel = GAUSSIAN_2D_KERNELS[int(2*smooth_kernel_dim-1)]
convolved = signal.convolve2d(edge_augmented, kernel, mode="valid")
if return_kernel:
return convolved, kernel
else:
return convolved
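# Hedged usage sketch (added for illustration, not part of the original bot):
# mirror_edges pads a grid with copies of the opposite edges so that the
# "valid" convolution in smooth2d behaves like a wrap-around (toroidal) blur;
# the output keeps the input shape. The 21x21 grid size is an assumption.
def _demo_smooth2d():
    grid = np.zeros((21, 21))
    grid[0, 0] = 100.0  # a single halite spike in a corner
    padded = mirror_edges(grid, 2)
    assert padded.shape == (25, 25)
    smoothed = smooth2d(grid, smooth_kernel_dim=7)
    assert smoothed.shape == grid.shape
    # Mass leaks across the edges because of the mirrored padding.
    return smoothed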
def get_relative_position(row, col, other_row, other_col, size):
if row >= other_row:
if (other_row + size - row) < (row - other_row):
row_diff = (other_row + size - row)
else:
row_diff = -(row - other_row)
else:
if (row + size - other_row) < (other_row - row):
row_diff = -(row + size - other_row)
else:
row_diff = other_row - row
if col >= other_col:
if (other_col + size - col) < (col - other_col):
col_diff = (other_col + size - col)
else:
col_diff = -(col - other_col)
else:
if (col + size - other_col) < (other_col - col):
col_diff = -(col + size - other_col)
else:
col_diff = other_col - col
return (row_diff, col_diff)
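# Hedged illustration (added): relative positions are computed on the torus,
# so the shorter wrap-around offset is returned; the expected values below are
# assumptions about the intended semantics.
def _demo_relative_position(size=21):
    assert get_relative_position(5, 5, 5, 6, size) == (0, 1)          # one step east
    assert get_relative_position(0, 0, 0, size - 1, size) == (0, -1)  # wrap west
    assert get_relative_position(0, 10, size - 1, 10, size) == (-1, 0)  # wrap north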
def update_scores_opponent_ships(
config, collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, opponent_ships, opponent_bases, halite_ships, row, col,
grid_size, spawn_cost, drop_None_valid, obs_halite, collect_rate, np_rng,
opponent_ships_sensible_actions, opponent_ships_sensible_actions_no_risk,
ignore_bad_attack_directions, observation, ship_k, my_bases, my_ships,
steps_remaining, history, escape_influence_probs, player_ids, env_obs_ids,
env_observation, main_base_distances, nearest_base_distances,
end_game_base_return, camping_override_strategy,
attack_campers_override_strategy, boxed_in_attack_squares,
safe_to_collect, boxed_in_zero_halite_opponents, ignore_convert_positions,
avoid_attack_squares_zero_halite, n_step_avoid_min_die_prob_cutoff,
safe_to_return_halites, safe_to_return_base_halites,
my_nearest_base_distances):
direction_halite_diff_distance_raw = {
NORTH: [], SOUTH: [], EAST: [], WEST: []}
my_bases_or_ships = np.logical_or(my_bases, my_ships)
chase_details = history['chase_counter'][0].get(ship_k, None)
take_my_square_next_halite_diff = None
take_my_next_square_dir = None
wide_cycle_mask = ROW_COL_MAX_DISTANCE_MASKS[row, col, 3]
tight_cycle_mask = ROW_COL_MAX_DISTANCE_MASKS[row, col, 2]
opponents_in_cycle = np.any(opponent_ships[tight_cycle_mask]) and (
np.all(history['empty_or_cycled_positions'][wide_cycle_mask]) or (
np.all(history['empty_or_extended_cycled_positions'][tight_cycle_mask])))
if opponents_in_cycle:
print("EXTRAPOLATING OPPONENT CYCLIC BEHAVIOR", observation['step'], row,
col)
if len(camping_override_strategy) == 0:
navigation_zero_halite_risk_threshold = 0
else:
navigation_zero_halite_risk_threshold = camping_override_strategy[0]
if camping_override_strategy[1].max() >= 1e4:
collect_grid_scores = 1e-4*collect_grid_scores + (
camping_override_strategy[1])
else:
collect_grid_scores += camping_override_strategy[1]
attack_base_scores += camping_override_strategy[2]
if len(attack_campers_override_strategy) > 0:
ignore_opponent_row = attack_campers_override_strategy[0]
ignore_opponent_col = attack_campers_override_strategy[1]
ignore_opponent_distance = attack_campers_override_strategy[5]
collect_grid_scores[ignore_opponent_row, ignore_opponent_col] += (
attack_campers_override_strategy[2])
navigation_zero_halite_risk_threshold = max(
navigation_zero_halite_risk_threshold,
attack_campers_override_strategy[6])
else:
ignore_opponent_row = None
ignore_opponent_col = None
ignore_opponent_distance = None
# Identify directions where I can certainly reach the base in time and always
# mark them as valid
ship_halite = halite_ships[row, col]
safe_return_base_directions = []
if ship_halite < safe_to_return_halites[row, col]:
for base_safe_return_halite, base_location in safe_to_return_base_halites:
if ship_halite < base_safe_return_halite[row, col]:
for d in get_dir_from_target(
row, col, base_location[0], base_location[1], grid_size):
if not d is None and not d in safe_return_base_directions:
safe_return_base_directions.append(d)
# if observation['step'] == 131 and ship_k in ['63-1']:
# import pdb; pdb.set_trace()
can_stay_still_zero_halite = True
for row_shift, col_shift, distance in D2_ROW_COL_SHIFTS_DISTANCES:
considered_row = (row + row_shift) % grid_size
considered_col = (col + col_shift) % grid_size
if opponent_ships[considered_row, considered_col] and (
ignore_opponent_row is None or (((
considered_row != ignore_opponent_row) or (
considered_col != ignore_opponent_col)) and (
ignore_opponent_distance > 2))):
relevant_dirs = []
halite_diff = halite_ships[row, col] - halite_ships[
considered_row, considered_col]
assume_take_my_square_next = False
# if observation['step'] == 266 and row == 11 and col == 15:
# import pdb; pdb.set_trace()
# Extrapolate the opponent behavior if we have been chased for a
# while and chasing is likely to continue
if distance == 1 and chase_details is not None and (
chase_details[1] >= config[
'min_consecutive_chase_extrapolate']) and (
considered_row, considered_col) == (
chase_details[4], chase_details[5]):
chaser_row = chase_details[4]
chaser_col = chase_details[5]
to_opponent_dir = get_dir_from_target(
row, col, chaser_row, chaser_col, grid_size)[0]
opp_to_me_dir = OPPOSITE_MAPPING[to_opponent_dir]
rel_opp_to_me_dir = RELATIVE_DIR_MAPPING[opp_to_me_dir]
opp_can_move_to_me = rel_opp_to_me_dir in (
opponent_ships_sensible_actions_no_risk[chaser_row, chaser_col])
# There is a unique opponent id with the least amount of halite
# on the chaser square or the chaser has at least one friendly
# ship that can replace it
chaser_can_replace = None
chaser_is_chased_by_not_me = None
if opp_can_move_to_me:
chaser_id = player_ids[chaser_row, chaser_col]
near_chaser = ROW_COL_MAX_DISTANCE_MASKS[
chaser_row, chaser_col, 1]
near_halite = halite_ships[near_chaser]
near_chaser_friendly_halite = near_halite[
(near_halite >= 0) & (player_ids[near_chaser] == chaser_id)]
min_non_chaser_halite = near_halite[
(near_halite >= 0) & (
player_ids[near_chaser] != chaser_id)].min()
min_near_chaser_halite = near_halite[near_halite >= 0].min()
opponent_min_hal_ids = player_ids[np.logical_and(
near_chaser, halite_ships == min_near_chaser_halite)]
near_me = ROW_COL_MAX_DISTANCE_MASKS[row, col, 1]
near_me_threat_players = player_ids[np.logical_and(
near_me, (halite_ships >= 0) & (
halite_ships < halite_ships[row, col]))]
double_opp_chase = (near_me_threat_players.size > 1) and (
np.all(near_me_threat_players == chaser_id))
chaser_can_replace = ((opponent_min_hal_ids.size > 1) and (
np.all(opponent_min_hal_ids == chaser_id) or (
(opponent_min_hal_ids == chaser_id).sum() > 1)) or (
(near_chaser_friendly_halite <= (
min_non_chaser_halite)).sum() > 1)) or double_opp_chase
if opp_can_move_to_me and not chaser_can_replace:
chaser_players_index = env_obs_ids[chaser_id]
chaser_k = [k for k, v in env_observation.players[
chaser_players_index][2].items() if v[0] == (
chaser_row*grid_size + chaser_col)][0]
chaser_is_chased = chaser_k in history[
'chase_counter'][chaser_id]
chaser_is_chased_by_not_me = chaser_is_chased
if chaser_is_chased:
chaser_chaser = history['chase_counter'][chaser_id][chaser_k]
chaser_is_chased_by_not_me = (chaser_chaser[4] is None) or (
player_ids[chaser_chaser[4], chaser_chaser[5]] != 0)
if opp_can_move_to_me and not chaser_can_replace and not (
chaser_is_chased_by_not_me):
assume_take_my_square_next = True
take_my_square_next_halite_diff = halite_diff
take_my_next_square_dir = to_opponent_dir
# if observation['step'] == 96 and ship_k in ['80-1']:
# import pdb; pdb.set_trace()
can_ignore_ship = False
if (considered_row, considered_col) in boxed_in_zero_halite_opponents:
can_stay_still_zero_halite = can_stay_still_zero_halite and (
distance == 2)
else:
if halite_ships[row, col] == halite_ships[
considered_row, considered_col]:
opponent_id = player_ids[considered_row, considered_col]
# Note: use the opponent distance because the opponent model is
# learned using the opponent distance to the nearest base (with near
# base distance cutoff typically at 2)
is_near_base = nearest_base_distances[
considered_row, considered_col] <= config['log_near_base_distance']
risk_lookup_k = str(is_near_base) + '_' + str(distance)
if distance == 2:
can_ignore_ship = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k] <= (
navigation_zero_halite_risk_threshold)
else:
risk_lookup_k_dist_zero = str(is_near_base) + '_' + str(0)
d1_threat = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k] > (
navigation_zero_halite_risk_threshold)
d0_threat = history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k_dist_zero] > (
navigation_zero_halite_risk_threshold)
can_stay_still_zero_halite = can_stay_still_zero_halite and (
not d0_threat)
# if is_near_base and history['zero_halite_move_behavior'][
# opponent_id][str(is_near_base) + '_' + str(0) + '_ever_risky']:
# import pdb; pdb.set_trace()
can_ignore_ship = not (d0_threat or d1_threat)
if not assume_take_my_square_next and not can_ignore_ship:
relevant_dirs += [] if row_shift >= 0 else [NORTH]
relevant_dirs += [] if row_shift <= 0 else [SOUTH]
relevant_dirs += [] if col_shift <= 0 else [EAST]
relevant_dirs += [] if col_shift >= 0 else [WEST]
# When the opponents are in a cycle: only consider the direction I
# expect my opponent to be at in the next step (if any)
if opponents_in_cycle:
relevant_dirs = []
opponent_ship_key = history['opponent_ship_pos_to_key'][(
considered_row, considered_col)]
opponent_id = player_ids[considered_row, considered_col]
likely_opponent_action = history['opponent_cycle_counters'][
opponent_id-1][opponent_ship_key][1][0]
likely_opponent_next_pos = move_ship_row_col(
considered_row, considered_col, likely_opponent_action, grid_size)
relative_other_pos = get_relative_position(
row, col, likely_opponent_next_pos[0], likely_opponent_next_pos[1],
grid_size)
current_opp_relative_dir = get_relative_position(
row, col, considered_row, considered_col, grid_size)
if np.abs(relative_other_pos[0]) + np.abs(
relative_other_pos[1]) <= 1:
# At distance 1 or 0
# import pdb; pdb.set_trace()
if relative_other_pos[0] == 0 and relative_other_pos[1] == 0:
relevant_dirs = [RELATIVE_DIR_TO_DIRECTION_MAPPING[
current_opp_relative_dir]]
elif relative_other_pos == (0, 0):
relevant_dirs = [RELATIVE_DIR_TO_DIRECTION_MAPPING[
relative_other_pos]]
# if observation['step'] == 215 and ship_k == '2-2':
# import pdb; pdb.set_trace()
for d in relevant_dirs:
direction_halite_diff_distance_raw[d].append(
(halite_diff, distance))
direction_halite_diff_distance = {}
for d in direction_halite_diff_distance_raw:
vals = np.array(direction_halite_diff_distance_raw[d])
if vals.size:
diffs = vals[:, 0]
distances = vals[:, 1]
max_diff = diffs.max()
if max_diff > 0:
if can_stay_still_zero_halite:
greater_min_distance = distances[diffs > 0].min()
else:
          # My halite is > 0 and I have a threat at D1 from an aggressive equal
          # halite ship and a threat from a less halite ship at D2
greater_min_distance = distances[diffs >= 0].min()
direction_halite_diff_distance[d] = (max_diff, greater_min_distance)
elif max_diff == 0:
equal_min_distance = distances[diffs == 0].min()
direction_halite_diff_distance[d] = (max_diff, equal_min_distance)
else:
min_diff = diffs.min()
min_diff_min_distance = distances[diffs == min_diff].min()
direction_halite_diff_distance[d] = (min_diff, min_diff_min_distance)
else:
direction_halite_diff_distance[d] = None
preferred_directions = []
strongly_preferred_directions = []
valid_directions = copy.copy(MOVE_DIRECTIONS)
one_step_valid_directions = copy.copy(MOVE_DIRECTIONS)
bad_directions = []
ignore_catch = np_rng.uniform() < config['ignore_catch_prob']
# if observation['step'] == 221 and ship_k == '54-1':
# import pdb; pdb.set_trace()
# x=1
for direction, halite_diff_dist in direction_halite_diff_distance.items():
if halite_diff_dist is not None:
move_row, move_col = move_ship_row_col(row, col, direction, grid_size)
no_escape_bonus = 0 if not (
boxed_in_attack_squares[move_row, move_col]) else 5e3
halite_diff = halite_diff_dist[0]
if halite_diff >= 0:
# I should avoid a collision
distance_multiplier = 1/halite_diff_dist[1]
mask_collect_return = np.copy(HALF_PLANES_RUN[(row, col)][direction])
valid_directions.remove(direction)
one_step_valid_directions.remove(direction)
bad_directions.append(direction)
if halite_diff_dist[1] == 1:
if halite_diff > 0 or not can_stay_still_zero_halite:
# Only suppress the stay still action if the opponent has something
# to gain.
# Exception: the opponent may aggressively attack my zero halite
# ships
if None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
bad_directions.append(None)
else:
mask_collect_return[row, col] = False
# I can safely mine halite at the current square if the opponent ship
# is >1 move away
if halite_diff_dist[1] > 1:
mask_collect_return[row, col] = False
collect_grid_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['collect_run_opponent_multiplier'])*distance_multiplier
return_to_base_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['return_base_run_opponent_multiplier'])
base_nearby_in_direction_mask = np.logical_and(
ROW_COL_MAX_DISTANCE_MASKS[(row, col, 2)], mask_collect_return)
base_nearby_in_direction = np.logical_and(
base_nearby_in_direction_mask, opponent_bases).sum() > 0
if not ignore_bad_attack_directions and not base_nearby_in_direction:
attack_base_scores -= mask_collect_return*(ship_halite+spawn_cost)*(
config['attack_base_run_opponent_multiplier'])
mask_establish = np.copy(mask_collect_return)
mask_establish[row, col] = False
establish_base_scores -= mask_establish*(ship_halite+spawn_cost)*(
config['establish_base_run_opponent_multiplier'])
elif halite_diff < 0 and (
not ignore_catch or no_escape_bonus > 0) and (not (
move_row, move_col) in ignore_convert_positions):
      # I would like a collision unless there is another opponent ship
      # chasing me - risk avoiding policy for now: if there is at least
      # one ship in a direction that has less halite, I should avoid it
if no_escape_bonus > 0:
halite_diff = max(-spawn_cost/2, halite_diff) - no_escape_bonus
else:
halite_diff = 0 # Dubious choice, likely not very important
# halite_diff = max(-spawn_cost/2, halite_diff) - no_escape_bonus
distance_multiplier = 1/halite_diff_dist[1]
mask_collect_return = np.copy(HALF_PLANES_CATCH[(row, col)][direction])
collect_grid_scores -= mask_collect_return*halite_diff*(
config['collect_catch_opponent_multiplier'])*distance_multiplier
return_to_base_scores -= mask_collect_return*halite_diff*(
config['return_base_catch_opponent_multiplier'])*distance_multiplier
attack_base_scores -= mask_collect_return*halite_diff*(
config['attack_base_catch_opponent_multiplier'])*distance_multiplier
mask_establish = np.copy(mask_collect_return)
mask_establish[row, col] = False
establish_base_scores -= mask_establish*halite_diff*(
config['establish_base_catch_opponent_multiplier'])*(
distance_multiplier)
if no_escape_bonus > 0:
strongly_preferred_directions.append(direction)
if boxed_in_attack_squares[row, col] and no_escape_bonus > 0 and (
ship_halite > 0 or obs_halite[row, col] == 0):
# Also incentivize the None action when it is a possible escape
# square of an opponent - divide by 2 to make the None action less
# dominant (likely check in several directions)
collect_grid_scores[row, col] += no_escape_bonus/2
if not None in strongly_preferred_directions:
strongly_preferred_directions.append(None)
preferred_directions.append(direction)
if take_my_square_next_halite_diff is not None and None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
bad_directions.append(None)
if drop_None_valid and None in valid_directions:
valid_directions.remove(None)
one_step_valid_directions.remove(None)
valid_non_base_directions = []
base_directions = []
for d in valid_directions:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if not opponent_bases[move_row, move_col] :
valid_non_base_directions.append(d)
else:
base_directions.append(d)
# For the remaining valid non base directions: compute a score that resembles
# the probability of being boxed in during the next step
two_step_bad_directions = []
n_step_bad_directions = []
n_step_bad_directions_die_probs = {}
if steps_remaining > 1:
for d in valid_non_base_directions:
my_next_halite = halite_ships[row, col] if d != None else (
halite_ships[row, col] + int(collect_rate*obs_halite[row, col]))
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
my_next_halite = 0 if my_bases[move_row, move_col] else my_next_halite
opponent_mask = ROW_COL_MAX_DISTANCE_MASKS[(move_row, move_col, 3)]
less_halite_threat_opponents = np.where(np.logical_and(
opponent_mask, np.logical_and(
opponent_ships, my_next_halite > halite_ships)))
num_threat_ships = less_halite_threat_opponents[0].size
if num_threat_ships > 1 and not d in safe_return_base_directions:
all_dir_threat_counter = {
(-1, 0): 0, (1, 0): 0, (0, -1): 0, (0, 1): 0, (0, 0): 0}
for i in range(num_threat_ships):
other_row = less_halite_threat_opponents[0][i]
other_col = less_halite_threat_opponents[1][i]
relative_other_pos = get_relative_position(
move_row, move_col, other_row, other_col, grid_size)
for diff_rel_row, diff_rel_col, other_gather in MOVE_GATHER_OPTIONS:
# Only consider sensible opponent actions
if (diff_rel_row, diff_rel_col) in opponent_ships_sensible_actions[
other_row, other_col]:
is_threat = (not other_gather) or (my_next_halite > (
halite_ships[other_row, other_col] + int(
collect_rate*obs_halite[other_row, other_col])))
if is_threat:
other_rel_row = relative_other_pos[0] + diff_rel_row
other_rel_col = relative_other_pos[1] + diff_rel_col
move_diff = np.abs(other_rel_row) + np.abs(other_rel_col)
if move_diff < 3 and move_diff > 0:
threat_dirs = TWO_STEP_THREAT_DIRECTIONS[
(other_rel_row, other_rel_col)]
for threat_row_diff, threat_col_diff in threat_dirs:
all_dir_threat_counter[
(threat_row_diff, threat_col_diff)] += 1
# if observation['step'] == 112 and ship_k == '76-1':
# import pdb; pdb.set_trace()
# Aggregate the threat count in all_dir_threat_counter
threat_counts = np.array(list(all_dir_threat_counter.values()))
threat_score = np.sqrt(threat_counts.prod())
if threat_score > 0:
# Disincentivize an action that can get me boxed in on the next step
mask_avoid_two_steps = np.copy(HALF_PLANES_RUN[(row, col)][d])
if d is not None:
mask_avoid_two_steps[row, col] = False
collect_grid_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
return_to_base_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
establish_base_scores[mask_avoid_two_steps] *= ((
config['two_step_avoid_boxed_opponent_multiplier_base']) ** (
threat_score))
two_step_bad_directions.append(d)
if d not in two_step_bad_directions and not end_game_base_return and (
my_next_halite > 0) and (not d in safe_return_base_directions) and (
d is not None or not safe_to_collect[row, col]):
# For the remaining valid directions: compute a score that resembles
# the probability of being boxed in sometime in the future
opponent_mask_lt = ROW_COL_MAX_DISTANCE_MASKS[
(move_row, move_col, min(
steps_remaining, config['n_step_avoid_window_size']))]
less_halite_threat_opponents_lt = np.where(np.logical_and(
opponent_mask_lt, np.logical_and(
opponent_ships, my_next_halite > halite_ships)))
num_threat_ships_lt = less_halite_threat_opponents_lt[0].size
# Ignore the box in threat if I have a base and at least one zero
# halite ship one step from the move square
ignore_threat = my_bases[
ROW_COL_DISTANCE_MASKS[(move_row, move_col, 1)]].sum() > 0 and ((
halite_ships[np.logical_and(
my_ships,
ROW_COL_DISTANCE_MASKS[move_row, move_col, 1])] == 0).sum() > 0)
# if observation['step'] == 359 and ship_k == '67-1':
# import pdb; pdb.set_trace()
if not ignore_threat:
lt_catch_prob = {k: [] for k in RELATIVE_NOT_NONE_DIRECTIONS}
for i in range(num_threat_ships_lt):
other_row = less_halite_threat_opponents_lt[0][i]
other_col = less_halite_threat_opponents_lt[1][i]
other_sensible_actions = opponent_ships_sensible_actions[
other_row, other_col]
relative_other_pos = get_relative_position(
move_row, move_col, other_row, other_col, grid_size)
# Give less weight to the other ship if there is a base of mine or
# a/multiple less halite ships in between
# FUTURE WORK: Also give additional move leeway if I have nearby
# bases? Especially relevant for None (collect) actions
distance_move_other = np.abs(relative_other_pos).sum()
mask_between_move_and_threat = np.logical_and(
DISTANCES[(move_row, move_col)] < distance_move_other,
DISTANCES[(other_row, other_col)] < distance_move_other)
less_halite_ship_base_count = np.logical_and(
np.logical_and(my_bases_or_ships, mask_between_move_and_threat),
halite_ships <= halite_ships[other_row, other_col]).sum() + 0*(
my_bases[ROW_COL_MAX_DISTANCE_MASKS[
move_row, move_col, 2]].sum())
my_material_defense_multiplier = 2**less_halite_ship_base_count
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
nz_dim = int(threat_dir[0] == 0)
dir_offset = relative_other_pos[nz_dim]*threat_dir[nz_dim]
other_dir_abs_offset = np.abs(relative_other_pos[1-nz_dim])
# if observation['step'] == 155 and ship_k == '63-2':
# import pdb; pdb.set_trace()
if dir_offset >= 0 and (other_dir_abs_offset-1) <= dir_offset:
# Ignore the threat if the ship is on the diagonal and can not
# move in the direction of the threat dir
if (other_dir_abs_offset-1) == dir_offset and len(
other_sensible_actions) < len(MOVE_DIRECTIONS):
if nz_dim == 0:
threat_other_dir = (
0, 1 if relative_other_pos[1-nz_dim] < 0 else -1)
else:
threat_other_dir = (
1 if relative_other_pos[1-nz_dim] < 0 else -1, 0)
threat_other_dirs = [threat_other_dir, threat_dir]
threats_actionable = np.array([
t in other_sensible_actions for t in threat_other_dirs])
consider_this_threat = np.any(threats_actionable)
if threats_actionable[1] and not threats_actionable[0]:
# Lower the threat weight - the opponent can not directly
# attack the considered threat direction and can only move
# along the threat direction
other_dir_abs_offset += 2
else:
consider_this_threat = True
if other_dir_abs_offset == 0 and dir_offset == 0:
# The scenario where a one step threat is ignored due to
# being chased for a while and moving to the threat is
# currently considered.
# This avoids division by zero but is overridden later anyway
other_dir_abs_offset = 2
if consider_this_threat:
lt_catch_prob[threat_dir].append(max(2,
other_dir_abs_offset+dir_offset)*(
my_material_defense_multiplier))
# Add a "bootstrapped" catch probability using the density of the
# players towards the edge of the threat direction
# Only add it if the next halite is > 0 (otherwise assume I can
# always escape)
# Also factor in the distance to my nearest non abandoned base
if my_next_halite > 0:
current_nearest_base_distance = my_nearest_base_distances[row, col]
moved_nearest_base_distance = my_nearest_base_distances[
move_row, move_col]
move_distance_difference = current_nearest_base_distance - (
moved_nearest_base_distance)
for threat_dir in RELATIVE_NOT_NONE_DIRECTIONS:
dens_threat_rows = np.mod(move_row + threat_dir[0]*(
np.arange(config['n_step_avoid_window_size']//2,
config['n_step_avoid_window_size'])), grid_size)
dens_threat_cols = np.mod(move_col + threat_dir[1]*(
1+np.arange(config['n_step_avoid_window_size']//2,
config['n_step_avoid_window_size'])), grid_size)
escape_probs = escape_influence_probs[
dens_threat_rows, dens_threat_cols]
mean_escape_prob = escape_probs.mean()
if escape_probs[:2].min() < 1:
if move_distance_difference > 0:
# When in trouble, it is typically better to move towards one
# of my bases. The move closer distance is of course 1.
mean_escape_prob *= 1.25
if mean_escape_prob < 1:
lt_catch_prob[threat_dir].append(1/(1-mean_escape_prob+1e-9))
# if observation['step'] == 75 and ship_k == '64-1' and d in [
# EAST, WEST]:
# import pdb; pdb.set_trace()
# if observation['step'] == 112 and ship_k == '76-1':
# import pdb; pdb.set_trace()
if np.all([len(v) > 0 for v in lt_catch_prob.values()]):
# Interpretation: for a threat at distance d, I have a probability
# of surviving it of (d-1)/d. The probability of surviving all
# threat is the product of all individual threats
survive_probs = np.array([
(np.maximum(0.2, (np.array(lt_catch_prob[k])-1)/np.array(
lt_catch_prob[k]))).prod() for k in lt_catch_prob])
min_die_prob = 1-survive_probs.max()
if main_base_distances.max() > 0:
if main_base_distances[move_row, move_col] <= 2:
min_die_prob = 0
else:
min_die_prob = max(
0, min_die_prob-0.33**main_base_distances[
move_row, move_col])
# if observation['step'] == 155 and ship_k in ['63-2', '63-1']:
# import pdb; pdb.set_trace()
# Disincentivize an action that can get me boxed in during the next
# N steps
mask_avoid_n_steps = np.copy(HALF_PLANES_RUN[(row, col)][d])
if d is not None:
mask_avoid_n_steps[row, col] = False
collect_grid_scores[mask_avoid_n_steps] *= ((
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob))
return_to_base_scores[mask_avoid_n_steps] *= (
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob)
establish_base_scores[mask_avoid_n_steps] *= (
config['n_step_avoid_boxed_opponent_multiplier_base']) ** (
min_die_prob)
n_step_bad_directions_die_probs[d] = min_die_prob
# Correction to act with more risk towards the end of the game
die_prob_cutoff = (n_step_avoid_min_die_prob_cutoff + 0.01*max(
0, 50-steps_remaining))
if d is None:
if observation['relative_step'] > config[
'end_hunting_season_relative_step']:
die_prob_cutoff = max(die_prob_cutoff, config[
'after_hunting_season_collect_max_n_step_risk'])
elif observation['relative_step'] > config[
'late_hunting_season_more_collect_relative_step']:
die_prob_cutoff = max(die_prob_cutoff, config[
'late_hunting_season_collect_max_n_step_risk'])
# print(observation['step'], die_prob_cutoff)
if min_die_prob > die_prob_cutoff:
n_step_bad_directions.append(d)
# if observation['step'] == 215 and ship_k == '2-2':
# import pdb; pdb.set_trace()
# Corner case: if I have a zero halite ship that is boxed in by other zero
# halite ships on a zero halite square: compute the risk for all available
# actions and only retain the actions with the lowest collision risks
if halite_ships[row, col] == 0 and len(valid_directions) == 0 and (
obs_halite[row, col] == 0):
risk_scores = np.zeros(len(MOVE_DIRECTIONS))
for risk_id, d in enumerate(MOVE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config['log_near_base_distance']
distance = int(d is not None) + int(potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k])
best_risk_score = risk_scores.min()
if best_risk_score < 0.05:
valid_directions = [d for d_id, d in enumerate(
MOVE_DIRECTIONS) if risk_scores[d_id] == best_risk_score]
else:
valid_directions = [None]
one_step_valid_directions = copy.copy(valid_directions)
bad_directions = list(set(MOVE_DIRECTIONS) - set(valid_directions))
# if observation['step'] == 169 and ship_k == '65-2':
# import pdb; pdb.set_trace()
# Corner case: if I have a zero halite ship that is boxed in by other zero
# halite ships on a non-zero halite square: prefer moving in directions where
# there is a lower risk of losing the ship as a function of opponent zero
# halite behavior
if halite_ships[row, col] == 0 and obs_halite[row, col] > 0 and (
(len(valid_directions) == 1 and (valid_directions[0] is None)) or (
len(valid_directions) == 0)):
risk_scores = np.zeros(len(MOVE_DIRECTIONS))
risk_scores[0] = 1 # Definitely don't stand still
for risk_id, d in enumerate(MOVE_DIRECTIONS):
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
for potential_threat_dir in MOVE_DIRECTIONS:
threat_row, threat_col = move_ship_row_col(
move_row, move_col, potential_threat_dir, grid_size)
if opponent_ships[threat_row, threat_col] and halite_ships[
threat_row, threat_col] == 0:
opponent_id = player_ids[threat_row, threat_col]
is_near_base = nearest_base_distances[
threat_row, threat_col] <= config['log_near_base_distance']
distance = int(d is not None) + int(potential_threat_dir is not None)
risk_lookup_k = str(is_near_base) + '_' + str(distance)
risk_scores[risk_id] = max(
risk_scores[risk_id], history['zero_halite_move_behavior'][
opponent_id][risk_lookup_k])
best_risk_score = risk_scores.min()
valid_directions = [d for d_id, d in enumerate(
MOVE_DIRECTIONS) if risk_scores[d_id] == best_risk_score]
one_step_valid_directions = copy.copy(valid_directions)
bad_directions = list(set(MOVE_DIRECTIONS) - set(valid_directions))
# Treat attack squares I should avoid with a zero halite ship as N-step bad
# directions, if that leaves us with options
if np.any(avoid_attack_squares_zero_halite) and halite_ships[
row, col] == 0 and steps_remaining > 1:
avoid_attack_directions = []
for d in valid_non_base_directions:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if avoid_attack_squares_zero_halite[move_row, move_col]:
avoid_attack_directions.append(d)
if len(avoid_attack_directions):
all_bad_dirs = set(bad_directions + (
two_step_bad_directions + n_step_bad_directions))
updated_bad_dirs = all_bad_dirs.union(set(avoid_attack_directions))
if len(updated_bad_dirs) > len(all_bad_dirs) and len(
updated_bad_dirs) < len(MOVE_DIRECTIONS):
new_bad_directions = list(updated_bad_dirs.difference(all_bad_dirs))
# import pdb; pdb.set_trace()
n_step_bad_directions.extend(new_bad_directions)
for new_bad_dir in new_bad_directions:
if not new_bad_dir in n_step_bad_directions_die_probs:
n_step_bad_directions_die_probs[new_bad_dir] = 0
# Corner case: if I can replace a chaser position and there are only very
# bad two step escape directions left: replace the chaser
if take_my_next_square_dir is not None and (
take_my_next_square_dir in two_step_bad_directions):
make_chase_replace_n_bad = True
for d in NOT_NONE_DIRECTIONS:
if not d == take_my_next_square_dir:
if d in n_step_bad_directions:
if n_step_bad_directions_die_probs[d] < 0.6:
make_chase_replace_n_bad = False
break
elif d in valid_directions:
make_chase_replace_n_bad = False
break
if make_chase_replace_n_bad:
print("CHASE: turning two step bad into n step bad", observation['step'],
row, col)
two_step_bad_directions.remove(take_my_next_square_dir)
# Treat the chasing - replace chaser position as an n-step bad action.
# Otherwise, we can get trapped in a loop of dumb behavior.
if take_my_next_square_dir is not None and not take_my_next_square_dir in (
two_step_bad_directions) and not take_my_next_square_dir in (
n_step_bad_directions):
n_step_bad_directions.append(take_my_next_square_dir)
n_step_bad_directions_die_probs[take_my_next_square_dir] = 1/4
# If all valid non base directions are n step bad actions: drop n step bad
# actions (call them 2 step bad) that are significantly worse than other n
# step bad actions
all_original_n_step_bad_directions = copy.copy(n_step_bad_directions)
all_n_step_bad_directions_die_probs = copy.copy(
n_step_bad_directions_die_probs)
if len(n_step_bad_directions) > 1 and len(
n_step_bad_directions) == len(valid_non_base_directions) and np.all(
np.array([d in n_step_bad_directions for d in (
valid_non_base_directions)])):
die_probs = np.array(list(n_step_bad_directions_die_probs.values()))
max_die_prob = min(die_probs.min()*2, die_probs.min()+0.1)
delete_from_n_step_bad = []
for d in n_step_bad_directions:
if n_step_bad_directions_die_probs[d] > max_die_prob and (
not d in safe_return_base_directions):
delete_from_n_step_bad.append(d)
for d in delete_from_n_step_bad:
two_step_bad_directions.append(d)
n_step_bad_directions.remove(d)
del n_step_bad_directions_die_probs[d]
if valid_non_base_directions:
valid_not_preferred_dirs = list(set(
two_step_bad_directions + n_step_bad_directions))
if valid_not_preferred_dirs and (
len(valid_non_base_directions) - len(valid_not_preferred_dirs)) > 0:
# Drop 2 and n step bad directions if that leaves us with valid options
bad_directions.extend(valid_not_preferred_dirs)
bad_directions = list(set(bad_directions))
valid_directions = list(
set(valid_directions) - set(valid_not_preferred_dirs))
else:
# Drop 2 step bad directions if that leaves us with valid options
valid_not_preferred_dirs = set(two_step_bad_directions)
if valid_not_preferred_dirs and (
len(valid_non_base_directions) - len(valid_not_preferred_dirs)) > 0:
bad_directions.extend(valid_not_preferred_dirs)
valid_directions = list(
set(valid_directions) - set(valid_not_preferred_dirs))
# Only keep the strongly preferred directions if there are any
if len(strongly_preferred_directions) > 0:
preferred_directions = strongly_preferred_directions
# Drop repetitive actions if that leaves us with valid options
if ship_k in history['avoid_cycle_actions']:
repetitive_action = history['avoid_cycle_actions'][ship_k]
if repetitive_action in valid_directions and len(valid_directions) > 1:
valid_directions.remove(repetitive_action)
if repetitive_action in preferred_directions:
preferred_directions.remove(repetitive_action)
if repetitive_action in one_step_valid_directions:
one_step_valid_directions.remove(repetitive_action)
bad_directions.append(repetitive_action)
# if observation['step'] == 180 and ship_k == '10-2':
# import pdb; pdb.set_trace()
return (collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, preferred_directions, valid_directions,
len(bad_directions) == len(MOVE_DIRECTIONS), two_step_bad_directions,
n_step_bad_directions, one_step_valid_directions,
n_step_bad_directions_die_probs, all_original_n_step_bad_directions,
all_n_step_bad_directions_die_probs)
# Update the scores as a function of blocking opponent bases
def update_scores_blockers(
collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, row, col, grid_size, blockers,
blocker_max_distances_to_consider, valid_directions,
one_step_valid_directions, early_base_direct_dir=None,
blocker_max_distance=half_distance_mask_dim, update_attack_base=True):
one_step_bad_directions = []
for d in NOT_NONE_DIRECTIONS:
if d == NORTH:
rows = np.mod(row - (1 + np.arange(blocker_max_distance)), grid_size)
cols = np.repeat(col, blocker_max_distance)
considered_vals = blockers[rows, col]
considered_max_distances = blocker_max_distances_to_consider[rows, col]
elif d == SOUTH:
rows = np.mod(row + (1 + np.arange(blocker_max_distance)), grid_size)
cols = np.repeat(col, blocker_max_distance)
considered_vals = blockers[rows, col]
considered_max_distances = blocker_max_distances_to_consider[rows, col]
elif d == WEST:
rows = np.repeat(row, blocker_max_distance)
cols = np.mod(col - (1 + np.arange(blocker_max_distance)), grid_size)
considered_vals = blockers[row, cols]
considered_max_distances = blocker_max_distances_to_consider[row, cols]
elif d == EAST:
rows = np.repeat(row, blocker_max_distance)
cols = np.mod(col + (1 + np.arange(blocker_max_distance)), grid_size)
considered_vals = blockers[row, cols]
considered_max_distances = blocker_max_distances_to_consider[row, cols]
if d == early_base_direct_dir:
considered_vals[0] = 1
is_blocking = np.logical_and(considered_vals, np.arange(
blocker_max_distance) < considered_max_distances)
if np.any(is_blocking):
first_blocking_id = np.where(is_blocking)[0][0]
mask_rows = rows[first_blocking_id:]
mask_cols = cols[first_blocking_id:]
collect_grid_scores[mask_rows, mask_cols] = -1e12
return_to_base_scores[mask_rows, mask_cols] = -1e12
establish_base_scores[mask_rows, mask_cols] = -1e12
if update_attack_base:
attack_base_scores[mask_rows, mask_cols] = -1e12
if first_blocking_id == 0:
one_step_bad_directions.append(d)
if d in valid_directions:
valid_directions.remove(d)
if d in one_step_valid_directions:
one_step_valid_directions.remove(d)
# Lower the score for entire quadrants when the two quadrant directions are
# blocking the movement
num_bad_one_directions = len(one_step_bad_directions)
if num_bad_one_directions > 1:
for i in range(num_bad_one_directions-1):
bad_direction_1 = one_step_bad_directions[i]
for j in range(i+1, num_bad_one_directions):
bad_direction_2 = one_step_bad_directions[j]
if (bad_direction_1 in [NORTH, SOUTH]) != (
bad_direction_2 in [NORTH, SOUTH]):
bad_quadrant_mask = np.logical_and(
HALF_PLANES_CATCH[row, col][bad_direction_1],
HALF_PLANES_CATCH[row, col][bad_direction_2])
collect_grid_scores[bad_quadrant_mask] = -1e12
return_to_base_scores[bad_quadrant_mask] = -1e12
establish_base_scores[bad_quadrant_mask] = -1e12
if update_attack_base:
attack_base_scores[bad_quadrant_mask] = -1e12
  # Additional logic to help avoid collisions when there is only a single
  # escape direction
if blockers[row, col]:
collect_grid_scores[row, col] = -1e12
return_to_base_scores[row, col] = -1e12
establish_base_scores[row, col] = -1e12
attack_base_scores[row, col] = -1e12
if None in valid_directions:
valid_directions.remove(None)
if None in one_step_valid_directions:
one_step_valid_directions.remove(None)
return (collect_grid_scores, return_to_base_scores, establish_base_scores,
attack_base_scores, valid_directions, one_step_valid_directions,
one_step_bad_directions)
def set_scores_single_nearby_zero(scores, nearby, size, ship_row, ship_col,
nearby_distance=1):
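  # Heavily penalize all squares within nearby_distance of the single nearby
  # position (except the position itself). If the ship is inside that
  # neighborhood, also return the direction from the ship towards the nearby
  # position; if it sits right on top of it, flag that the None (stay) action
  # should be dropped.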
nearby_pos = np.where(nearby)
row = nearby_pos[0][0]
col = nearby_pos[1][0]
next_nearby_pos = None
drop_None_valid = False
for i in range(-nearby_distance, nearby_distance+1):
near_row = (row + i) % size
for j in range(-nearby_distance, nearby_distance+1):
near_col = (col + j) % size
if i != 0 or j != 0:
# Don't gather near the base and don't move on top of it
scores[near_row, near_col] = -1e7
if near_row == ship_row and near_col == ship_col:
next_nearby_pos = get_dir_from_target(
ship_row, ship_col, row, col, size)[0]
else:
if near_row == ship_row and near_col == ship_col:
# Don't stay on top of the base
drop_None_valid = True
return scores, next_nearby_pos, drop_None_valid
def grid_distance(r1, c1, r2, c2, size):
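  # Manhattan distance on a wrap-around (toroidal) grid of the given size.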
horiz_diff = c2-c1
horiz_distance = min(np.abs(horiz_diff),
min(np.abs(horiz_diff-size), np.abs(horiz_diff+size)))
vert_diff = r2-r1
vert_distance = min(np.abs(vert_diff),
min(np.abs(vert_diff-size), np.abs(vert_diff+size)))
return horiz_distance+vert_distance
def override_early_return_base_scores(
base_return_grid_multiplier, my_bases, ship_row, ship_col, my_ship_count):
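  # Zero the return-to-base multiplier at the single base when the ship is
  # within (10 - my_ship_count) steps of it.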
base_pos = np.where(my_bases)
base_row = base_pos[0][0]
base_col = base_pos[1][0]
dist_to_base = DISTANCES[base_row, base_col][ship_row, ship_col]
# Remember the rule that blocks spawning when a ship is about to return
if dist_to_base <= 10-my_ship_count:
base_return_grid_multiplier[base_row, base_col] = 0
return base_return_grid_multiplier
def get_nearest_base_distances(grid_size, ignore_abandoned, observation):
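  # Gather distance masks and distance maps for my non-abandoned bases and
  # reduce them to: the best distance-mask score per square, the stacked
  # per-base distances, and the distance to the nearest base for every square.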
base_dms = []
base_distances = []
# for b in player_obs[1]:
# row, col = row_col_from_square_grid_pos(player_obs[1][b], grid_size)
# if not (row, col) in ignore_abandoned:
# base_dms.append(DISTANCE_MASKS[(row, col)])
# base_distances.append(DISTANCES[(row, col)])
my_bases = np.copy(observation['rewards_bases_ships'][0][1])
for r, c in ignore_abandoned:
my_bases[r, c] = 0
num_my_bases = my_bases.sum()
if num_my_bases > 0:
my_base_positions = np.where(my_bases)
for base_id in range(num_my_bases):
base_row = my_base_positions[0][base_id]
base_col = my_base_positions[1][base_id]
base_dms.append(DISTANCE_MASKS[(base_row, base_col)])
base_distances.append(DISTANCES[(base_row, base_col)])
if base_dms:
base_nearest_distance_scores = np.stack(base_dms).max(0)
all_base_distances = np.stack(base_distances)
else:
base_nearest_distance_scores = np.ones((grid_size, grid_size))
all_base_distances = 99*np.ones((1, grid_size, grid_size))
nearest_base_distances = np.min(all_base_distances, 0)
return (base_nearest_distance_scores, all_base_distances,
nearest_base_distances)
def get_valid_opponent_ship_actions(
config, rewards_bases_ships, halite_ships, size, history,
nearest_base_distances, observation, env_config):
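  # For every opponent ship, work out which relative moves look sensible:
  # avoid collisions with enemy ships that carry no more halite (optionally
  # modeling risky zero halite behavior) and avoid moving onto other players'
  # bases. Also flag boxed in ships that may convert, boxed in zero halite
  # opponents, and the squares from which boxed in ships with halite can be
  # attacked.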
opponent_ships_sensible_actions = {}
opponent_ships_sensible_actions_no_risk = {}
boxed_in_zero_halite_opponents = []
likely_convert_opponent_positions = []
possible_convert_opponent_positions = []
num_agents = len(rewards_bases_ships)
convert_cost = env_config.convertCost
stacked_bases = np.stack([rbs[1] for rbs in rewards_bases_ships])
stacked_ships = np.stack([rbs[2] for rbs in rewards_bases_ships])
num_players = stacked_ships.shape[0]
grid_size = stacked_ships.shape[1]
player_base_ids = -1*np.ones((grid_size, grid_size))
  boxed_in_attack_squares = np.zeros((grid_size, grid_size), dtype=bool)
  boxed_in_opponent_ids = -1*np.ones((grid_size, grid_size), dtype=int)
  opponent_single_escape_pos = np.zeros(
      (grid_size, grid_size), dtype=bool)
single_escape_mapping = {}
for i in range(num_players):
player_base_ids[stacked_bases[i]] = i
for i in range(1, num_agents):
opponent_ships = stacked_ships[i]
enemy_ships = np.delete(stacked_ships, (i), axis=0).sum(0)
ship_pos = np.where(opponent_ships)
num_ships = ship_pos[0].size
for j in range(num_ships):
valid_rel_directions = copy.copy(RELATIVE_DIRECTIONS)
valid_rel_directions_no_move_risk = copy.copy(RELATIVE_DIRECTIONS)
row = ship_pos[0][j]
col = ship_pos[1][j]
ship_halite = halite_ships[row, col]
for row_diff in range(-2, 3):
for col_diff in range(-2, 3):
distance = (np.abs(row_diff) + np.abs(col_diff))
if distance == 1 or distance == 2:
other_row = (row + row_diff) % size
other_col = (col + col_diff) % size
if enemy_ships[other_row, other_col]:
hal_diff = halite_ships[other_row, other_col] - ship_halite
# if observation['step'] == 189 and row == 14 and col == 2:
# import pdb; pdb.set_trace()
ignores_move_collision = False
risky_stay_still_collision = False
if halite_ships[row, col] == halite_ships[
other_row, other_col]:
# Note: use the opponent distance because the opponent model is
# learned using the opponent distance to the nearest base (with
# near base distance cutoff typically at 2)
is_near_base = nearest_base_distances[
other_row, other_col] <= config['log_near_base_distance']
risk_lookup_k = str(is_near_base) + '_' + str(distance) + (
'_ever_risky')
if distance == 2:
ignores_move_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k]
else:
risk_lookup_k_dist_zero = str(is_near_base) + '_' + str(
0) + '_ever_risky'
risky_stay_still_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k]
ignores_move_collision = history[
'zero_halite_move_behavior'][i][risk_lookup_k_dist_zero]
# if ignores_move_collision and distance == 1:
# import pdb; pdb.set_trace()
# x=1
rem_dirs = []
if risky_stay_still_collision:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff <= 0 else []
else:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff < 0 else []
if not ignores_move_collision:
rem_dirs += [(-1, 0)] if row_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(1, 0)] if row_diff > 0 and hal_diff <= 0 else []
rem_dirs += [(0, -1)] if col_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(0, 1)] if col_diff > 0 and hal_diff <= 0 else []
for d in rem_dirs:
if d in valid_rel_directions:
valid_rel_directions.remove(d)
# if observation['step'] == 146 and row == 13 and col == 13:
# import pdb; pdb.set_trace()
# Don't check for risky opponent zero halite behavior
rem_dirs = []
if risky_stay_still_collision:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff <= 0 else []
else:
rem_dirs += [(0, 0)] if distance == 1 and hal_diff < 0 else []
rem_dirs += [(-1, 0)] if row_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(1, 0)] if row_diff > 0 and hal_diff <= 0 else []
rem_dirs += [(0, -1)] if col_diff < 0 and hal_diff <= 0 else []
rem_dirs += [(0, 1)] if col_diff > 0 and hal_diff <= 0 else []
for d in rem_dirs:
if d in valid_rel_directions_no_move_risk:
valid_rel_directions_no_move_risk.remove(d)
# Prune for opponent base positions
rem_dirs = []
for rel_dir in valid_rel_directions:
d = RELATIVE_DIR_TO_DIRECTION_MAPPING[rel_dir]
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
move_base_id = player_base_ids[move_row, move_col]
if move_base_id >= 0 and move_base_id != i:
rem_dirs.append(rel_dir)
for d in rem_dirs:
valid_rel_directions.remove(d)
# if observation['step'] == 146 and row == 14 and col == 13:
# import pdb; pdb.set_trace()
rem_dirs = []
for rel_dir in valid_rel_directions_no_move_risk:
d = RELATIVE_DIR_TO_DIRECTION_MAPPING[rel_dir]
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
move_base_id = player_base_ids[move_row, move_col]
if move_base_id >= 0 and move_base_id != i:
rem_dirs.append(rel_dir)
for d in rem_dirs:
valid_rel_directions_no_move_risk.remove(d)
if len(valid_rel_directions) == 0:
player_halite_budget = observation['rewards_bases_ships'][i][0]
if ((ship_halite + player_halite_budget) >= convert_cost):
if ship_halite >= history['inferred_boxed_in_conv_threshold'][i][1]:
likely_convert_opponent_positions.append((row, col))
if ship_halite >= history['inferred_boxed_in_conv_threshold'][i][0]:
possible_convert_opponent_positions.append((row, col))
if ship_halite > 0:
for d in MOVE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
boxed_in_attack_squares[move_row, move_col] = True
boxed_in_opponent_ids[move_row, move_col] = i
if ship_halite == 0 and len(valid_rel_directions_no_move_risk) == 1 and (
valid_rel_directions_no_move_risk[0] == (0, 0)):
boxed_in_zero_halite_opponents.append((row, col))
if len(valid_rel_directions_no_move_risk) == 1:
escape_dir = RELATIVE_DIR_TO_DIRECTION_MAPPING[
valid_rel_directions_no_move_risk[0]]
escape_square = move_ship_row_col(row, col, escape_dir, grid_size)
opponent_single_escape_pos[escape_square] = 1
single_escape_mapping[(row, col)] = escape_square
opponent_ships_sensible_actions[(row, col)] = valid_rel_directions
opponent_ships_sensible_actions_no_risk[(row, col)] = (
valid_rel_directions_no_move_risk)
# if observation['step'] == 146:
# import pdb; pdb.set_trace()
  # Do another pass over the zero halite ships to figure out whether they are
  # boxed in by the escape squares of their own non zero halite ships - these
  # ships will very likely take risky actions and should therefore be avoided
if np.any(opponent_single_escape_pos):
for j in range(num_ships):
row = ship_pos[0][j]
col = ship_pos[1][j]
ship_halite = halite_ships[row, col]
if ship_halite == 0:
valid_rel_directions = opponent_ships_sensible_actions[(row, col)]
valid_rel_directions_no_move_risk = (
opponent_ships_sensible_actions_no_risk[row, col])
# if observation['step'] == 146 and row == 15 and col == 12:
# import pdb; pdb.set_trace()
# if observation['step'] == 146 and row == 14 and col == 13:
# import pdb; pdb.set_trace()
for d in NOT_NONE_DIRECTIONS:
move_row, move_col = move_ship_row_col(row, col, d, grid_size)
if opponent_single_escape_pos[move_row, move_col]:
my_escape_square = False
if (row, col) in single_escape_mapping:
my_escape_square = (move_row, move_col) == (
single_escape_mapping[row, col])
if my_escape_square:
# Still treat it as a bad direction if there is another ship
              # that has my escape square as its only escape square
num_escape_count = np.array(
[v == (move_row, move_col) for v in (
single_escape_mapping.values())]).sum()
my_escape_square = num_escape_count == 1
if not my_escape_square:
avoid_rel_direction = RELATIVE_DIR_MAPPING[d]
if avoid_rel_direction in valid_rel_directions:
valid_rel_directions.remove(avoid_rel_direction)
if avoid_rel_direction in valid_rel_directions_no_move_risk:
valid_rel_directions_no_move_risk.remove(avoid_rel_direction)
if (len(valid_rel_directions_no_move_risk) == 0 or (
len(valid_rel_directions_no_move_risk) == 1 and (
valid_rel_directions_no_move_risk[0] == (0, 0)))) and (
not (row, col) in boxed_in_zero_halite_opponents):
# print("AVOIDING chained zero halite collision",
# observation['step'], row, col)
boxed_in_zero_halite_opponents.append((row, col))
opponent_ships_sensible_actions[(row, col)] = valid_rel_directions
opponent_ships_sensible_actions_no_risk[(row, col)] = (
valid_rel_directions_no_move_risk)
return (opponent_ships_sensible_actions,
opponent_ships_sensible_actions_no_risk, boxed_in_attack_squares,
boxed_in_opponent_ids, boxed_in_zero_halite_opponents,
likely_convert_opponent_positions,
possible_convert_opponent_positions)
def scale_attack_scores_bases_ships(
config, observation, player_obs, spawn_cost, non_abandoned_base_distances,
weighted_base_mask, steps_remaining, obs_halite, halite_ships, history,
smoothed_halite, player_influence_maps,
nearest_base_distances_with_my_excluded, player_ids,
laplace_smoother_rel_ship_count=4, initial_normalize_ship_diff=10,
final_normalize_ship_diff=3):
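  # Scale the attractiveness of attacking each opponent's bases and ships
  # based on their base count, how close their score is to mine and their
  # relative ship count, boost opponent bases near my main base, and decide
  # whether to start or continue a "ballistic" attack on a single opponent.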
stacked_bases = np.stack([rbs[1] for rbs in observation[
'rewards_bases_ships']])
my_bases = stacked_bases[0]
# Exclude bases that are persistently camped by opponents
for base_pos in history['my_base_not_attacked_positions']:
my_bases[base_pos] = 0
stacked_opponent_bases = stacked_bases[1:]
stacked_ships = np.stack([rbs[2] for rbs in observation[
'rewards_bases_ships']])
stacked_opponent_ships = stacked_ships[1:]
base_counts = stacked_opponent_bases.sum((1, 2))
my_ship_count = len(player_obs[2])
ship_counts = stacked_opponent_ships.sum((1, 2))
grid_size = stacked_opponent_bases.shape[1]
approximate_scores = history['current_scores']
num_players = stacked_bases.shape[0]
player_ranks = np.zeros(num_players)
for i in range(num_players):
player_ranks[i] = (approximate_scores >= approximate_scores[i]).sum()
# print(approximate_scores)
  # Factor 1: an opponent with fewer bases is more attractive to attack
base_count_multiplier = np.where(base_counts == 0, 0, 1/(base_counts+1e-9))
# Factor 2: an opponent that is closer in score is more attractive to attack
spawn_diffs = (approximate_scores[0] - approximate_scores[1:])/spawn_cost
abs_spawn_diffs = np.abs(spawn_diffs)
currently_winning = approximate_scores[0] >= approximate_scores[1:]
approximate_score_diff = approximate_scores[0] - approximate_scores[1:]
normalize_diff = initial_normalize_ship_diff - observation['relative_step']*(
initial_normalize_ship_diff-final_normalize_ship_diff)
abs_rel_normalized_diff = np.maximum(
0, (normalize_diff-abs_spawn_diffs)/normalize_diff)
rel_score_max_y = initial_normalize_ship_diff/normalize_diff
rel_score_multiplier = abs_rel_normalized_diff*rel_score_max_y
  # Factor 3: an opponent with fewer ships is more attractive to attack since it
# is harder for them to defend the base
rel_ship_count_multiplier = (my_ship_count+laplace_smoother_rel_ship_count)/(
ship_counts+laplace_smoother_rel_ship_count)
# Additional term: attack bases nearby my main base
  opponent_bases = stacked_opponent_bases.sum(0).astype(bool)
if opponent_bases.sum() > 0 and non_abandoned_base_distances.max() > 0:
additive_nearby_main_base = 3/max(0.15, observation['relative_step'])/(
1.5**non_abandoned_base_distances)/(
weighted_base_mask[my_bases].sum())
additive_nearby_main_base[~opponent_bases] = 0
else:
additive_nearby_main_base = 0
attack_multipliers = base_count_multiplier*rel_score_multiplier*(
rel_ship_count_multiplier)
tiled_multipliers = np.tile(attack_multipliers.reshape((-1, 1, 1)),
[1, grid_size, grid_size])
# if observation['step'] == 391:
# import pdb; pdb.set_trace()
opponent_bases_scaled = (stacked_opponent_bases*tiled_multipliers).sum(0) + (
additive_nearby_main_base)
# Compute the priority of attacking the ships of opponents
opponent_ships_scaled = np.maximum(0, 1 - np.abs(
approximate_scores[0]-approximate_scores[1:])/steps_remaining/10)
# print(observation['step'], opponent_ships_scaled, approximate_scores)
# if observation['step'] == 300:
# import pdb; pdb.set_trace()
# If I am winning by a considerable margin before the game is over, and the
# number three is far behind the number two: go ballistic on the number two
# Prefer opponent bases that are close to my bases and halite, and where the
# opponent has a relatively low density
# Make sure to guarantee some continuity with a start and stop mode
# Ballistic scenarios:
# - I am well ahead of all opponents: target the initial best agent
# - I am winning with a solid margin and the number three is far behind
# the number two: target the number two
# - I am in a close fight with the number two/one and the number three is
# very far behind: target the number two
winning_massively = np.all(spawn_diffs >= (
18-9*observation['relative_step']))
if not winning_massively:
history['ballistic_early_best_target_mode'] = False
winning_very_clearly = np.all(spawn_diffs >= (
14-7*observation['relative_step']))
winning_clearly = np.all(spawn_diffs >= (8-4*observation['relative_step']))
winning_considerably = np.all(spawn_diffs >= (
6-4*observation['relative_step'] + int(history[
'ballistic_early_best_target_mode'])))
winning_massively_near_end_game = winning_massively and observation[
'relative_step'] > 0.75
winning_massively_before_end_game = winning_massively and not (
winning_massively_near_end_game)
first_opp_id = np.argsort(spawn_diffs)[0]
second_opp_id = np.argsort(spawn_diffs)[1]
second_third_spawn_diff = spawn_diffs[second_opp_id] - spawn_diffs[
first_opp_id]
very_tight_fight_for_first = np.abs(spawn_diffs[first_opp_id]) < 1 and (
spawn_diffs[second_opp_id] >= (12-8*observation['relative_step']))
tight_fight_for_first = np.abs(spawn_diffs[first_opp_id]) < 3 and (
spawn_diffs[second_opp_id] >= (8-6*observation['relative_step']))
prev_ballistic_mode = history['ballistic_mode']
should_start_ballistic = (not winning_massively_before_end_game) and (
winning_clearly and second_third_spawn_diff > (
7-2*observation['relative_step']) or very_tight_fight_for_first or (
winning_massively_near_end_game)) and (
my_ship_count >= 15-max(0, 40*(observation['relative_step']-0.8)))
should_continue_ballistic = not (
winning_massively_before_end_game) and (winning_very_clearly or (
winning_clearly and (second_third_spawn_diff > 1)) or (
winning_considerably and (
second_third_spawn_diff > (2-observation['relative_step']))) or (
tight_fight_for_first)
) and (my_ship_count >= 10-max(0, 20*(observation['relative_step']-0.8)))
ballistic_mode = should_start_ballistic or (
prev_ballistic_mode and should_continue_ballistic)
# Select the next target in line if the opponent has no bases and no ships
if history['ballistic_early_best_targets_sorted'] is not None:
for opponent_id in history['ballistic_early_best_targets_sorted']:
ballistic_early_best_target_mode_target = opponent_id
num_opponent_bases = stacked_bases[opponent_id+1].sum()
num_opponent_ships = stacked_ships[opponent_id+1].sum()
if num_opponent_bases > 0 or num_opponent_ships > 0:
break
else:
ballistic_early_best_target_mode_target = first_opp_id
# print(observation['step'], ballistic_early_best_target_mode_target)
# if observation['step'] == 146:
# import pdb; pdb.set_trace()
# Ballistic early best target mode override of the opponent id: prefer to
# attack opponents that have a base which is close to one of my non
# abandoned bases
opponent_base_positions = np.where(stacked_opponent_bases.sum(0) > 0)
opponent_near_my_base_distances = nearest_base_distances_with_my_excluded[
opponent_base_positions]
targeted_base_override = None
if np.any(opponent_base_positions) and winning_very_clearly and (
opponent_near_my_base_distances.min() < 6):
prev_ballistic_target_override = history['prev_ballistic_target_override']
if history['prev_ballistic_target_override'] is not None and (
opponent_bases[prev_ballistic_target_override]):
targeted_base_override = prev_ballistic_target_override
else:
# Sort annoying bases by score: prefer to attack opponent bases that
# belong to the best opponent
smoothed_halite = smooth2d(observation['halite'])
opponent_near_my_base_scores = opponent_near_my_base_distances + 0.6*(
player_ranks[player_ids[opponent_base_positions]-1]) - 1e-9*(
smoothed_halite[opponent_base_positions])
target_base_id = np.argmin(opponent_near_my_base_scores)
targeted_base_override = (
opponent_base_positions[0][target_base_id],
opponent_base_positions[1][target_base_id])
history['prev_ballistic_target_override'] = targeted_base_override
if ballistic_mode and not prev_ballistic_mode and (
winning_massively_near_end_game):
# Switch to early best target mode - override of the target id
print(observation['step'], "Start attack on early best target",
ballistic_early_best_target_mode_target+1)
ballistic_early_best_target_mode = True
ballistic_target_id = ballistic_early_best_target_mode_target
elif ballistic_mode:
ballistic_early_best_target_mode = history[
'ballistic_early_best_target_mode'] and winning_very_clearly
if ballistic_early_best_target_mode or winning_massively_near_end_game:
# Early best target mode
ballistic_target_id = ballistic_early_best_target_mode_target
else:
# Standard ballistic mode
ballistic_target_id = first_opp_id
# print(observation['step'], "Winning massively near end?",
# winning_massively_near_end_game, ballistic_target_id)
else:
ballistic_early_best_target_mode = False
# Consider going ballistic on the nearest contender for the second place
# when the first place no longer seems possible
first_out_of_reach = spawn_diffs.min() <= (
-40+36*observation['relative_step']) # This should be conservative
if first_out_of_reach and np.abs(spawn_diffs[first_opp_id]) > np.abs(
spawn_diffs[second_opp_id]):
ballistic_target_id = second_opp_id
third_opp_id = np.argsort(spawn_diffs)[2]
spawn_diffs_not_best = np.array([spawn_diffs[i] for i in range(3) if (
not i == first_opp_id)])
winning_clearly_second = np.all(
spawn_diffs_not_best >= (8-4*observation['relative_step']))
winning_considerably_second = np.all(spawn_diffs_not_best >= (
6-4*observation['relative_step']))
third_fourth_spawn_diff = spawn_diffs[third_opp_id] - (
spawn_diffs[second_opp_id])
very_tight_fight_for_second = (
np.abs(spawn_diffs[second_opp_id]) < np.abs(
spawn_diffs[third_opp_id])/2) and (
spawn_diffs[third_opp_id] >= (12-8*observation['relative_step']))
tight_fight_for_second = (
|
np.abs(spawn_diffs[second_opp_id])
|
numpy.abs
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import os
import os.path as osp
import sys
import numpy as np
import cv2
import torch
from torch.utils.data import Dataset, DataLoader
def get_fg_mask(detections, im_height, im_width, pad_ratio=0.05):
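    # Build a binary foreground mask from the detection boxes, padded by
    # pad_ratio on each side; boxes spanning more than 3/4 of the image in
    # either dimension are skipped.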
fg_mask = np.zeros((im_height, im_width), dtype=np.uint8)
# Filter out both hands and objects
for box in detections:
hr, wr = box[3] - box[1], box[2] - box[0]
if hr > im_height * 3 / 4 or wr > im_width * 3 / 4:
continue
ax1, ax2 = int(max(0, box[0] - wr * pad_ratio)), int(min(im_width, box[2] + wr * pad_ratio))
ay1, ay2 = int(max(0, box[1] - hr * pad_ratio)), int(min(im_height, box[3] + hr * pad_ratio))
fg_mask[ay1:ay2, ax1:ax2] = 1
return fg_mask
def propagate_flow(target, flow):
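    # Warp `target` along the flow field: every output pixel is sampled at
    # (x + dx, y + dy) using bilinear interpolation.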
fl_height, fl_width = flow.shape[:2]
ndim = target.ndim
if ndim == 2:
target = target[..., None]
# Track mask
# Track pixels within bounding boxes
xx, yy = np.meshgrid(np.arange(fl_width), np.arange(fl_height))
# calculate x + dx
px = xx + flow[..., 0]
py = yy + flow[..., 1]
px = px.reshape((-1))
py = py.reshape((-1))
# Calc weights
    x1 = np.clip(np.floor(px), 0, fl_width - 1).astype(int)
    x2 = np.clip(np.floor(px) + 1, 0, fl_width - 1).astype(int)
    y1 = np.clip(np.floor(py), 0, fl_height - 1).astype(int)
    y2 = np.clip(np.floor(py) + 1, 0, fl_height - 1).astype(int)
a = np.expand_dims((np.floor(px) + 1 - px) * (np.floor(py) + 1 - py), 1)
b = np.expand_dims((px - np.floor(px)) * (np.floor(py) + 1 - py), 1)
c = np.expand_dims((np.floor(px) + 1 - px) * (py - np.floor(py)), 1)
d = np.expand_dims((px - np.floor(px)) * (py - np.floor(py)), 1)
result = target[y1, x1] * a + target[y1, x2] * b + target[y2, x1] * c + target[y2, x2] * d
if ndim == 2:
return result.reshape((fl_height, fl_width))
else:
return result.reshape((fl_height, fl_width, -1))
def apply_homography(pts, H):
"""
pts (N, 2)
"""
ndim = pts.ndim
if ndim == 1:
pts = pts[None]
pts = np.concatenate((pts, np.ones((len(pts), 1))), axis=1)
pts = np.dot(pts, H.T)
pts /= pts[:, 2:3]
if ndim == 1:
return pts[0, :2]
else:
return pts[:, :2]
def calc_consistency(fw_flow, bk_flow, ratio=0.01, offset=0.5):
"""
fw_flow: Ft->t+1
bg_flow: Ft+1->t
"""
fl_height, fl_width = fw_flow.shape[:2]
warped_bk_flow = propagate_flow(bk_flow, fw_flow)
loop_flow = fw_flow + warped_bk_flow
loop_mag = np.sqrt(loop_flow[..., 0] ** 2 + loop_flow[..., 1] ** 2)
fw_mag = np.sqrt(fw_flow[..., 0] ** 2 + fw_flow[..., 1] ** 2)
bk_mag = np.sqrt(warped_bk_flow[..., 0] ** 2 + warped_bk_flow[..., 1] ** 2)
consistency_map = loop_mag <= ratio * (fw_mag + bk_mag) + offset
return consistency_map
def correct_flow(flow, im_width, im_height, detections, valid_ratio=0.01):
"""
    im_width and im_height should be the original image size at which the
    detections were computed.
"""
height, width = flow.shape[:2]
fg_mask = cv2.resize(get_fg_mask(detections, im_height, im_width), (width, height))
    bg_mask = (1 - fg_mask).astype(bool)
if np.sum(bg_mask) < height * width * valid_ratio:
return flow # No correction due to less background mask
bg_flows = flow[bg_mask].reshape((-1, 2))
# Estimate homography given optical flows
stride = 4
xx, yy = np.meshgrid(np.arange(0, width, stride), np.arange(0, height, stride))
sgrid = np.stack((xx, yy), axis=2)
match0 = sgrid[bg_mask[::stride, ::stride]].reshape((-1, 2))
bg_flows = flow[::stride, ::stride][bg_mask[::stride, ::stride]].reshape((-1, 2))
match1 = match0 + bg_flows
if len(match1) < 100:
H = np.eye(3)
else:
# Backward homography
cv2.setNumThreads(4)
H, masks = cv2.findHomography(match1, match0, cv2.RANSAC, 10)
xx, yy = np.meshgrid(
|
np.arange(width)
|
numpy.arange
|