prompt (stringlengths 15–655k) | completion (stringlengths 3–32.4k) | api (stringlengths 8–52)
---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 12:38:29 2021
@author: Ali
"""
import time
import os
import sys
import numpy as np
import tensorflow as tf
import h5py
MEM_DIR = './results'
default = False
top_only = False
np.random.seed(0)
keys = sys.argv[1]
metric = sys.argv[2]
num_images = int(sys.argv[3])
adversarials = sys.argv[4]
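# A minimal sketch (an assumption about how the sums/squares/counts
# accumulators below are consumed): the empirical Bernstein confidence
# radius of Maurer & Pontil (2009) for sample means of values with range R,
# which the 'Bernstein' bound name below suggests.
def empirical_bernstein_radius(var, n, R, delta):
    # var: sample variance, n: number of samples, delta: failure probability.
    return (np.sqrt(2.0 * var * np.log(2.0 / delta) / n)
            + 7.0 * R * np.log(2.0 / delta) / (3.0 * (n - 1)))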
while True:
for adv in adversarials.split(','):
print(adv)
adversarial = (adv == 'True')
for key in keys.split(','):
bound = 'Bernstein'
truncation = 0.2
if metric == 'logit':
truncation = 3443
max_sample_size = 128
## Experiment Directory
if default:
experiment_dir = os.path.join(MEM_DIR, 'NShap/inceptionv3/{}_{}_new'.format(metric, key))
else:
if top_only:
experiment_dir = os.path.join(MEM_DIR, 'NShap/vgg/{}_{}_top_layer_new'.format(metric, key))
else:
experiment_dir = os.path.join(MEM_DIR, 'NShap/vgg/{}_{}_all_layer_new'.format(metric, key))
if not tf.io.gfile.exists(experiment_dir):
tf.io.gfile.makedirs(experiment_dir)
if max_sample_size is None or max_sample_size > num_images:
max_sample_size = num_images
experiment_name = 'cb_{}_{}_{}'.format(bound, truncation, max_sample_size)
if adversarial:
experiment_name = 'ADV' + experiment_name
cb_dir = os.path.join(experiment_dir, experiment_name)
if not tf.io.gfile.exists(cb_dir):
tf.io.gfile.makedirs(cb_dir)
##
if metric == 'accuracy':
R = 1.
elif metric == 'xe_loss':
R = np.log(1000)
elif metric == 'binary':
R = 1.
elif metric == 'logit':
R = 10.
else:
raise ValueError('Invalid metric!')
top_k = 100
delta = 0.2
## Start
if not tf.io.gfile.exists(os.path.join(experiment_dir, 'players.txt')):
print('players.txt does not exist in {}'.format(experiment_dir))
continue
with open(os.path.join(experiment_dir, 'players.txt')) as f:
    players = np.array(f.read().split(','))
if not tf.io.gfile.exists(os.path.join(cb_dir, 'chosen_players_top_865_disabled.txt')):
    with open(os.path.join(cb_dir, 'chosen_players.txt'), 'w') as f:
        f.write(','.join(np.arange(len(players)).astype(str)))
results = np.sort([saved for saved in tf.io.gfile.listdir(cb_dir)
if 'agg' not in saved and '.h5' in saved])
squares, sums, counts = [np.zeros(len(players)) for _ in range(3)]
max_vals, min_vals = -np.ones(len(players)), np.ones(len(players))
for result in results:
try:
with h5py.File(os.path.join(cb_dir, result), 'r') as foo:
mem_tmc = foo['mem_tmc'][:]
except (OSError, KeyError):
    # Skip unreadable or incomplete result files.
    continue
if not len(mem_tmc):
continue
sums += np.sum((mem_tmc != -1) * mem_tmc, 0)
squares += np.sum((mem_tmc != -1) * (mem_tmc ** 2), 0)
counts += np.sum(mem_tmc != -1, 0)
|
numpy.sum
|
#!/usr/bin/env python
# https://www.researchgate.net/publication/320307716_Inverse_Kinematic_Analysis_Of_A_Quadruped_Robot
import numpy as np
class LegIK():
def __init__(self,
legtype="RIGHT",
shoulder_length=0.04,
elbow_length=0.1,
wrist_length=0.125,
hip_lim=[-0.548, 0.548],
shoulder_lim=[-2.17, 0.97],
leg_lim=[-0.1, 2.59]):
self.legtype = legtype
self.shoulder_length = shoulder_length
self.elbow_length = elbow_length
self.wrist_length = wrist_length
self.hip_lim = hip_lim
self.shoulder_lim = shoulder_lim
self.leg_lim = leg_lim
def get_domain(self, x, y, z):
"""
Calculates the leg's Domain and caps it in case of a breach
:param x,y,z: hip-to-foot distances in each dimension
:return: Leg Domain D
"""
D = (y**2 + (-z)**2 - self.shoulder_length**2 +
     (-x)**2 - self.elbow_length**2 - self.wrist_length**2) / (
         2 * self.wrist_length * self.elbow_length)
# Clip D into [-1, 1] so the downstream sqrt/arctan2 terms stay real
# when the target point breaches the leg's reachable domain.
return np.clip(D, -1.0, 1.0)
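    # Geometric note: D is the cosine of the wrist (knee) angle, obtained from
    # the law of cosines applied to the elbow and wrist links after removing
    # the shoulder offset from the hip-to-foot distance.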
def solve(self, xyz_coord):
"""
Generic Leg Inverse Kinematics Solver
:param xyz_coord: hip-to-foot distances in each dimension
:return: Joint Angles required for desired position
"""
x = xyz_coord[0]
y = xyz_coord[1]
z = xyz_coord[2]
D = self.get_domain(x, y, z)
if self.legtype == "RIGHT":
return self.RightIK(x, y, z, D)
else:
return self.LeftIK(x, y, z, D)
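    # Hypothetical usage (numbers illustrative, lengths in metres):
    #   leg = LegIK()
    #   shoulder, elbow, wrist = leg.solve(np.array([0.0, 0.04, -0.18]))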
def RightIK(self, x, y, z, D):
"""
Right Leg Inverse Kinematics Solver
:param x,y,z: hip-to-foot distances in each dimension
:param D: leg domain
:return: Joint Angles required for desired position
"""
wrist_angle = np.arctan2(-np.sqrt(1 - D**2), D)
sqrt_component = y**2 + (-z)**2 - self.shoulder_length**2
if sqrt_component < 0.0:
# print("NEGATIVE SQRT")
sqrt_component = 0.0
shoulder_angle = -np.arctan2(z, y) - np.arctan2(
np.sqrt(sqrt_component), -self.shoulder_length)
elbow_angle = np.arctan2(-x, np.sqrt(sqrt_component)) - np.arctan2(
self.wrist_length * np.sin(wrist_angle),
self.elbow_length + self.wrist_length * np.cos(wrist_angle))
joint_angles = np.array([-shoulder_angle, elbow_angle, wrist_angle])
return joint_angles
def LeftIK(self, x, y, z, D):
"""
Left Leg Inverse Kinematics Solver
:param x,y,z: hip-to-foot distances in each dimension
:param D: leg domain
:return: Joint Angles required for desired position
"""
wrist_angle = np.arctan2(-np.sqrt(1 - D**2), D)
sqrt_component = y**2 + (-z)**2 - self.shoulder_length**2
if sqrt_component < 0.0:
print("NEGATIVE SQRT")
sqrt_component = 0.0
shoulder_angle = -np.arctan2(z, y)
|
numpy.arctan2
|
"""
We estimate the spectral norm of a concatenation of convolutional or locally connected matrices. We see that the mean is approximately the same
while the variance is greater for the convolutional matrices. We can compute the results for different numbers of input and output channels.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import circulant
def gen_mat_uncor(N, k, sigma):
    # Locally connected: independent Gaussian weights under a k-wide cyclic
    # band mask, so rows share the sparsity pattern but not the values.
    mask = circulant(np.concatenate((np.ones(k), np.zeros(N - k))))
    mat = np.random.normal(0, sigma, (N, N))
    return mat * mask
def gen_mat_cor(N, k, sigma):
    # Convolutional: a single random length-k filter shared by all rows of
    # the circulant matrix, so rows are fully correlated.
    filters = np.concatenate((np.random.normal(0, sigma, k), np.zeros(N - k)))
    return circulant(filters)
def create_cor_tot(a, b, N, k, sigma):
    # Assemble a (N*b) x (N*a) block matrix of convolutional blocks.
    mat1 = np.zeros((N * b, N * a))
    for i in range(0, b):
        tmp = gen_mat_cor(N, k, sigma)
        for j in range(0, a - 1):
            tmp = np.hstack((tmp, gen_mat_cor(N, k, sigma)))
        mat1[i * N:(i + 1) * N, :] = tmp
    return mat1
def create_uncor_tot(a, b, N, k, sigma):
    # Assemble a (N*b) x (N*a) block matrix of locally connected blocks.
    mat2 = np.zeros((N * b, N * a))
    for i in range(0, b):
        tmp = gen_mat_uncor(N, k, sigma)
        for j in range(0, a - 1):
            tmp = np.hstack((tmp, gen_mat_uncor(N, k, sigma)))
        mat2[i * N:(i + 1) * N, :] = tmp
    return mat2
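# Sanity check (hypothetical): for a = b = 2, N = 4, k = 2 the assembled
# matrix is (N*b) x (N*a) = 8 x 8, i.e. b block-rows each holding a
# horizontally stacked N x N blocks.
#   assert create_cor_tot(2, 2, 4, 2, 1.0).shape == (8, 8)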
def measure_total_matrix(a, b, N, k, sigma, num_trials):
    norm_mat1 = np.zeros((num_trials, 1))
    norm_mat2 = np.zeros((num_trials, 1))
    for i in range(0, num_trials):
        if i % 100 == 0:
            print('Inner Iteration: ', i)
        mat1 = create_cor_tot(a, b, N, k, sigma)
        mat2 = create_uncor_tot(a, b, N, k, sigma)
        norm_mat1[i] = np.linalg.norm(mat1, 2)
        norm_mat2[i] = np.linalg.norm(mat2, 2)
    avg_l2_cor = np.average(norm_mat1)
    avg_l2_uncor = np.average(norm_mat2)
    max_l2_cor = np.max(norm_mat1)
    max_l2_uncor = np.max(norm_mat2)
    min_l2_cor = np.min(norm_mat1)
    min_l2_uncor = np.min(norm_mat2)
    return avg_l2_cor, max_l2_cor, min_l2_cor, avg_l2_uncor, max_l2_uncor, min_l2_uncor
def theory_uncor(a,b,k,epsilon,N):
uncor = (1+epsilon)*(np.sqrt(k*a)+np.sqrt(k*b)+5*np.sqrt(np.log(np.maximum(N*a,N*b))/np.log(1+epsilon)))
return uncor
def theory_cor(a,b,k,N):
#cor = (1.4*np.sqrt(k))*(np.sqrt(a)+np.sqrt(b)+np.sqrt(2*np.log(4*N)))
cor = 1.4*(np.sqrt(k))*(np.sqrt(a)+np.sqrt(b))
return cor
N = 100
k = 9
sigma = 1
num_trials = 100
ab_max = 5
epsilon = 0.2
avg_l2_cor = np.zeros((ab_max,1))
avg_l2_uncor = np.zeros((ab_max,1))
max_l2_cor = np.zeros((ab_max,1))
max_l2_uncor = np.zeros((ab_max,1))
min_l2_cor = np.zeros((ab_max,1))
min_l2_uncor = np.zeros((ab_max,1))
theory_l2_cor = np.zeros((ab_max,1))
theory_l2_uncor = np.zeros((ab_max,1))
for i in range(0,ab_max):
print('Outer Iteration: ', i)
a = i+2
b = i+2
avg_l2_cor[i],max_l2_cor[i],min_l2_cor[i],avg_l2_uncor[i],max_l2_uncor[i],min_l2_uncor[i] = measure_total_matrix(a, b, N, k, sigma, num_trials)
theory_l2_cor[i] = theory_cor(a,b,k,N)
theory_l2_uncor[i] = theory_uncor(a,b,k,epsilon,N)
fig, ax = plt.subplots()
ax.grid(linestyle='--',linewidth=1.5,alpha=0.5,zorder=0)
x_points = np.arange(0, ab_max)
|
numpy.arange
|
import json
import tensorflow as tf
import numpy as np
import cv2
from model import INPUT_SHAPE
def class_map_road(seg):
# map class 0=anything, 1=road
return tf.where(seg == 7, [0, 1.0], [1.0, 0])
def cityscapes_prep(output_shape, input_shape=INPUT_SHAPE, class_map_func=None, float_range=True):
def prep_map(sample):
img = sample['image_left']
seg = sample['segmentation_label']
if float_range:
    img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, input_shape[0:2])
seg = tf.image.resize(seg, output_shape[0:2])
if callable(class_map_func):
seg = class_map_func(seg)
else:
seg = tf.keras.utils.to_categorical(seg, num_classes=output_shape[-1])
return img, seg
return prep_map
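# Hypothetical usage sketch (the dataset keys above follow TFDS 'cityscapes';
# the output shape and split here are assumptions):
#   import tensorflow_datasets as tfds
#   ds = tfds.load('cityscapes', split='train')
#   ds = ds.map(cityscapes_prep((128, 256, 2), class_map_func=class_map_road))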
def create_labelme_segmentation(contents):
meta = json.loads(contents.numpy().decode('utf-8'))
seg = np.zeros((meta['imageHeight'], meta['imageWidth']))
|
numpy.zeros
|
"""
Tests for relationship module
"""
# Copyright (c) <NAME>
# Distributed under the terms of the MIT License
# author: <NAME>
import unittest
import numpy as np
import scipy.stats
from uncertainties import unumpy as unp
from numpy.testing import assert_almost_equal, assert_equal
from uravu import utils
from uravu.relationship import Relationship
from uravu.distribution import Distribution
from uravu.axis import Axis
TEST_Y = []
for i in np.arange(1, 9, 1):
TEST_Y.append(Distribution(scipy.stats.norm.rvs(loc=i, scale=0.5, size=200)))
TEST_X = np.arange(1, 9, 1)
class TestRelationship(unittest.TestCase):
"""
Tests for the relationship module and class.
"""
def test_function_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(r.function, utils.straight_line)
def test_abscissa_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(isinstance(r.abscissa, np.ndarray), True)
assert_equal(r.abscissa, TEST_X)
def test_ordinate_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(isinstance(r.ordinate, Axis), True)
assert_equal(r.ordinate.values, TEST_Y)
def test_ordinate_no_distribution(self):
with self.assertRaises(ValueError):
Relationship(utils.straight_line, TEST_X, TEST_X)
def test_ordinate_and_ordinate_error(self):
r = Relationship(utils.straight_line, TEST_X, TEST_X, ordinate_error=[1]*len(TEST_X))
assert_equal(r.x, TEST_X)
assert_almost_equal(r.y.n, TEST_X, decimal=0)
assert_almost_equal(r.y.s, np.ones((2, len(TEST_X))) * 1.96, decimal=1)
def test_ordinate_stats(self):
test_y = []
for i in np.arange(1, 9, 1):
test_y.append(scipy.stats.lognorm(i, 1, 1))
r = Relationship(utils.straight_line, TEST_X, test_y)
assert_equal(isinstance(r.ordinate, Axis), True)
def test_ordinate_and_abscissa_different_length(self):
with self.assertRaises(ValueError):
Relationship(utils.straight_line, np.arange(1, 8, 1), TEST_Y)
def test_bounds_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y, bounds=((0, 10), (-1, 1)))
assert_equal(r.bounds, ((0, 10), (-1, 1)))
def test_variables_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_almost_equal(r.variables[0].n, 1)
assert_almost_equal(r.variables[1].n, 1)
def test_bounds_init_wrong_number_a(self):
with self.assertRaises(ValueError):
Relationship(utils.straight_line, TEST_X, TEST_Y, bounds=((0, 10), (-1, 1), (1, 2)))
def test_bounds_init_wrong_number_b(self):
with self.assertRaises(ValueError):
Relationship(utils.straight_line, TEST_X, TEST_Y, bounds=((0, 10)))
def test_variables_init_with_bounds(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y, bounds=((0, 10), (-1, 1)))
assert_equal(np.isclose(r.variables[0].n, 5, atol=0.75), True)
assert_equal(np.isclose(r.variables[1].n, 0, atol=0.5), True)
def test_ln_evidence_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(r.ln_evidence, None)
def test_mcmc_results_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(r.mcmc_results, None)
def test_nested_sampling_results_init(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(r.nested_sampling_results, None)
def test_x(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(isinstance(r.x, np.ndarray), True)
assert_equal(r.x, TEST_X)
def test_y(self):
r = Relationship(utils.straight_line, TEST_X, TEST_Y)
assert_equal(isinstance(r.y, Axis), True)
assert_equal(r.y.values, TEST_Y)
|
numpy.testing.assert_equal
|
# coding: utf-8
import numpy as np
import scipy.linalg as la
###############
### Hermite ###
###############
# Hermite, full matrix model
def hermite_sampler_full(N, beta=2):
size_sym_mat = int(N*(N-1)/2)
if beta==1:
A = np.random.randn(N, N)
elif beta==2:
A = np.random.randn(N, N) + 1j*np.random.randn(N, N)
elif beta==4:
X = np.random.randn(N, N) + 1j*np.random.randn(N, N)
Y = np.random.randn(N, N) + 1j*np.random.randn(N, N)
A = np.block([[X, Y],[-Y.conj(), X.conj()]])
else:
raise ValueError('Invalid beta parameter.\n'
                 'beta coefficient must be equal to 1, 2 or 4. '
                 'Given beta={}'.format(beta))
# return la.eigvalsh(A+A.conj().T)
return la.eigvalsh(A+A.conj().T)/np.sqrt(2.0)
## Hermite tridiag
def hermite_sampler_tridiag(N, beta=2):
"""
.. seealso::
:cite:`DuEd02` II-C
"""
alpha_coef = np.sqrt(2)*np.random.randn(N)
beta_coef = np.random.chisquare(beta*np.arange(N-1, 0, step=-1))
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
# Semi-circle law
def semi_circle_law(x, R=2.0):
# :cite:`DuEd15` Table 1
# https://en.wikipedia.org/wiki/Wigner_semicircle_distribution
return 2/(np.pi*R**2) * np.sqrt(R**2 - x**2)
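# Minimal sketch (the sqrt(N) normalisation is an assumption matching the
# semi-circle support [-R, R] with R=2):
#   N = 2000
#   eigs = hermite_sampler_tridiag(N) / np.sqrt(N)
#   x = np.linspace(-2, 2, 200)
#   compare a density histogram of eigs against semi_circle_law(x)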
## mu_ref == normal
def mu_ref_normal_sampler_tridiag(loc=0.0, scale=1.0, beta=2, size=10):
"""
.. seealso::
:cite:`DuEd02` II-C
"""
# beta/2*[N-1, N-2, ..., 1]
b_2_Ni = 0.5*beta*np.arange(size-1, 0, step=-1)
alpha_coef = np.random.normal(loc=loc, scale=scale, size=size)
beta_coef = np.random.gamma(shape=b_2_Ni, scale=scale**2)
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
################
### Laguerre ###
################
# Laguerre, full matrix model
def laguerre_sampler_full(M, N, beta=2):
if beta==1:
A = np.random.randn(N, M)
elif beta==2:
A = np.random.randn(N, M) + 1j*np.random.randn(N, M)
elif beta==4:
X = np.random.randn(N, M) + 1j*np.random.randn(N, M)
Y = np.random.randn(N, M) + 1j*np.random.randn(N, M)
A = np.block([[X, Y],[-Y.conj(), X.conj()]])
else:
raise ValueError('Invalid beta parameter.\n'
                 'beta coefficient must be equal to 1, 2 or 4. '
                 'Given beta={}'.format(beta))
return la.eigvalsh(A.dot(A.conj().T))
## Laguerre, tridiagonal model
def laguerre_sampler_tridiag(M, N, beta=2):
"""
.. seealso::
:cite:`DuEd02` III-B
"""
# M=>N
# xi_odd = xi_1, ... , xi_2N-1
xi_odd = np.random.chisquare(beta*np.arange(M, M-N, step=-1)) # odd
# xi_even = xi_0=0, xi_2, ... ,xi_2N-2
xi_even = np.zeros(N)
xi_even[1:] = np.random.chisquare(beta*np.arange(N-1, 0, step=-1)) # even
# alpha_i = xi_2i-2 + xi_2i-1
# alpha_1 = xi_0 + xi_1 = xi_1
alpha_coef = xi_even + xi_odd
# beta_i+1 = xi_2i-1 * xi_2i
beta_coef = xi_odd[:-1] * xi_even[1:]
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
# Marcenko Pastur law
def marcenko_pastur_law(x, M, N, sigma=1.0):
# https://en.wikipedia.org/wiki/Marchenko-Pastur_distribution
# M>=N
c = N/M
Lm, Lp = (sigma*(1-np.sqrt(c)))**2, (sigma*(1+np.sqrt(c)))**2
return 1.0/(2*np.pi*sigma**2) * 1.0/(c*x) *np.sqrt(np.maximum((Lp-x)*(x-Lm),0))
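# Minimal sketch (the 1/M scaling is an assumption for sigma=1 entries):
#   M, N = 400, 100
#   eigs = laguerre_sampler_tridiag(M, N, beta=1) / M
#   x = np.linspace(1e-3, 3, 300)
#   compare a density histogram of eigs with marcenko_pastur_law(x, M, N)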
## mu_ref == Gamma
def mu_ref_gamma_sampler_tridiag(shape=1.0, scale=1.0, beta=2, size=10):
"""
.. seealso::
:cite:`DuEd02` III-B
"""
# beta/2*[N-1, N-2, ..., 1, 0]
b_2_Ni = 0.5*beta*np.arange(size-1,-1,step=-1)
# xi_odd = xi_1, ... , xi_2N-1
xi_odd = np.random.gamma(shape=b_2_Ni + shape, scale=scale) # odd
# xi_even = xi_0=0, xi_2, ... ,xi_2N-2
xi_even = np.zeros(size)
xi_even[1:] = np.random.gamma(shape=b_2_Ni[:-1], scale=scale) # even
# alpha_i = xi_2i-2 + xi_2i-1
# alpha_1 = xi_0 + xi_1 = xi_1
alpha_coef = xi_even + xi_odd
# beta_i+1 = xi_2i-1 * xi_2i
beta_coef = xi_odd[:-1] * xi_even[1:]
return la.eigvalsh_tridiagonal(alpha_coef, np.sqrt(beta_coef))
##############
### Jacobi ###
##############
# Jacobi, full matrix model
def jacobi_sampler_full(M_1, M_2, N, beta=2):
if beta==1:
X = np.random.randn(N, M_1)
Y = np.random.randn(N, M_2)
elif beta==2:
X = np.random.randn(N, M_1) + 1j*np.random.randn(N, M_1)
Y = np.random.randn(N, M_2) + 1j*np.random.randn(N, M_2)
elif beta==4:
X_1 = np.random.randn(N, M_1) + 1j*np.random.randn(N, M_1)
X_2 = np.random.randn(N, M_1) + 1j*np.random.randn(N, M_1)
Y_1 = np.random.randn(N, M_2) + 1j*np.random.randn(N, M_2)
Y_2 = np.random.randn(N, M_2) + 1j*np.random.randn(N, M_2)
|
numpy.random.randn
|
import re
from conftest import generate_action_dist
import numpy as np
import pytest
from obp.ope import BalancedInverseProbabilityWeighting
from obp.types import BanditFeedback
# lambda_, err, description
invalid_input_of_bipw_init = [
(
"",
TypeError,
r"lambda_ must be an instance of \(<class 'int'>, <class 'float'>\), not <class 'str'>.",
),
(
None,
TypeError,
r"lambda_ must be an instance of \(<class 'int'>, <class 'float'>\), not <class 'NoneType'>.",
),
(-1.0, ValueError, "lambda_ == -1.0, must be >= 0.0."),
(np.nan, ValueError, "`lambda_` must not be nan"),
]
@pytest.mark.parametrize(
"lambda_, err, description",
invalid_input_of_bipw_init,
)
def test_bipw_init_using_invalid_inputs(
lambda_,
err,
description,
):
with pytest.raises(err, match=f"{description}*"):
_ = BalancedInverseProbabilityWeighting(
lambda_=lambda_,
)
# prepare bipw instances
bipw = BalancedInverseProbabilityWeighting()
# action_dist, action, reward, position, estimated_importance_weights, description
invalid_input_of_bipw = [
(
generate_action_dist(5, 4, 3),
None,
np.zeros(5, dtype=int),
np.random.choice(3, size=5),
np.ones(5),
"`action` must be 1D array",
),
(
generate_action_dist(5, 4, 3),
np.zeros(5, dtype=int)
|
numpy.zeros
|
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import affine_transform
def InverseCompositionAffine(It, It1):
# Input:
# It: template image
# It1: Current image
# Output:
# M: the Affine warp matrix [2x3 numpy array]
# put your implementation here
M = np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.]]).astype('float')
threshold = 0.1
It = np.float32(It)/np.max(It)
It1 = np.float32(It1)/np.max(It1)
|
numpy.max
|
#! /usr/bin/python
# Authors: <NAME> and <NAME>
"""
Fourier methods.
"""
import numpy as np
#######################################################################
### PSDs
#############################################
def psd(time, rate, binsize=None, binfac=20, sec_length=None, min_length=10, gap_threshold=1.5, verbose=False):
if binsize is None:
    binsize = min(time[1:] - time[0:-1])
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
freqall = np.array([])
powerall = np.array([])
n_pgs = 0
for j in range(n_sections):
t_sec = time[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
r_sec = rate[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
pgs_sec = len(t_sec) // sec_length
n_pgs += pgs_sec
for k in range(len(t_sec) // sec_length):
r_subsec = r_sec[(sec_length * k):(sec_length * (k + 1))]
power = abs((np.fft.fft(r_subsec - np.mean(r_subsec)))[1:size + 1]) ** 2 * 2. * binsize / np.mean(
r_subsec) ** 2 / sec_length
freqall = np.concatenate([freqall, freq])
powerall = np.concatenate([powerall, power])
else:
size = (len(t_sec) + 1) // 2
if size >= min_length:
freq = (np.arange((size)) + 1.) / len(t_sec) / binsize
power = abs((np.fft.fft(r_sec - np.mean(r_sec)))[1:size + 1]) ** 2 * 2. * binsize / np.mean(
r_sec) ** 2 / len(r_sec)
freqall = np.concatenate([freqall, freq])
powerall = np.concatenate([powerall, power])
binfac_use = binfac
n_pgs = 1
# p.plot(freq,power,'k')
### Average /bin up periodograms
if n_pgs == 0:
if verbose:
print('No lightcurve sections long enough to create periodograms.')
return np.array([]), np.array([]), np.array([])
# round binfac UP to multiple of number of periodograms
if verbose: print('Number of periodograms: ' + str(n_pgs))
binfac_use = max(binfac, (int((binfac - 1) / n_pgs) + 1) * n_pgs)
if verbose: print('Binning factor: ' + str(binfac_use))
# This sorting is slow but necessary to allow possibility of different periodogram lengths
sortind = np.argsort(freqall)
freqall = freqall[sortind]
powerall = powerall[sortind]
length = (len(freqall) - 1) // binfac_use
freqbin = np.zeros(length)
powerbin = np.zeros(length)
errorbin = np.zeros(length)
for k in range(length):
freqbin[k] = np.mean(freqall[(binfac_use * k):(binfac_use * (k + 1))])
powerbin[k] = np.mean(powerall[(binfac_use * k):(binfac_use * (k + 1))])
errorbin[k] = np.sqrt(np.var(powerall[(binfac_use * k):(binfac_use * (k + 1))]) / float(binfac_use))
return freqbin, powerbin, errorbin
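# Hypothetical usage (an evenly sampled lightcurve; values illustrative):
#   t = np.arange(0, 1000, 0.1)
#   r = np.random.poisson(100, t.size).astype(float)
#   f, p, e = psd(t, r, sec_length=256, binfac=10)
# With the 2*binsize/mean^2 factor above, the power is fractional-rms
# normalised, so integrating p over f estimates the squared fractional rms.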
#############################################
#############################################
def co_psd(timea, ratea, timeb, rateb, binsize=None, binfac=20, sec_length=None, min_length=10, gap_threshold=1.5,
verbose=False):
if binsize is None: binsize = min(timea[1:] - timea[0:-1])
time = np.intersect1d(timea, timeb)
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
freqall = np.array([])
powerall = np.array([])
n_pgs = 0
for j in range(n_sections):
a_start = (np.where(time[sec_ends[j] + 1] == timea)[0])[0]
a_end = a_start + sec_ends[j + 1] - sec_ends[j]
ta_sec = timea[a_start:a_end]
ra_sec = ratea[a_start:a_end]
b_start = (np.where(time[sec_ends[j] + 1] == timeb)[0])[0]
b_end = b_start + sec_ends[j + 1] - sec_ends[j]
tb_sec = timeb[b_start:b_end]
rb_sec = rateb[b_start:b_end]
assert np.all(ta_sec == tb_sec), 'Times for each lightcurve section should match' + str(
np.mean(ta_sec - tb_sec))
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
pgs_sec = len(ta_sec) // sec_length
n_pgs += pgs_sec
for k in range(pgs_sec):
ra_subsec = ra_sec[(sec_length * k):(sec_length * (k + 1))]
rb_subsec = rb_sec[(sec_length * k):(sec_length * (k + 1))]
power = np.real((np.fft.fft(ra_subsec - np.mean(ra_subsec)))[1:size + 1] * np.conj(
(np.fft.fft(rb_subsec - np.mean(rb_subsec)))[1:size + 1])) * 2. * binsize / np.mean(
ra_subsec) / np.mean(rb_subsec) / sec_length
freqall = np.concatenate([freqall, freq])
powerall = np.concatenate([powerall, power])
else:
size = (len(ta_sec) + 1) // 2
if size >= min_length:
freq = (np.arange((size)) + 1.) / len(ta_sec) / binsize
power = np.real((np.fft.fft(ra_sec - np.mean(ra_sec)))[1:size + 1] * np.conj(
(np.fft.fft(rb_sec - np.mean(rb_sec)))[1:size + 1])) * 2. * binsize / np.mean(ra_sec) / np.mean(
rb_sec) / len(ra_sec)
freqall = np.concatenate([freqall, freq])
powerall = np.concatenate([powerall, power])
binfac_use = binfac
n_pgs = 1
# p.plot(freq,power,'k')
### Average /bin up periodograms
if n_pgs == 0:
if verbose:
print('No lightcurve sections long enough to create periodograms.')
return [], [], []
# round binfac UP to multiple of number of periodograms
if verbose:
print('Number of periodograms: ' + str(n_pgs))
binfac_use = max(binfac, (int((binfac - 1) / n_pgs) + 1) * n_pgs)
if verbose:
    print('Binning factor: ' + str(binfac_use))
# This sorting is slow but necessary to allow possibility of different periodogram lengths
sortind = np.argsort(freqall)
freqall = freqall[sortind]
powerall = powerall[sortind]
length = (len(freqall) - 1) // binfac_use
freqbin = np.zeros(length)
powerbin = np.zeros(length)
errorbin = np.zeros(length)
for k in range(length):
freqbin[k] = np.mean(freqall[(binfac_use * k):(binfac_use * (k + 1))])
powerbin[k] = np.mean(powerall[(binfac_use * k):(binfac_use * (k + 1))])
errorbin[k] = np.sqrt(np.var(powerall[(binfac_use * k):(binfac_use * (k + 1))]) / float(binfac_use))
return freqbin, powerbin, errorbin
#############################################
from scipy.ndimage import gaussian_filter1d
#############################################
def fad_cpsd(timea, ratea, timeb, rateb, binsize=None, binfac=20, sec_length=None, min_length=10, gap_threshold=1.5,
verbose=False, smoothing=None, smoothing_length=None):
## Method of Bachetti et al. 2017
time = np.intersect1d(timea, timeb)
if binsize is None:
    binsize = min(time[1:] - time[0:-1])
if smoothing_length is None:
    smoothing_length = 3 * int(sec_length * binsize)  # ????
# print ('Smoothing FADs by '+str(smoothing_length))
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
freqall = np.array([])
powerall = np.array([])
n_pgs = 0
for j in range(n_sections):
a_start = (np.where(time[sec_ends[j] + 1] == timea)[0])[0]
a_end = a_start + sec_ends[j + 1] - sec_ends[j]
ta_sec = timea[a_start:a_end]
ra_sec = ratea[a_start:a_end]
b_start = (np.where(time[sec_ends[j] + 1] == timeb)[0])[0]
b_end = b_start + sec_ends[j + 1] - sec_ends[j]
tb_sec = timeb[b_start:b_end]
rb_sec = rateb[b_start:b_end]
assert np.all(ta_sec == tb_sec), 'Times for each lightcurve section should match' + str(
np.mean(ta_sec - tb_sec))
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
pgs_sec = len(ta_sec) // sec_length
n_pgs += pgs_sec
for k in range(pgs_sec):
ra_subsec = ra_sec[(sec_length * k):(sec_length * (k + 1))]
rb_subsec = rb_sec[(sec_length * k):(sec_length * (k + 1))]
# power = np.real( (np.fft.fft(ra_subsec - np.mean(ra_subsec)))[1:size+1] * np.conj((np.fft.fft(rb_subsec - np.mean(rb_subsec)))[1:size+1]) ) * 2. * binsize / np.mean(ra_subsec) / np.mean(rb_subsec) / sec_length
## Fourier amplitudes of each lightcurve
fta = np.fft.fft(ra_subsec - np.mean(ra_subsec))[1:size + 1]
ftb = np.fft.fft(rb_subsec - np.mean(rb_subsec))[1:size + 1]
## Subtract to give FADs
fad = np.absolute(fta - ftb)
## Smooth FADs
if smoothing:
fad2 = gaussian_filter1d(fad.real,
smoothing_length) ** 2
# fad = smoothing.function(fad)
else:
fad2 = fad.real ** 2
## Correct FTs by FADs and normalise
power = np.real(
fta * np.conj(fta)) * 2. * binsize ** 2 / fad2 # / np.sqrt(np.mean(ra_subsec) * np.mean(rb_subsec))
# *2./(fad**2 * 2/N_ph)
freqall = np.concatenate([freqall, freq])
powerall = np.concatenate([powerall, power])
else:
raise ValueError('Please enter a section length.')
# p.plot(freq,power,'k')
### Average /bin up periodograms
if n_pgs == 0:
if verbose: print('No lightcurve sections long enough to create periodograms.')
return [], [], []
# round binfac UP to multiple of number of periodograms
if verbose: print('Number of periodograms: ' + str(n_pgs))
binfac_use = max(binfac, (int((binfac - 1) / n_pgs) + 1) * n_pgs)
if verbose: print('Binning factor: ' + str(binfac_use))
# This sorting is slow but necessary to allow possibility of different periodogram lengths
sortind = np.argsort(freqall)
freqall = freqall[sortind]
powerall = powerall[sortind]
length = (len(freqall) - 1) // binfac_use
freqbin = np.zeros(length)
powerbin = np.zeros(length)
errorbin = np.zeros(length)
for k in range(length):
freqbin[k] = np.mean(freqall[(binfac_use * k):(binfac_use * (k + 1))])
powerbin[k] = np.mean(powerall[(binfac_use * k):(binfac_use * (k + 1))])
errorbin[k] = np.sqrt(np.var(powerall[(binfac_use * k):(binfac_use * (k + 1))]) / float(binfac_use))
return freqbin, powerbin, errorbin
#############################################
## Coherence
#############################################
def coherence2(timea, ratea, timeb, rateb, binsize=None, binfac=20, sec_length=None, min_length=10, gap_threshold=1.5,
verbose=False):
assert sec_length is not None, TypeError('Please supply a section length')
if binsize is None: binsize = min(timea[1:] - timea[0:-1])
time = np.intersect1d(timea, timeb)
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
cross = np.zeros(size, dtype='complex')
powera = np.zeros(size)
powerb = np.zeros(size)
n_pgs = 0
for j in range(n_sections):
a_start = (np.where(time[sec_ends[j] + 1] == timea)[0])[0]
a_end = a_start + sec_ends[j + 1] - sec_ends[j]
ta_sec = timea[a_start:a_end]
ra_sec = ratea[a_start:a_end]
b_start = (np.where(time[sec_ends[j] + 1] == timeb)[0])[0]
b_end = b_start + sec_ends[j + 1] - sec_ends[j]
tb_sec = timeb[b_start:b_end]
rb_sec = rateb[b_start:b_end]
assert np.all(ta_sec == tb_sec), 'Times for each lightcurve section should match' + str(
np.mean(ta_sec - tb_sec))
pgs_sec = len(ta_sec) // sec_length
n_pgs += pgs_sec
for k in range(pgs_sec):
ra_subsec = ra_sec[(sec_length * k):(sec_length * (k + 1))]
rb_subsec = rb_sec[(sec_length * k):(sec_length * (k + 1))]
## Cross-spectrum
cross += np.conj((np.fft.fft(ra_subsec - np.mean(ra_subsec)))[1:size + 1]) * (np.fft.fft(
rb_subsec - np.mean(rb_subsec)))[1:size + 1]
powera += np.abs((np.fft.fft(ra_subsec - np.mean(ra_subsec)))[1:size + 1]) ** 2
powerb += np.abs((np.fft.fft(rb_subsec - np.mean(rb_subsec)))[1:size + 1]) ** 2
### Average /bin up periodograms
if n_pgs == 0:
if verbose: print('No lightcurve sections long enough to create periodograms.')
return [], [], []
# round binfac UP to multiple of number of periodograms
if verbose: print('Number of periodograms: ' + str(n_pgs))
binfac_use = int((binfac - 1) / n_pgs) + 1
if verbose: print('Binning factor: ' + str(binfac_use))
coherence2 = np.abs(cross) ** 2 / powera / powerb
err_coherence = np.sqrt(2. / n_pgs / coherence2) * (1 - coherence2)
length = len(freq) // binfac_use
freqbin = np.zeros(length)
coh2bin = np.zeros(length)
errorbin = np.zeros(length)
for k in range(length):
freqbin[k] = (freq[binfac_use * k] + freq[binfac_use * (k + 1) - 1]) / 2.
coh2bin[k] = np.mean(coherence2[(binfac_use * k):(binfac_use * (k + 1))])
errorbin[k] = np.sqrt(np.mean(err_coherence[(binfac_use * k):(binfac_use * (
k + 1))] ** 2 / binfac_use)) ### np.sqrt(np.var(lagall[(binfac_use*k):(binfac_use*(k+1))])/float(binfac_use))
return freqbin, coh2bin, errorbin
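# Interpretation sketch: coh2 near 1 means the two lightcurves are linearly
# related at that frequency. The error estimate above is
# (1 - coh2) * sqrt(2 / (n_pgs * coh2)) for n_pgs averaged segments.
# Hypothetical usage:
#   f, coh2, err = coherence2(t, soft, t, hard, sec_length=256)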
#############################################
#######################################################################
### Lags
#############################################
def lag_freq(timea, ratea, timeb, rateb, binsize=None, binfac=20, sec_length=None, min_length=10, gap_threshold=1.5,
verbose=False):
if binsize is None: binsize = min(timea[1:] - timea[0:-1])
time = np.intersect1d(timea, timeb)
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
freqall = np.array([])
lagall = np.array([])
n_pgs = 0
for j in range(n_sections):
a_start = (np.where(time[sec_ends[j] + 1] == timea)[0])[0]
a_end = a_start + sec_ends[j + 1] - sec_ends[j]
ta_sec = timea[a_start:a_end]
ra_sec = ratea[a_start:a_end]
b_start = (np.where(time[sec_ends[j] + 1] == timeb)[0])[0]
b_end = b_start + sec_ends[j + 1] - sec_ends[j]
tb_sec = timeb[b_start:b_end]
rb_sec = rateb[b_start:b_end]
assert np.all(ta_sec == tb_sec), 'Times for each lightcurve section should match' + str(
np.mean(ta_sec - tb_sec))
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
pgs_sec = len(ta_sec) // sec_length
n_pgs += pgs_sec
for k in range(pgs_sec):
ra_subsec = ra_sec[(sec_length * k):(sec_length * (k + 1))]
rb_subsec = rb_sec[(sec_length * k):(sec_length * (k + 1))]
## phase difference over -2pi to 2pi
dphase = np.angle((np.fft.fft(ra_subsec - np.mean(ra_subsec)))[1:size + 1]) - np.angle(
(np.fft.fft(rb_subsec - np.mean(rb_subsec)))[1:size + 1])
lag = (((dphase + np.pi) % (2 * np.pi)) - np.pi) / freq / 2. / np.pi
freqall = np.concatenate([freqall, freq])
lagall = np.concatenate([lagall, lag])
else:
raise TypeError('Please supply a section length')
# size = (len(ta_sec)+1)//2
# if size >=min_length:
# freq = (np.arange((size))+1.) / len(ta_sec) / binsize
# power = np.real( (np.fft.fft(ra_sec - np.mean(ra_sec)))[1:size+1] * np.conj((np.fft.fft(rb_sec - np.mean(rb_sec)))[1:size+1]) ) * 2. * binsize / np.mean(ra_sec) / np.mean(rb_sec) / len(ra_sec)
# freqall = np.concatenate([freqall, freq])
# powerall = np.concatenate([powerall, power])
# binfac_use=binfac
# n_pgs=0
# p.plot(freq,power,'k')
### Average /bin up periodograms
if n_pgs == 0:
if verbose: print('No lightcurve sections long enough to create periodograms.')
return [], [], []
# round binfac UP to multiple of number of periodograms
if verbose: print('Number of periodograms: ' + str(n_pgs))
binfac_use = max(binfac, (int((binfac - 1) / n_pgs) + 1) * n_pgs)
if verbose: print('Binning factor: ' + str(binfac_use))
# This sorting is slow but necessary to allow possibility of different periodogram lengths
sortind = np.argsort(freqall)
freqall = freqall[sortind]
lagall = lagall[sortind]
length = (len(freqall) - 1) // binfac_use
freqbin = np.zeros(length)
lagbin = np.zeros(length)
errorbin = np.zeros(length)
for k in range(length):
freqbin[k] = np.mean(freqall[(binfac_use * k):(binfac_use * (k + 1))])
lagbin[k] = np.mean(lagall[(binfac_use * k):(binfac_use * (k + 1))])
errorbin[k] = np.sqrt(np.var(lagall[(binfac_use * k):(binfac_use * (k + 1))]) / float(binfac_use))
return freqbin, lagbin, errorbin
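# Hypothetical usage (two simultaneously sampled energy bands):
#   f, lag, err = lag_freq(t, soft, t, hard, sec_length=256)
# The phase difference is wrapped to [-pi, pi) before conversion to a time
# lag, so lags are only meaningful up to +/- half a cycle at each frequency.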
#############################################
#######################################################################
### Phase folding
#############################################
def phase_profile(time, rate, frequency=None, binsize=None, binfac=20, sec_length=None, min_length=10,
gap_threshold=1.5, verbose=False, max_harm=np.inf, plot=False):
if plot: from matplotlib import pyplot as p
#############################################
## Define frequency range for fundamental
if frequency is None:
    ## For no frequency given, allow any frequency
    freq_use = [0, np.inf]
elif isinstance(frequency, (float, int)):
    ## For a specified frequency, use the closest available in the PSD
    f, po, er = psd(time, rate, binsize=binsize, binfac=binfac, sec_length=sec_length, min_length=min_length,
                    gap_threshold=gap_threshold, verbose=verbose)
    freq_use = f[np.where((f - frequency) ** 2 == np.min((f - frequency) ** 2))[0][0]]
    freq_use = [freq_use, freq_use * 1.00000001]
elif len(frequency) == 2:
    ## For a range of frequencies, use the frequency with greatest power in each periodogram
    freq_use = frequency
else:
    raise TypeError('frequency must have length at most 2')
#############################################
### Make sections as for PSD
#############################################
if binsize is None: binsize = min(time[1:] - time[0:-1])
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
# freqall = np.array([])
# powerall = np.array([])
harm_all = np.array([])
phase_diff_all = np.array([])
pulses = []
n_pgs = 0
for j in range(n_sections):
t_sec = time[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
r_sec = rate[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
## Indices of frequencies in specified range for fundamental
ind_f_range = np.intersect1d(np.where(freq >= freq_use[0])[0], np.where(freq <= freq_use[1])[0])
## Periodograms in this section
pgs_sec = len(t_sec) // sec_length
n_pgs += pgs_sec
for k in range(len(t_sec) // sec_length):
### Rates of each section from which to produce periodogram
r_subsec = r_sec[(sec_length * k):(sec_length * (k + 1))]
### Find frequency with most power in given range
ft_of_section = (np.fft.fft(r_subsec - np.mean(r_subsec)))[1:size + 1]
power = abs(ft_of_section) ** 2 * 2. * binsize / np.mean(r_subsec) ** 2 / sec_length
# print(len(power[ind_f_range] * freq[ind_f_range]))
ind_fundamental = ind_f_range[
np.where(power[ind_f_range] * freq[ind_f_range] == np.max(power[ind_f_range] * freq[ind_f_range]))[
0]]
### Find phases for harmonics
phase = np.angle(ft_of_section)
harm = (np.arange(size // (ind_fundamental + 1)) + 1)
max_harm_use = int(np.minimum(max_harm, len(harm)))
ind_harm = (ind_fundamental + 1) * harm - 1
phase_diff = (phase[ind_harm] - phase[ind_fundamental] * harm) % (2 * np.pi)
ft_of_pulse = np.zeros(sec_length, dtype=complex)
ft_of_pulse[harm[0:max_harm_use]] = (ft_of_section[ind_harm[0:max_harm_use]]) * np.exp(
-1j * phase[ind_fundamental[0:max_harm_use]] * harm[0:max_harm_use])
pulse = np.fft.ifft(ft_of_pulse)
pulses = pulses + [pulse]
if plot: p.plot(np.arange(len(pulse)) * 2 * np.pi / len(pulse), (pulse), color='grey', alpha=0.1)
phase_diff_all = np.concatenate([phase_diff_all, phase_diff])
harm_all = np.concatenate([harm_all, harm])
# freqall = np.concatenate([freqall, freq])
# powerall = np.concatenate([powerall, power])
else:
raise TypeError('Please supply a section length')
if plot: p.plot(np.arange(len(pulse)) * 2 * np.pi / len(pulse), np.mean((np.array(pulses)), axis=0))
return np.array(np.arange(len(pulse)) * 2 * np.pi / len(pulse)), np.array(
np.mean(np.array(pulses), axis=0)), np.array(np.std(np.array(pulses), axis=0) / np.sqrt(n_pgs))
#############################################
#############################################
#######################################################################
def phase_dist_harm(time, rate, frequency=None, binsize=None, binfac=20, sec_length=None, min_length=10,
gap_threshold=1.5, verbose=False, max_harm=np.inf, plot=False):
if plot: from matplotlib import pyplot as p
#############################################
## Define frequency range for fundamental
if frequency is None:
    ## For no frequency given, allow any frequency
    freq_use = [0, np.inf]
elif isinstance(frequency, (float, int)):
    ## For a specified frequency, use the closest available in the PSD
    f, po, er = psd(time, rate, binsize=binsize, binfac=binfac, sec_length=sec_length, min_length=min_length,
                    gap_threshold=gap_threshold, verbose=verbose)
    freq_use = f[np.where((f - frequency) ** 2 == np.min((f - frequency) ** 2))[0][0]]
    freq_use = [freq_use, freq_use * 1.00000001]
elif len(frequency) == 2:
    ## For a range of frequencies, use the frequency with greatest power in each periodogram
    freq_use = frequency
else:
    raise TypeError('frequency must have length at most 2')
#############################################
### Make sections as for PSD
#############################################
if binsize is None: binsize = min(time[1:] - time[0:-1])
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
# freqall = np.array([])
# powerall = np.array([])
harm_all = []
phase_diff_all = []
pulses = []
n_pgs = 0
for j in range(n_sections):
t_sec = time[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
r_sec = rate[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
## Indices of frequencies in specified range for fundamental
ind_f_range = np.intersect1d(np.where(freq >= freq_use[0])[0], np.where(freq <= freq_use[1])[0])
## Periodograms in this section
pgs_sec = len(t_sec) // sec_length
n_pgs += pgs_sec
for k in range(len(t_sec) // sec_length):
### Rates of each section from which to produce periodogram
r_subsec = r_sec[(sec_length * k):(sec_length * (k + 1))]
### Find frequency with most power in given range
ft_of_section = (np.fft.fft(r_subsec - np.mean(r_subsec)))[1:size + 1]
power = abs(ft_of_section) ** 2 * 2. * binsize / np.mean(r_subsec) ** 2 / sec_length
ind_fundamental = ind_f_range[
np.where(power[ind_f_range] * freq[ind_f_range] == np.max(power[ind_f_range] * freq[ind_f_range]))[
0]]
### Find phases for harmonics
phase = np.angle(ft_of_section)
harm = (np.arange(size // (ind_fundamental + 1)) + 1)
max_harm_use = int(np.minimum(max_harm, len(harm)))
ind_harm = (ind_fundamental + 1) * harm - 1
phase_diff = (phase[ind_harm] - phase[ind_fundamental] * harm) % (2 * np.pi)
ft_of_pulse = np.zeros(sec_length, dtype=complex)
ft_of_pulse[harm[0:max_harm_use]] = (ft_of_section[ind_harm[0:max_harm_use]]) * np.exp(
-1j * phase[ind_fundamental[0:max_harm_use]] * harm[0:max_harm_use])
# pulse = np.fft.ifft(ft_of_pulse)
# pulses = pulses+[pulse]
# if plot: p.plot(np.arange(len(pulse))*2*np.pi/len(pulse), (pulse), color='grey', alpha=0.1)
phase_diff_all = np.concatenate([phase_diff_all, phase_diff])
harm_all = np.concatenate([harm_all, harm])
else:
raise TypeError('Please supply a section length')
# if plot: p.plot(np.arange(len(pulse))*2*np.pi/len(pulse), np.mean((np.array(pulses)), axis=0))
harm_fin = np.arange(1, max(harm_all) + 1)
phase_diff_fin = []
for i in range(int(max(harm_all))):
phase_diff_fin = phase_diff_fin + [phase_diff_all[np.where(harm_all == i + 1)[0]]]
return harm_fin, phase_diff_fin
#############################################
#######################################################################
def phase_dist_rel(timea, ratea, timeb, rateb, frequency=None, binsize=None, binfac=20, sec_length=None, min_length=10,
gap_threshold=1.5, verbose=False, max_harm=np.inf, plot=False):
if plot: from matplotlib import pyplot as p
#############################################
### Make sections as for PSD
#############################################
if binsize is None: binsize = min(timea[1:] - timea[0:-1])
time = np.intersect1d(timea, timeb)
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
# freqall = np.array([])
# powerall = np.array([])
harm_all = []
phase_diff_all = []
pulses = []
n_pgs = 0
for j in range(n_sections):
t_sec = time[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
r_sec = rate[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
## Indices of frequencies in specified range for fundamental
ind_f_range = np.intersect1d(np.where(freq >= freq_use[0])[0], np.where(freq <= freq_use[1])[0])
## Periodograms in this section
pgs_sec = len(t_sec) // sec_length
n_pgs += pgs_sec
for k in range(len(t_sec) // sec_length):
### Rates of each section from which to produce periodogram
r_subsec = r_sec[(sec_length * k):(sec_length * (k + 1))]
### Find frequency with most power in given range
ft_of_section = (np.fft.fft(r_subsec - np.mean(r_subsec)))[1:size + 1]
power = abs(ft_of_section) ** 2 * 2. * binsize / np.mean(r_subsec) ** 2 / sec_length
ind_fundamental = ind_f_range[
np.where(power[ind_f_range] * freq[ind_f_range] == np.max(power[ind_f_range] * freq[ind_f_range]))[
0]]
### Find phases for harmonics
phase = np.angle(ft_of_section)
harm = (np.arange(size // (ind_fundamental + 1)) + 1)
max_harm_use = int(np.minimum(max_harm, len(harm)))
ind_harm = (ind_fundamental + 1) * harm - 1
phase_diff = (phase[ind_harm] - phase[ind_fundamental] * harm) % (2 * np.pi)
ft_of_pulse = np.zeros(sec_length, dtype=complex)
ft_of_pulse[harm[0:max_harm_use]] = (ft_of_section[ind_harm[0:max_harm_use]]) * np.exp(
-1j * phase[ind_fundamental[0:max_harm_use]] * harm[0:max_harm_use])
# pulse = np.fft.ifft(ft_of_pulse)
# pulses = pulses+[pulse]
# if plot: p.plot(np.arange(len(pulse))*2*np.pi/len(pulse), (pulse), color='grey', alpha=0.1)
phase_diff_all = np.concatenate([phase_diff_all, phase_diff])
harm_all = np.concatenate([harm_all, harm])
else:
raise TypeError('Please supply a section length')
# if plot: p.plot(np.arange(len(pulse))*2*np.pi/len(pulse), np.mean((np.array(pulses)), axis=0))
harm_fin = np.arange(1, max(harm_all) + 1)
phase_diff_fin = []
for i in range(int(max(harm_all))):
phase_diff_fin = phase_diff_fin + [phase_diff_all[np.where(harm_all == i + 1)[0]]]
return harm_fin, phase_diff_fin
#############################################
#############################################
## Earlier names
#############################################
phase_dist = phase_dist_harm
#######################################################################
### Higher-order (bispectra etc.)
#############################################
def bispectrum(time, rate, binsize=None, binfac=20, sec_length=None, min_length=10, gap_threshold=1.5, verbose=False):
if binsize is None: binsize = min(time[1:] - time[0:-1])
breaks = (np.where([time[1:] - time[0:-1] > binsize * gap_threshold]))[1]
n_sections = len(breaks) + 1
sec_ends = np.concatenate([[-1], breaks, [len(time) - 1]])
size = (int(sec_length) + 1) // 2
freqall = np.array([])
powerall = np.array([])
bisp = np.zeros([size + 1, size + 1]) + 0j
n_pgs = 0
for j in range(n_sections):
t_sec = time[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
r_sec = rate[(sec_ends[j] + 1):(sec_ends[j + 1] + 1)]
if sec_length is not None:
size = (int(sec_length) + 1) // 2
freq = (np.arange((size)) + 1.) / sec_length / binsize
pgs_sec = len(t_sec) // sec_length
n_pgs += pgs_sec
for k in range(len(t_sec) // sec_length):
r_subsec = r_sec[(sec_length * k):(sec_length * (k + 1))]
f_comp = (np.fft.fft(r_subsec - np.mean(r_subsec)))[0:size + 1]
for fi in range(size + 1):
fj = size + 1 - fi
bisp[fi, 0:fj] += f_comp[fi] * f_comp[0:fj] * np.conj(f_comp[fi:fi + fj])
# power = abs( (np.fft.fft(r_subsec - np.mean(r_subsec)))[1:size+1] ) **2 * 2. * binsize / np.mean(r_subsec)**2 / sec_length
# freqall = np.concatenate([freqall, freq])
# powerall = np.concatenate([powerall, power])
else:
raise TypeError('sec_length is required')
# p.plot(freq,power,'k')
### Average /bin up periodograms
if False:
if n_pgs == 0:
if verbose: print('No lightcurve sections long enough to create periodograms.')
return np.array([]), np.array([]), np.array([])
# round binfac UP to multiple of number of periodograms
if verbose: print('Number of periodograms: ' + str(n_pgs))
binfac_use = max(binfac, (int((binfac - 1) / n_pgs) + 1) * n_pgs)
if verbose: print('Binning factor: ' + str(binfac_use))
length = (len(freqall) - 1) // binfac_use
freqbin = np.zeros(length)
powerbin = np.zeros(length)
errorbin = np.zeros(length)
|
numpy.zeros
|
#!/usr/bin/env python3
import math
import spiceypy
import logging
import os
import numpy as np
try:
# Try to import the SpiceNOFRAMECONNECT exception from spiceypy 3.1.1
from spiceypy.utils.exceptions import SpiceNOFRAMECONNECT as SpiceNOFRAMECONNECT
except ImportError:
# Otherwise treat a SpiceNOFRAMECONNECT exception as a SpiceyError, for spiceypy 2.3.2
from spiceypy.utils.support_types import SpiceyError as SpiceNOFRAMECONNECT
from spiceypy.utils.support_types import *
from spiops.utils import time
from spiops.utils.utils import plot
from spiops.utils.utils import plot_attitude_error
from spiops.utils.utils import target2frame
from spiops.utils.utils import findIntersection
from spiops.utils.utils import findNearest
from spiops.utils.files import download_file
from spiops.utils.files import list_files_from_ftp
from spiops.utils.files import get_aem_quaternions
from spiops.utils.files import get_aocs_quaternions
from spiops.utils.files import download_tm_data
from spiops.utils.naif import optiks # Do not remove, called from spival
from spiops.utils.naif import brief # Do not remove, called from spival
from spiops.classes.observation import TimeWindow # Do not remove, called from spival
from spiops.classes.body import Target # Do not remove, called from spival
from spiops.classes.body import Observer # Do not remove, called from spival
import imageio
import matplotlib as mpl
import matplotlib.pyplot as plt
from spiceypy import support_types as stypes
from bokeh.plotting import figure, output_file, output_notebook, show
from bokeh.models import ColumnDataSource, DatetimeTickFormatter, LabelSet
from spiops.utils.time import et_to_datetime
# from spiops.utils.webmust.webmust_handler import WebmustHandler
"""
The MIT License (MIT)
Copyright (c) [2015-2017] [<NAME>]
Copyright (c) [2015-2017] [<NAME>]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
def load(mk):
return spiceypy.furnsh(mk)
def adcsng_fill_template(template,
file,
replacements,
cleanup=False):
#
# If the template file is equal to the output file then we need to create a temporary template - which will be
# a duplicate - in order to write in the file. A situation where we would like to have them be the same is,
# for example, if we call this function several times in a row, replacing keywords in the template in steps.
#
if template == file:
with open(file, "r") as f:
with open('fill_template.temp', "w+") as t:
for line in f:
t.write(line)
template = 'fill_template.temp'
with open(file, "w+") as f:
#
# Items are replaced as per their correspondence in the replacements dictionary
#
with open(template, "r+") as t:
for line in t:
if '{' in line:
for k, v in replacements.items():
if '{' + k + '}' in line: line = line.replace('{' + k + '}', v)
f.write(line)
#
# If the option cleanup is set to true, we remove the keyword assignments in the filled template which are
# unfilled (they should be optional)
#
if cleanup:
with open(file, "r") as f:
with open('fill_template.temp', "w+") as t:
for line in f:
t.write(line)
template = 'fill_template.temp'
with open(file, "w+") as f:
with open('fill_template.temp', "r") as t:
for line in t:
if '{' not in line:
f.write(line)
#
# The temporary files are removed
#
if os.path.isfile('fill_template.temp'):
os.remove('fill_template.temp')
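# Hypothetical usage (file names and keywords are assumptions):
#   adcsng_fill_template('setup.template', 'setup.cfg',
#                        {'DATE': '2021-03-08', 'VERSION': '1.0'}, cleanup=True)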
# Originally an adcsng function; needs to be re-arranged in adcsng to be made
# more generic
def adcsng_hk_quaternions2ck_reader(tm_file,
input_time_format='UTC',
input_time_field_number='1',
delimiter=',',
input_processing=False,
qs_col=1, qx_col=2, qy_col=3, qz_col=4):
#
# We obtain the number of data fields and its correspondance
#
input_data_field_numbers = [qx_col, qy_col, qz_col, qs_col]
tm_list = []
previous_row_time = ''
sclk_partition = '1'
sclk_delimiter = '.'
filter_flag = False
index = 0
row_prev = []
sclk_fraction_prev = ''
with open(tm_file, 'r') as t:
for line in t:
#
# TODO: Main difference from the function in adcsng
#
if '#' not in line and 'Date' not in line and input_time_format not in line:
index += 1
row_data = []
# We need to remove the end of line character:
line = line.split('\n')[0]
try:
if ',' in delimiter:
if input_time_format == 'SCLK':
if ',' in input_time_field_number:
row_time = sclk_partition + '/' + str(line.split(delimiter)[
int(input_time_field_number[0]) - 1]) + \
sclk_delimiter + str(line.split(delimiter)[
int(input_time_field_number[2]) - 1])
else:
input_time_field_number = int(input_time_field_number)
row_time = str(line.split(delimiter)[
input_time_field_number - 1])
else:
row_time = str(line.split(delimiter)[input_time_field_number-1])
if (' ' in row_time):
if input_time_format == 'SCLK':
row_time = row_time.replace(' ','')
else:
row_time = row_time.replace(' ','T')
for data_element_field_number in input_data_field_numbers:
row_data.append(float(line.split(',')[data_element_field_number-1]))
else:
proc_line = line.strip()
row_time = str(proc_line.split(delimiter)[input_time_field_number - 1])
for data_element_field_number in input_data_field_numbers:
#
# We need to check that
#
row_data.append(float(line.split()[data_element_field_number-1]))
except (ValueError, IndexError):
logging.info(' HM TM Processing: Found incomplete data line in line {}:'.format(index))
logging.info(' {}'.format(line))
continue
row = row_time + ' '
# As indicated by <NAME> in an e-mail "ROS and MEX "measured" CKs"
# sometimes the scalar value is negative and the sign of the rest of the
# components of the quaternions needs to be changed!
if row_data[-1] < 0:
neg_data = [-x for x in row_data]
logging.info(' HM TM Processing: Found negative QS on input line {}:'.format(row_data))
logging.info(' ' + str(neg_data))
row_data = neg_data
for element in row_data:
row += str(element) + ' '
# We filter out "bad quaternions"
row += '\n'
# We remove the latest entry if a time is duplicated
if row_time == previous_row_time:
logging.info(
' HM TM Processing: Found duplicate time at {}'.format(
row_time))
else:
# We do not include the entry if any element is >= 1
append_bool = True
for quaternion in row_data:
if quaternion >= 1.0:
append_bool = False
logging.info(
' HM TM Processing: Found quaternion GT 1 on input line {}:'.format(
row_data))
logging.info(' ' + str(row))
# This is a special filter that has been set for ExoMars2016
# More explanations in [1]
if input_processing:
sclk_fraction = line.split(':')[-1].split(' ')[0]
if filter_flag:
if sclk_fraction == sclk_fraction_prev:
row_prev.append(row)
elif len(row_prev) <= 5 and sclk_fraction == sclk_initial:
logging.info(
' HM TM Processing: Coarse quaternion: Spurious SCLK fractions before input line {}:'.format(
index))
for element in row_prev:
logging.info(' ' + str(element).split('\n')[0])
tm_list.remove(element)
filter_flag = False
tm_list = []
row_prev = []
sclk_fraction_prev = sclk_fraction
else:
row_prev = []
filter_flag = False
if sclk_fraction_prev and sclk_fraction != sclk_fraction_prev and not filter_flag:
filter_flag = True
row_prev.append(row)
sclk_initial = sclk_fraction_prev
sclk_fraction_prev = sclk_fraction
if append_bool:
tm_list.append(row)
previous_row_time = row_time
# We remove the carriage return from the last line
last_line = tm_list[-1].split('\n')[0]
tm_list = tm_list[:-1]
tm_list.append(last_line)
return(tm_list)
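# Hypothetical usage (file name and column layout are assumptions):
#   tm_list = adcsng_hk_quaternions2ck_reader('aocs_quaternions.csv',
#                                             input_time_format='UTC',
#                                             qs_col=1, qx_col=2, qy_col=3, qz_col=4)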
def fov_illum(mk, sensor, time=None, angle='DEGREES', abcorr='LT+S',
report=False, unload=False):
"""
Determine the Illumination of a given FoV (for light scattering computations
for example). This function is based on the following SPICE APIs:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/getfov_c.html
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/spkezp_c.html
:param mk: Meta-kernel to load the computation scenario
:type mk: str
:param sensor: Sensor ID code or name
:type sensor: Union[str, int]
:param time: Time to compute the quantity
:type time: Union[str, float]
:param angle: Angular unit; it can be 'DEGREES' or 'RADIANS'. Default is 'DEGREES'
:type angle: str
:param abcorr: Aberration correction. Default and recommended is 'LT+S'
:type abcorr: str
:param report: If True prints the resulting illumination angle on the screen
:type report: bool
:param unload: If True it will unload the input meta-kernel
:type unload: bool
:return: Angle in between a sensor's boresight and the sun-sc direction
:rtype: float
"""
room = 99
shapelen = 1000
framelen = 1000
angle = angle.upper()
spiceypy.furnsh(mk)
if time:
time = spiceypy.utc2et(time)
else:
time = spiceypy.utc2et('2016-08-10T00:00:00')
if angle != 'DEGREES' and angle != 'RADIANS':
print('angle should be either degrees or radians')
if isinstance(sensor, str):
instid = spiceypy.bodn2c(sensor)
else:
instid = sensor
shape, frame, bsight, n, bounds = spiceypy.getfov(instid, room, shapelen, framelen)
rotation = spiceypy.pxform(frame, 'J2000', time)
bsight = spiceypy.mxv(rotation, bsight)
# The following assumes that the IDs of the given S/C FK have been defined
# according to the NAIF/ESS standards:
#
# -NXXX
#
# where:
# N is the SC id and can consist of a given number of digits
# XXX are three digits that identify the sensor
sc_id = int(str(instid)[:-3])
ptarg, lt = spiceypy.spkezp(10, time, 'J2000', abcorr, sc_id)
fov_illumination = spiceypy.vsep(bsight, ptarg)
if unload:
spiceypy.unload(mk)
if angle == 'DEGREES':
fov_illumination = math.degrees(fov_illumination)
if report:
print('Illumination angle of {} is {} [{}]'.format(sensor,
fov_illumination,
angle))
return fov_illumination
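# Hypothetical usage (meta-kernel path and sensor name are assumptions):
#   ang = fov_illum('ops.tm', 'SENSOR_NAME', time='2016-08-10T12:00:00',
#                   report=True)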
def cov_spk_obj(mk, object, time_format='TDB', global_boundary=False,
report=False, unload=False):
"""
Provides time coverage summary for a given object for a list of
binary SPK files provided in a meta-kernel. Several options are
available. This function is based on the following SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/spkcov_c.html
The NAIF utility BRIEF can be used for the same purpose.
:param mk: Meta-kernel to load the computation scenario
:type mk: str
:param object: Ephemeris Object to obtain the coverage from
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB in calendar format) or 'TDB'. Default is 'TDB'
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the coverage windows or only the absolute start and finish coverage times
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen
:type report: bool
:param unload: If True it will unload the input meta-kernel
:type unload: bool
:return: Returns a list with the coverage intervals
:rtype: list
"""
spiceypy.furnsh(mk)
boundaries_list = []
et_boundaries_list = []
object_id = spiceypy.bodn2c(object)
maxwin = 2000
spk_count = spiceypy.ktotal('SPK') - 1
while spk_count >= 0:
spk_kernel = spiceypy.kdata(spk_count, 'SPK', 155, 155, 155)
spk_ids = spiceypy.spkobj(spk_kernel[0])
for id in spk_ids:
if id == object_id:
object_cov = SPICEDOUBLE_CELL(maxwin)
spiceypy.spkcov(spk_kernel[0], object_id, object_cov)
boundaries = time.cov_int(object_cov=object_cov,
object_id=object_id,
kernel=spk_kernel[0],
global_boundary=global_boundary,
time_format=time_format,
report=report)
boundaries_list.append(boundaries)
#
# We need to have the boundaries in TDB in order to sort out the
# min and max to obtain the global ones for multiple kernels
#
if global_boundary:
et_boundaries_list.append(time.cov_int(
object_cov=object_cov,
object_id=object_id,
kernel=spk_kernel[0],
global_boundary=True,
time_format='TDB',
report=False))
spk_count -= 1
if global_boundary:
start_time = min(et_boundaries_list)[0]
finish_time = max(et_boundaries_list)[1]
boundaries_list = time.et2cal([start_time, finish_time],
format=time_format)
if report:
print("Global Coverage for {} [{}]: {} - {}".format(
str(spiceypy.bodc2n(object_id)), time_format, boundaries_list[0],
boundaries_list[1]))
if unload:
spiceypy.unload(mk)
return boundaries_list
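# Example (hypothetical usage sketch; the meta-kernel and object names are
# assumptions):
#
#     intervals = cov_spk_obj(mk='em16_ops.tm', object='TGO',
#                             time_format='UTC', report=True)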
def cov_spk_ker(spk, object=False, time_format='TDB', support_ker ='',
report=False, unload=True):
"""
Provides time coverage summary for a given object for a given SPK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/spkcov_c.html
The NAIF utility BRIEF can be used for the same purpose.
:param spk: SPK file to be used
    :type spk: str
:param support_ker: Support kernels required to run the function. At least it should be a leapseconds kernel (LSK) and optionally a meta-kernel (MK)
:type support_ker: Union[str, list]
:param object: Ephemeris Object or list of objects to obtain the coverage from
:type object: str
    :param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB in calendar format), 'TDB' or 'SPICE'. Default is 'TDB'
:type time_format: str
:param report: If True prints the resulting coverage on the screen
:type report: bool
:param unload: If True it will unload the input meta-kernel
:type unload: bool
:return: Returns a list with the coverage intervals
:rtype: list
"""
spiceypy.furnsh(spk)
object_id = []
boundaries = []
if object and not isinstance(object, list):
object = [object]
if support_ker:
if isinstance(support_ker, str):
support_ker = [support_ker]
for ker in support_ker:
spiceypy.furnsh(ker)
maxwin = 2000
spk_ids = spiceypy.spkobj(spk)
if not object:
object_id = spk_ids
object = []
for id in spk_ids:
object.append(spiceypy.bodc2n(id))
else:
for element in object:
object_id.append(spiceypy.bodn2c(element))
for id in object_id:
if id in spk_ids:
object_cov = SPICEDOUBLE_CELL(maxwin)
spiceypy.spkcov(spk, id, object_cov)
cov = time.cov_int(object_cov=object_cov,
object_id=id,
kernel=spk,
time_format=time_format,
report=report)
else:
if report:
print('{} with ID {} is not present in {}.'.format(object,
id, spk))
if unload:
spiceypy.unload(spk)
if support_ker:
if isinstance(support_ker, str):
support_ker = [support_ker]
for ker in support_ker:
spiceypy.unload(ker)
return False
if time_format == 'SPICE':
boundaries.append(object_cov)
else:
boundaries.append(cov)
if unload:
spiceypy.unload(spk)
if support_ker:
if isinstance(support_ker, str):
support_ker = [support_ker]
for ker in support_ker:
spiceypy.unload(ker)
return boundaries
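# Example (hypothetical usage sketch; the kernel file names are assumptions):
#
#     boundaries = cov_spk_ker('em16_tgo_fsp_048.bsp', object='TGO',
#                              support_ker='naif0012.tls',
#                              time_format='UTC', report=True)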
def spkVsOem(sc, spk, plot_style='line', notebook=True):
spiceypy.timdef('SET', 'SYSTEM', 10, 'TDB')
spiceypy.furnsh(spk)
if sc == 'MPO':
file = spk.split('/')[-1].replace('\n', '').replace('bc_mpo_fcp_', '').split('_')[0]
file = 'BCCruiseOrbit__' + file + '.bc'
download_file("data/ANCDR/BEPICOLOMBO/fdy", file)
else:
print('Unsupported spacecraft: ' + sc)
return None, None
print('OEM file: ' + file)
if not os.path.isfile(file):
print('OEM file cannot be downloaded!')
return None, None
oemfile = open(file)
error = []
pos_norm_error = []
vel_norm_error = []
data_list = []
for line in oemfile.readlines():
if 'CENTER_NAME' in line:
center = line.split('= ')[1].replace('\n', '')
if line[:2] == '20':
data = line.replace('\n', '').split()
data_list.append(data)
for i in range(0, len(data_list)-1, 1):
#
        # skip OEM lines with repeated time tags (typically at the end of a
        # segment) as they are superseded by the latest line with that time tag
#
if data_list[i][0] != data_list[i+1][0]:
data = data_list[i]
et = spiceypy.str2et(data[0])
state = spiceypy.spkezr(sc, et, 'J2000', 'NONE', center)[0]
curr_error = [et,
abs(state[0] - float(data[1])),
abs(state[1] - float(data[2])),
abs(state[2] - float(data[3])),
abs(state[3] - float(data[4])),
abs(state[4] - float(data[5])),
abs(state[5] - float(data[6]))]
error.append(curr_error)
pos = np.asarray(curr_error[1:4])
vel = np.asarray(curr_error[4:7])
pos_norm_error.append(spiceypy.vnorm(pos))
vel_norm_error.append(spiceypy.vnorm(vel))
max_pos_norm_error = max(pos_norm_error)
max_vel_norm_error = max(vel_norm_error)
error = np.asarray(error)
print('Avg position error [km]: ' + str(np.mean(pos_norm_error)) +
' , max position error [km]: ' + str(max_pos_norm_error))
print('Avg velocity error [km/s]: ' + str(np.mean(vel_norm_error)) +
' , max velocity error [km/s]: ' + str(max_vel_norm_error))
plot(error[:, 0],
[error[:, 1], error[:, 2], error[:, 3]],
yaxis_name=['X', 'Y', 'Z'],
title='Source OEM to generated SPK position difference',
format=plot_style,
yaxis_units='Position error Km',
notebook=notebook)
plot(error[:, 0],
[error[:, 4], error[:, 5], error[:, 6]],
yaxis_name=['VX', 'VY', 'VZ'],
title='Source OEM to generated SPK velocity difference',
format=plot_style,
yaxis_units='Km/s',
notebook=notebook)
os.remove(file)
spiceypy.timdef('SET', 'SYSTEM', 10, 'UTC')
spiceypy.unload(spk)
return max_pos_norm_error, max_vel_norm_error
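# Example (hypothetical usage sketch; the SPK file name is an assumption; the
# reference OEM is fetched from the FTP structure assumed by this module):
#
#     max_pos_err, max_vel_err = spkVsOem(
#         'MPO', 'bc_mpo_fcp_00094_20181020_20251101_v01.bsp',
#         plot_style='line', notebook=False)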
def ckVsAEM(sc, ck, plot_style='line', notebook=True):
spiceypy.timdef('SET', 'SYSTEM', 10, 'TDB')
spiceypy.furnsh(ck)
if sc == 'MPO':
file = ck.split('/')[-1].replace('\n', '').split('_')[4]
file = 'AttitudePredictionST__' + file + '.bc'
download_file("data/ANCDR/BEPICOLOMBO/fdy", file)
else:
print('Unsupported spacecraft: ' + sc)
return None
print('AEM file: ' + file)
if not os.path.isfile(file):
print('AEM file cannot be downloaded!')
return None
aem_guats = get_aem_quaternions(file)
if len(aem_guats):
        # If any quaternions were inserted, remove the first element to
        # create a margin with the start of the CK
aem_guats.pop(0)
error, max_ang_error = get_quats_ang_error(aem_guats, sc)
plot_attitude_error(np.asarray(error),
max_ang_error,
'Source AEM Quaternions to generated CK orientation difference',
plot_style,
notebook)
os.remove(file)
spiceypy.unload(ck)
return max_ang_error
def ckVsAocs(sc, ck, plot_style='line', notebook=True):
spiceypy.timdef('SET', 'SYSTEM', 10, 'UTC')
spiceypy.furnsh(ck)
if sc == 'MPO':
file = ck.split('/')[-1].replace('\n', '').split('_')[5]
file = 'mpo_raw_hk_aocs_measured_attitude_' + file + '.tab'
download_file("data/ANCDR/BEPICOLOMBO/hkt", file)
else:
print('Unsupported spacecraft: ' + sc)
return None
print('AOCS tab file: ' + file)
if not os.path.isfile(file):
print('AOCS tab file cannot be downloaded!')
return None
aocs_quats = get_aocs_quaternions(file)
error, max_ang_error = get_quats_ang_error(aocs_quats, sc)
plot_attitude_error(error,
max_ang_error,
'Source AOCS Measured Quaternions to generated CK orientation difference',
plot_style,
notebook)
os.remove(file)
spiceypy.unload(ck)
return max_ang_error
def get_quats_ang_error(quats, sc):
error = []
max_ang_error = 0
for quat in quats:
et = quat[0]
q_spice = spiceypy.m2q(spiceypy.pxform('J2000', sc + '_SPACECRAFT', et))
if quat[1] < 0:
q_spice[0] *= -1
q_spice[1] *= -1
q_spice[2] *= -1
q_spice[3] *= -1
quat[2] *= -1
quat[3] *= -1
quat[4] *= -1
q_error = [abs(q_spice[0] - quat[1]),
abs(q_spice[1] - quat[2]),
abs(q_spice[2] - quat[3]),
abs(q_spice[3] - quat[4])]
mrot_spice = spiceypy.q2m(q_spice)
mrot_quats = spiceypy.q2m(quat[1:5])
vz_spice = spiceypy.mxv(mrot_spice, [0, 0, 1])
vz_quats = spiceypy.mxv(mrot_quats, [0, 0, 1])
ang_error = spiceypy.vsep(vz_spice, vz_quats)
max_ang_error = max(max_ang_error, abs(ang_error))
curr_error = [et]
curr_error.extend(q_error)
error.append(curr_error)
max_ang_error = np.rad2deg(max_ang_error) * 1000
return np.asarray(error), max_ang_error
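# Note on the hemisphere alignment performed in get_quats_ang_error: a
# quaternion q and its negation -q represent the same rotation, so flipping
# all four components before differencing keeps the component-wise errors
# meaningful. Minimal demonstration (no kernels required):
#
#     q = np.array([0.5, 0.5, 0.5, 0.5])
#     assert np.allclose(spiceypy.q2m(q), spiceypy.q2m(-q))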
def saa_vs_hk_sa_position(sc, plot_style='line', notebook=True):
spiceypy.timdef('SET', 'SYSTEM', 10, 'UTC')
sa_angles = [] # Read angles from TM, List of items as [et, angle_deg]
if sc == 'MPO':
# Set some mission specific constants
sadm_frame = 'MPO_SA' # SA Rotating Frame
sadm_ref_frame = 'MPO_SA_SADM' # SA Fixed Frame
ref_vector = np.asarray([0, 0, 1]) # SA Rotating Plane normal
ref_cross_vector = np.asarray([0, 1, 0]) # Common rotation vector btw SA Rotating Frm and Fixed Frm
# For MPO SA the TM files are given in daily basis, so we need to
# concatenate N of them to obtain a greater period coverage.
hkt_path = "data/ANCDR/BEPICOLOMBO/hkt/"
hkt_expression = 'mpo_raw_hk_sa_position_????????.tab'
num_sa_files = 7 # Compare last week
# Determine files to use to fetch TM data
sa_files = list_files_from_ftp(hkt_path, hkt_expression)
sa_files = sa_files[-num_sa_files:]
# For each file, download it, add data to array, and remove it
sa_angles = download_tm_data(sa_files, hkt_path, ",", [2], [180.0 / math.pi])
if not len(sa_angles):
print("Cannot obtain required TM data, aborting.")
return None
elif sc == 'MTM':
# Set some mission specific constants
sadm_frame = 'MTM_SA+X' # SA Rotating Frame
sadm_ref_frame = 'MTM_SA+X_ZERO' # SA Fixed Frame
ref_vector = np.asarray([0, 1, 0]) # SA Rotating Plane normal
ref_cross_vector = np.asarray([1, 0, 0]) # Common rotation vector btw SA Rotating Frm and Fixed Frm
# For MTM SA the TM files are given in daily basis, so we need to
# concatenate N of them to obtain a greater period coverage.
hkt_path = "data/ANCDR/BEPICOLOMBO/hkt/"
hkt_expression = 'mtm_raw_hk_sa_position_????????.tab'
num_sa_files = 7 # Compare last week
# Determine files to use to fetch TM data
sa_files = list_files_from_ftp(hkt_path, hkt_expression)
sa_files = sa_files[-num_sa_files:]
# For each file, download it, add data to array, and remove it
sa_angles = download_tm_data(sa_files, hkt_path, ",", [2], [180.0 / math.pi])
if not len(sa_angles):
print("Cannot obtain required TM data, aborting.")
return None
else:
print('Unsupported spacecraft: ' + sc)
return None
# Compare with SPICE SA Angles
error = []
max_ang_error = 0
num_gaps = 0
for sa_angle in sa_angles:
try:
et = sa_angle[0]
hk_sa_angle = sa_angle[1]
# Determine the rotation matrix to pass from the SA Rotating Frame
# to the SA Fixed frame
sadm_rot = spiceypy.pxform(sadm_frame, sadm_ref_frame, et)
            # Convert the SA reference vector in the rotating frame into
            # the SA fixed frame
sadm_vector = spiceypy.mxv(sadm_rot, ref_vector)
sadm_angle = np.rad2deg(spiceypy.vsep(ref_vector, sadm_vector))
            # Because vsep is always positive, we use the cross product to
            # determine whether the rotation is positive or negative
sadm_cross_vector = np.cross(ref_vector, sadm_vector)
            # The dot product of the normalised vectors shall be either 1 or -1.
sadm_angle = np.dot(spiceypy.unorm(ref_cross_vector)[0], spiceypy.unorm(sadm_cross_vector)[0]) * sadm_angle
ang_error = abs(sadm_angle - hk_sa_angle) * 1000 # mdeg
max_ang_error = max(max_ang_error, ang_error)
error.append([et, ang_error])
except SpiceNOFRAMECONNECT:
# There is a gap in the CK file, ignore this SA sample.
num_gaps += 1
continue
# Plot error
if len(error):
error = np.asarray(error)
print('Max angular error [mdeg]: ' + str(max_ang_error))
plot(error[:, 0],
[error[:, 1]],
yaxis_name=['Angular error'],
title=sc + " SA angular error between TM and SPICE",
format=plot_style,
yaxis_units='mdeg',
notebook=notebook)
return max_ang_error
else:
print('Angular error cannot be computed. Found ' + str(num_gaps) + ' of ' + str(len(sa_angles)) + ' samples without data.')
return None
"""
def sadmCkVsMust(sc, start_time, end_time, plot_style='line', notebook=True):
# TODO: METHOD NOT FINISHED!!!
if sc == 'MPO':
must_sadm_param = 'NCADAF41' # TODO: Set correct parameter for SADM
sadm_frame = 'MPO_SA'
sadm_ref_frame = 'MPO_SA_SADM'
ref_vector = np.asarray([0, 0, 1])
mission_phase = 'BEPICRUISE'
else:
print('Unsupported spacecraft: ' + sc)
return None
et_start = spiceypy.utc2et(start_time)
et_end = spiceypy.utc2et(end_time)
start_time = et_to_datetime(et_start).strftime('%Y-%b-%d %H:%M:%S')
end_time = et_to_datetime(et_end).strftime('%Y-%b-%d %H:%M:%S')
error = []
max_ang_error = 0
tm = WebmustHandler(mission_phase=mission_phase)
df_0 = tm.get_tm([must_sadm_param], start_time, end_time)
for row in df_0:
utc = row[0]
must_angle = row[1] # TODO: Must be in degrees
et = spiceypy.utc2et(utc)
sadm_rot = spiceypy.pxform(sadm_ref_frame, sadm_frame, et)
sadm_vector = spiceypy.mxv(sadm_rot, ref_vector)
sadm_angle = np.rad2deg(spiceypy.vsep(ref_vector, sadm_vector))
ang_error = abs(sadm_angle - must_angle) * 1000 # mdeg
max_ang_error = max(max_ang_error, ang_error)
error.append([et, ang_error])
# Plot error
error = np.asarray(error)
print('Max angular error [mdeg]: ' + str(max_ang_error))
plot(error[:, 0],
[error[:, 1]],
yaxis_name=['Angular error'],
title=sc + " SA angular error between WebMUST and SPICE",
format=plot_style,
yaxis_units='mdeg',
notebook=notebook)
return max_ang_error
"""
def cov_ck_obj(mk, object, time_format= 'UTC', global_boundary=False,
report=False, unload=False):
"""
Provides time coverage summary for a given object for a list of
binary CK files provided in a meta-kernel. Several options are
available. This function is based on the following SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param mk: Meta-kernel to load the computation scenario.
:type mk: str
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB in calendar format) or 'TDB'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
spiceypy.furnsh(mk)
boundaries_list = []
et_boundaries_list = []
object_id = spiceypy.namfrm(object)
MAXIV = 2000
ck_count = spiceypy.ktotal('CK') - 1
WINSIZ = 2 * MAXIV
MAXOBJ = 10000
while ck_count >= 0:
ck_ids = spiceypy.support_types.SPICEINT_CELL(MAXOBJ)
ck_kernel = spiceypy.kdata(ck_count, 'CK', 155, 155, 155)
try:
ck_ids = spiceypy.ckobj(ck=ck_kernel[0], outCell=ck_ids)
except:
ck_ids = spiceypy.ckobj(ck=ck_kernel[0])
for id in ck_ids:
if id == object_id:
object_cov = spiceypy.support_types.SPICEDOUBLE_CELL(WINSIZ)
object_cov = spiceypy.ckcov(ck=ck_kernel[0], idcode=object_id,
needav=False, level='SEGMENT',
tol=0.0, timsys='TDB',
cover=object_cov)
boundaries = time.cov_int(object_cov=object_cov,
object_id=object_id,
kernel=ck_kernel[0],
global_boundary=global_boundary,
time_format=time_format,
report=report)
boundaries_list.append(boundaries)
#
# We need to have the boundaries in TDB in order to sort out the
# min and max to obtain the global ones for multiple kernels
#
if global_boundary:
et_boundaries_list.append(time.cov_int(
object_cov=object_cov,
object_id=object_id,
kernel=ck_kernel[0],
global_boundary=True,
time_format='TDB',
report=False))
ck_count -= 1
if global_boundary:
start_time = min(et_boundaries_list)[0]
finish_time = max(et_boundaries_list)[1]
boundaries_list = time.et2cal([start_time, finish_time],
format=time_format)
if report:
try:
body_name = spiceypy.bodc2n(object_id)
except:
body_name = spiceypy.frmnam(object_id, 60)
print("Global Coverage for {} [{}]: {} - {}".format(
body_name, time_format, boundaries_list[0],
boundaries_list[1]))
if unload:
spiceypy.unload(mk)
return boundaries_list
def cov_ck_ker(ck, object, support_ker=list(), time_format='UTC',
report=False, unload=True):
"""
Provides time coverage summary for a given object for a given CK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param ck: CK file to be used
    :type ck: str
:param support_ker: Support kernels required to run the function. At least
it should be a leapseconds kernel (LSK) and a Spacecraft clock kernel
(SCLK) optionally a meta-kernel (MK) which is highly recommended. It
is optional since the kernels could have been already loaded.
:type support_ker: Union[str, list]
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB
in calendar format), 'TDB' or 'SPICE'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the
coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
spiceypy.furnsh(ck)
if support_ker:
if isinstance(support_ker, str):
support_ker = [support_ker]
for ker in support_ker:
spiceypy.furnsh(ker)
object_id = spiceypy.namfrm(object)
MAXIV = 200000
WINSIZ = 2 * MAXIV
MAXOBJ = 100000
ck_ids = spiceypy.support_types.SPICEINT_CELL(MAXOBJ)
try:
ck_ids = spiceypy.ckobj(ck, outCell=ck_ids)
except:
ck_ids = spiceypy.ckobj(ck)
if object_id in ck_ids:
object_cov = spiceypy.support_types.SPICEDOUBLE_CELL(WINSIZ)
        spiceypy.scard(0, object_cov)
object_cov = spiceypy.ckcov(ck=ck, idcode=object_id,
needav=False, level='INTERVAL',
tol=0.0, timsys='TDB',
cover=object_cov)
else:
#print('{} with ID {} is not present in {}.'.format(object,
# object_id, ck))
if unload:
spiceypy.unload(ck)
if support_ker:
if isinstance(support_ker, str):
support_ker = [support_ker]
for ker in support_ker:
spiceypy.unload(ker)
return False
if time_format == 'SPICE':
boundaries = object_cov
else:
boundaries = time.cov_int(object_cov=object_cov,
object_id=object_id,
kernel=ck,
time_format=time_format, report=report)
if unload:
spiceypy.unload(ck)
if support_ker:
if isinstance(support_ker, str):
support_ker = [support_ker]
for ker in support_ker:
spiceypy.unload(ker)
return boundaries
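# Example (hypothetical usage sketch; the kernel file names are assumptions):
#
#     boundaries = cov_ck_ker('bc_mpo_sc_fmp_00094_v01.bc', 'MPO_SPACECRAFT',
#                             support_ker=['naif0012.tls',
#                                          'bc_mpo_step_20180222.tsc'],
#                             time_format='UTC', report=True)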
def time_correlation(sc, ck, plot_style='line', notebook=True):
# Downloads a telemetry file of a given CK and computes
# the time difference between the UTC time (1st column)
# and the clock string (2nd column) in milliseconds
spiceypy.timdef('SET', 'SYSTEM', 10, 'UTC')
if sc == 'MPO':
file = ck.split('/')[-1].replace('\n', '').split('_')[5]
file = 'mpo_raw_hk_aocs_measured_attitude_' + file + '.tab'
download_file("data/ANCDR/BEPICOLOMBO/hkt", file)
else:
print('Unsupported spacecraft: ' + sc)
return None
print('AOCS tab file: ' + file)
if not os.path.isfile(file):
print('AOCS tab file cannot be downloaded!')
return None
tabfile = open(file)
times = []
time_diff = []
try:
sc_id = spiceypy.bodn2c(sc)
except Exception as e:
print('Spacecraft not found: ' + sc + ", err: " + str(e))
return None
for line in tabfile.readlines():
data = line.replace('\n', '').replace(',', ' ').split()
utc_et = spiceypy.str2et(data[0].replace('Z', ''))
scs_et = spiceypy.scs2e(sc_id, data[1])
times.append(utc_et)
time_diff.append((utc_et - scs_et) * 1000)
time_diff = np.abs(np.asarray(time_diff))
max_time_diff = np.max(time_diff)
print('Avg time difference [ms]: ' + str(np.mean(time_diff)))
print('Max time difference [ms]: ' + str(max_time_diff))
plot(times, time_diff,
yaxis_name='Time diff (UTC - SCS)',
title='Time difference between UTC and Clock String in milliseconds',
format=plot_style,
yaxis_units='milliseconds',
notebook=notebook)
os.remove(file)
return max_time_diff
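# Example (hypothetical usage sketch; the CK file name is an assumption; the
# AOCS telemetry table is fetched from the FTP structure assumed by this
# module, with the date taken from the 6th underscore-separated field of the
# CK name):
#
#     max_diff_ms = time_correlation(
#         'MPO', 'bc_mpo_sc_fmp_00094_20200315_20200322_v01.bc',
#         plot_style='line', notebook=False)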
def flyby_ca_altitudes(sc, target, spk_expression, num_spk_files, from_date, to_date,
distance_flyby, num_samples, plot_style='line', notebook=True, plot_prefix=""):
spiceypy.timdef('SET', 'SYSTEM', 10, 'TDB')
target = target.upper()
target_frame = "IAU_" + target
start_time = spiceypy.utc2et(from_date)
stop_time = spiceypy.utc2et(to_date)
times = np.linspace(start_time, stop_time, num_samples)
maxwin = 200000
if sc == 'MPO':
spk_path = "data/SPICE/BEPICOLOMBO/kernels/spk/"
else:
print('Unsupported spacecraft: ' + sc)
return None
# Download num_spk_files and find the flyby for each one
spk_files = list_files_from_ftp(spk_path, spk_expression)
spk_files = spk_files[-num_spk_files:]
flybys_spks = []
flybys_ets = []
flybys_alts = []
flybys_alt_list = []
for spk_file in spk_files:
# Download spk file
download_file(spk_path, spk_file)
if not os.path.isfile(spk_file):
            print('SPK file cannot be downloaded!')
return None
# Obtain the flyby data
spiceypy.furnsh(spk_file)
cnfine = SPICEDOUBLE_CELL(maxwin)
result = SPICEDOUBLE_CELL(maxwin)
spiceypy.scard(0, cnfine)
spiceypy.wninsd(start_time, stop_time, cnfine)
spiceypy.gfdist(target=target,
abcorr='NONE',
obsrvr=sc,
relate='<',
refval=distance_flyby,
step=spiceypy.spd() / 4.,
nintvls=maxwin,
cnfine=cnfine,
adjust=0.0,
result=result)
final = SPICEDOUBLE_CELL(maxwin)
spiceypy.gfdist(target=target,
abcorr='NONE',
obsrvr=sc,
relate='LOCMIN',
refval=distance_flyby,
step=spiceypy.spd() / 4.,
nintvls=maxwin,
cnfine=result,
adjust=0.0,
result=final)
number_of_results = spiceypy.wncard(final)
if number_of_results == 0:
print('No ' + target + ' flyby found for that period at SPK: ' + spk_file)
return None
if number_of_results > 1:
print('Error: Multiple ' + target + ' flybys found for that period at SPK: ' + spk_file)
return None
# Get the flyby closest approach time and distance
flyby_et = spiceypy.wnfetd(final, 0)[0]
(state, lt) = spiceypy.spkezr(target, flyby_et, target_frame, 'NONE', sc)
spoint = spiceypy.sincpt('ELLIPSOID', target, flyby_et, target_frame,
'NONE', sc, target_frame, state[:3])[0]
flyby_altitude = spiceypy.vnorm(spoint + state[:3])
flybys_spks.append(spk_file)
flybys_ets.append(flyby_et)
flybys_alts.append(flyby_altitude)
# Get the flyby closest approach distance evolution
altitudes = []
for et in times:
(state, lt) = spiceypy.spkezr(target, et, target_frame, 'NONE', sc)
spoint = spiceypy.sincpt('ELLIPSOID', target, et, target_frame,
'NONE', sc, target_frame, state[:3])[0]
altitudes.append(spiceypy.vnorm(spoint + state[:3]))
flybys_alt_list.append(altitudes)
# Unload and clean spk file
spiceypy.unload(spk_file)
os.remove(spk_file)
# Reduce the SPK names to only the SPK number to reduce the legend size
spk_numbers = []
if sc == 'MPO':
for spk in flybys_spks:
spk_number = int(spk.split("_")[3])
spk_numbers.append(spk_number)
if len(plot_prefix):
plot_prefix = plot_prefix + " "
# Plot Flyby CA altitude vs spk number
plot(spk_numbers,
flybys_alts,
title=plot_prefix + target + ' Flyby CA altitude vs spk number',
format="scatter",
xaxis_name='SPK Number',
yaxis_name=['Altitude'],
yaxis_units='Km',
notebook=notebook)
# Plot Flyby CA time vs spk number
plot(flybys_ets,
spk_numbers,
title=plot_prefix + target + ' Flyby CA spk number vs time',
format="scatter",
yaxis_name=['SPK Number'],
yaxis_units='SPK Number',
notebook=notebook)
# Plot Flyby altitude evolution vs time per SPK
plot(times,
flybys_alt_list,
yaxis_name=spk_numbers,
title=plot_prefix + target + ' Flyby altitude evolution vs time',
format=plot_style,
yaxis_units='Km',
plot_height=400,
notebook=notebook)
return np.max(flybys_alts)
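# Example (hypothetical usage sketch; the SPK expression, dates and flyby
# distance are assumptions):
#
#     max_alt = flyby_ca_altitudes('MPO', 'EARTH',
#                                  'bc_mpo_fcp_?????_*_v01.bsp', 5,
#                                  '2020-04-01', '2020-04-20',
#                                  300000.0, 5000,
#                                  plot_style='line', notebook=False)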
def fk_body_ifj2000(mission, body, pck, body_spk, frame_id, report=False,
unload=False, file=True):
"""
Generates a given Solar System Natural Body Inertial frame at J2000. This
function is based on a FORTRAN subroutine provided by <NAME>
(NAIF/JPL)
The frame definition would be as follows:
{Body} Inertial Frame at J2000 ({MISSION}_{BODY}_IF_J2000)
Definition:
The {body} Inertial Frame at J2000 is defined as follows:
- +Z axis is parallel to {body} rotation axis
at J2000, pointing toward the North side of the
invariable plane;
- +X axis is aligned with the ascending node of the {Body}
orbital plane with the {Body} equator plane at J2000;
- +Y axis completes the right-handed system;
- the origin of this frame is the center of mass of {Body}.
All vectors are geometric: no aberration corrections are used.
Remarks:
This frame is defined as a fixed offset frame using constant vectors
as the specification method. The fixed offset for these vectors were
based on the following directions (that also define a two-vector
frame):
- +Z axis along Right Ascension (RA) and Declination (DEC) of {Body}
pole at J2000 epoch in J2000 inertial frame;
- +X axis along the RA/DEC of {Body} instantaneous orbital plane
ascending node on {Body} equator at J2000 epoch in J2000
inertial frame;
This frame has been defined based on the IAU_{BODY} frame, whose
evaluation was based on the data included in the loaded PCK file.
In addition {body_spk} ephemeris have been used to compute the {Body}
instantaneous orbital plane ascending node on {Body} equator at
J2000 epoch in J2000 inertial frame.
:param mission: Name of the mission to use the frame
:type mission: str
:param body: Natural body for which the frame is defined
    :type body: str
:param pck: Planetary Constants Kernel to be used to extract the Pole information from
:type pck: str
:param body_spk: SPK kernels that contain the ephemeris of the Natural body
:type body_spk: Union[str, list]
:param frame_id: ID for the new frame. It is recommended to follow the convention recommended by NAIF: -XYYY where X is the ID of the mission S/C and YYY is a number between 900 and 999.
:type frame_id: str
:param report: If True prints some intermediate results.
:type report: bool
:param unload: If True it will unload the input PCK and SPK.
:type unload: bool
:param file: If True it generates the frame definition in a file with the following name: {MISSION}_{BODY}_IF_J2000.tf
:type file: bool
:return: Returns the Euler angles to transform the computed frame with J2000. Only if parameter file is False
:rtype: str
"""
body = body.upper()
mission = mission.upper()
spiceypy.furnsh(pck)
#
# This can actually be a list of bodies.
#
spiceypy.furnsh(body_spk)
#
# Get instantaneous Body state at J2000 and compute instantaneous
# orbital normal.
#
state, lt = spiceypy.spkezr(body, 0.0, 'J2000', 'NONE', 'SUN')
normal = spiceypy.ucrss(state[0:3:1], state[3:6:1])
#
# Get J2000 -> IAU_{BODY} rotation at J2000 and compute Body pole
# direction in J2000 at J2000.
#
mat = spiceypy.pxform('IAU_{}'.format(body), 'J2000', 0.0)
z = spiceypy.vpack(0.0, 0.0, 1.0)
pole = spiceypy.mxv(mat, z)
#
# Compute direction Body orbit's ascending node on Body equator at
# J2000 in J2000 and print it and Body pole as RA/DEC in J2000 in
# degrees
#
ascnod = spiceypy.ucrss(pole, normal)
r, ra, dec = spiceypy.recrad(pole)
if report:
print('POLE RA/DEC = {}/{}'.format(ra*spiceypy.dpr(), dec*spiceypy.dpr()))
r, ra, dec = spiceypy.recrad(ascnod)
if report:
print('ASCNOD RA/DEC = {}/{}'.format(ra * spiceypy.dpr(), dec * spiceypy.dpr()))
#
    # Build a two-vector frame with POLE as Z and ASCNOD as X, and print the
    # rotation from that frame to J2000 as Euler angles.
#
mat = spiceypy.twovec(pole, 3, ascnod, 1)
matxp = spiceypy.xpose(mat)
r3, r2, r1 = spiceypy.m2eul(matxp, 3, 2, 3)
if file:
body_id = spiceypy.bodn2c(body)
with open('{}_{}_IF_J2000.tf'.format(mission, body), 'w+') as f:
f.write(r"\begindata")
f.write('\n \n')
f.write(' FRAME_{}_{}_IF_J2000 = {}\n'.format(mission, body,
frame_id))
f.write(" FRAME_{}_NAME = '{}_{}_IF_J2000'\n".format(
frame_id, mission, body))
f.write(' FRAME_{}_CLASS = 4\n'.format(frame_id))
f.write(' FRAME_{}_CLASS_ID = {}\n'.format(frame_id,
frame_id))
f.write(' FRAME_{}_CENTER = {}\n'.format(frame_id,
body_id))
f.write('\n')
f.write(" TKFRAME_{}_SPEC = 'ANGLES'\n".format(frame_id))
f.write(" TKFRAME_{}_RELATIVE = 'J2000'\n".format(frame_id))
f.write(' TKFRAME_{}_ANGLES = (\n'.format(frame_id))
f.write(' {}\n'.format(r3 *
spiceypy.dpr()))
f.write(' {}\n'.format(r2 *
spiceypy.dpr()))
f.write(' {}\n'.format(r1 *
spiceypy.dpr()))
f.write(' )\n')
f.write(' TKFRAME_{}_AXES = (\n'.format(frame_id))
f.write(' 3,\n')
f.write(' 2,\n')
f.write(' 3\n')
f.write(' )\n')
f.write(" TKFRAME_{}_UNITS = 'DEGREES'\n".format(frame_id))
f.write('\n')
f.write(r"\begintext")
else:
return '{}_IF->J2000 (3-2-3): {} - {} - {}'.format(body,
r3 * spiceypy.dpr(),
r2 * spiceypy.dpr(),
r1 * spiceypy.dpr())
if unload:
spiceypy.unload(pck)
spiceypy.unload(body_spk)
return
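# Example (hypothetical usage sketch; the kernel names and frame ID are
# assumptions; the frame ID should follow the -XYYY convention described
# above):
#
#     fk_body_ifj2000('EXOMARS2016', 'MARS', 'pck00010.tpc', 'mar097.bsp',
#                     '-143910', report=True, file=True)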
def eul_angle_report(et_list, eul_ck1, eul_ck2, eul_num, tolerance, name=''):
eul_error = list(numpy.degrees(abs(numpy.array(eul_ck1) - numpy.array(eul_ck2))))
count = 0
interval_bool = False
eul_tol_list = []
with open('euler_angle_{}_{}_report.txt'.format(eul_num, name), 'w+') as f:
f.write('EULER ANGLE {} REPORT \n'.format(eul_num))
f.write('==================== \n')
for element in eul_error:
if element >= tolerance:
if interval_bool:
eul_tol_list.append(element)
else:
interval_bool = True
eul_tol_list.append(element)
utc_start = spiceypy.et2utc(et_list[count], 'ISOC', 2)
else:
if interval_bool:
utc_finish = spiceypy.et2utc(et_list[count], 'ISOC', 2)
f.write('TOLERANCE of ' + str(tolerance) + ' DEG exceeded from ' + utc_start + ' until ' +
utc_finish + ' with an average angle of ' + str(numpy.mean(eul_tol_list)) + ' DEG \n')
interval_bool = False
count += 1
f.write('\nMAX Error: {} DEG\n'.format(str(max(eul_error))))
f.write('MIN Error: {} DEG\n'.format(str(min(eul_error))))
f.write('MEAN Error: {} DEG\n'.format(str(numpy.mean(eul_error))))
return
def attitude_error_report(et_list, ang_ck1, ang_ck2, tolerance, name=''):
ang_error = list(abs(numpy.array(ang_ck1) - numpy.array(ang_ck2)))
count = 0
interval_bool = False
ang_tol_list = []
with open('attitude_error_{}_report.txt'.format(name), 'w+') as f:
f.write('ATTITUDE ERROR REPORT \n')
f.write('==================== \n')
for element in ang_error:
if element >= tolerance:
if interval_bool:
ang_tol_list.append(element)
else:
interval_bool = True
ang_tol_list.append(element)
utc_start = spiceypy.et2utc(et_list[count], 'ISOC', 2)
else:
if interval_bool:
utc_finish = spiceypy.et2utc(et_list[count], 'ISOC', 2)
f.write('TOLERANCE of ' + str(tolerance) + ' DEG exceeded from ' + utc_start + ' until ' +
utc_finish + ' with an average angle of ' + str(numpy.mean(ang_tol_list)) + ' DEG \n')
interval_bool = False
count += 1
f.write('\nMAX Error: {} ARCSECONDS\n'.format(str(max(ang_error))))
f.write('MIN Error: {} ARCSECONDS\n'.format(str(min(ang_error))))
f.write('MEAN Error: {} ARCSECONDS\n'.format(str(numpy.mean(ang_error))))
return
def state_report(et_list, pos_spk1, pos_spk2, vel_spk1, vel_spk2, pos_tolerance,
vel_tolerance, name=''):
pos_error = list(abs(numpy.array(pos_spk1) - numpy.array(pos_spk2)))
vel_error = list(abs(numpy.array(vel_spk1) - numpy.array(vel_spk2)))
count = 0
interval_bool = False
pos_tol_list = []
with open('state_{}_report.txt'.format(name), 'w+') as f:
f.write('STATE REPORT \n')
f.write('============ \n')
for element in pos_error:
if element >= pos_tolerance:
if interval_bool:
pos_tol_list.append(element)
else:
interval_bool = True
pos_tol_list.append(element)
utc_start = spiceypy.et2utc(et_list[count], 'ISOC', 2)
else:
if interval_bool:
utc_finish = spiceypy.et2utc(et_list[count], 'ISOC', 2)
f.write('TOLERANCE of ' + str(pos_tolerance) + ' KM exceeded from ' + utc_start + ' until ' +
utc_finish + ' with an average distance of ' + str(numpy.mean(pos_tol_list)) + ' KM \n')
interval_bool = False
count += 1
count = 0
interval_bool = False
vel_tol_list = []
for element in vel_error:
if element >= vel_tolerance:
if interval_bool:
vel_tol_list.append(element)
else:
interval_bool = True
vel_tol_list.append(element)
utc_start = spiceypy.et2utc(et_list[count], 'ISOC', 2)
else:
if interval_bool:
utc_finish = spiceypy.et2utc(et_list[count], 'ISOC', 2)
f.write('TOLERANCE of ' + str(vel_tolerance) + ' KM/S exceeded from ' + utc_start + ' until ' +
utc_finish + ' with an average velocity of ' + str(numpy.mean(vel_tol_list)) + ' KM/S \n')
count += 1
f.write('\nMAX Error: {} KM\n'.format(str(max(pos_error))))
f.write('MIN Error: {} KM\n'.format(str(min(pos_error))))
f.write('MEAN Error: {} KM\n'.format(str(numpy.mean(pos_error))))
f.write('\nMAX Error: {} KM/S\n'.format(str(max(vel_error))))
f.write('MIN Error: {} KM/S\n'.format(str(min(vel_error))))
f.write('MEAN Error: {} KM/S\n'.format(str(numpy.mean(vel_error))))
return
def ckdiff_euler(mk, ck1, ck2, spacecraft_frame, target_frame, resolution, tolerance,
utc_start='', utc_finish='', plot_style='line', report=True,
notebook=False):
"""
Provides time coverage summary for a given object for a given CK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param ck: CK file to be used
:type mk: str
:param support_ker: Support kernels required to run the function. At least
it should be a leapseconds kernel (LSK) and a Spacecraft clock kernel
(SCLK) optionally a meta-kernel (MK) which is highly recommended.
:type support_ker: Union[str, list]
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB
in calendar format) or 'TDB'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the
coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
# Compute time windows
spiceypy.furnsh(mk)
windows_ck1 = cov_ck_ker(ck1, object=spacecraft_frame, time_format='SPICE')
spiceypy.unload(ck1)
windows_ck2 = cov_ck_ker(ck2, object=spacecraft_frame, time_format='SPICE')
spiceypy.unload(ck2)
windows_intersected = spiceypy.wnintd(windows_ck1, windows_ck2)
number_of_intervals = list(range(spiceypy.wncard(windows_intersected)))
et_boundaries_list = []
for element in number_of_intervals:
et_boundaries = spiceypy.wnfetd(windows_intersected, element)
et_boundaries_list.append(et_boundaries[0])
et_boundaries_list.append(et_boundaries[1])
    start = True
    for et_start, et_finish in zip(et_boundaries_list[0::2], et_boundaries_list[1::2]):
        if start:
            et_list = numpy.arange(et_start, et_finish, resolution)
            start = False
        else:
            # Append only the subsequent intervals; without this else branch
            # the first interval would be sampled twice.
            et_list = numpy.append(et_list, numpy.arange(et_start, et_finish, resolution))
if utc_start:
et_start = spiceypy.utc2et(utc_start)
if utc_finish:
et_finish = spiceypy.utc2et(utc_finish)
et_list = numpy.arange(et_start, et_finish, resolution)
# Process CK1
spiceypy.furnsh(ck1)
eul1_ck1 = []
eul2_ck1 = []
eul3_ck1 = []
for et in et_list:
rot_mat = spiceypy.pxform(spacecraft_frame, target_frame, et)
euler = (spiceypy.m2eul(rot_mat, 1, 2, 3))
eul1_ck1.append(math.degrees(euler[0]))
eul2_ck1.append(math.degrees(euler[1]))
eul3_ck1.append(math.degrees(euler[2]))
spiceypy.unload(ck1)
# Process CK2
spiceypy.furnsh(ck2)
eul1_ck2 = []
eul2_ck2 = []
eul3_ck2 = []
for et in et_list:
rot_mat = spiceypy.pxform(spacecraft_frame, target_frame, et)
euler = (spiceypy.m2eul(rot_mat, 1, 2, 3))
eul1_ck2.append(math.degrees(euler[0]))
eul2_ck2.append(math.degrees(euler[1]))
eul3_ck2.append(math.degrees(euler[2]))
spiceypy.unload(ck2)
# Plot angles
ck1_filename = ck1.split('/')[-1].split('.')[0]
ck2_filename = ck2.split('/')[-1].split('.')[0]
eul1_name = '{}_{}'.format(ck1_filename, ck2_filename)
eul2_name = '{}_{}'.format(ck1_filename, ck2_filename)
eul3_name = '{}_{}'.format(ck1_filename, ck2_filename)
plot(et_list, [eul1_ck1, eul1_ck2], yaxis_name=['Euler Angle 1 CK1',
'Euler Angle 1 CK2'],
title='Euler Angle 1 {}'.format(eul1_name),
format=plot_style,
notebook=notebook)
plot(et_list, [eul2_ck1, eul2_ck2], yaxis_name=['Euler Angle 2 CK1',
'Euler Angle 2 CK2'],
title='Euler Angle 2 {}'.format(eul2_name),
format=plot_style,
notebook=notebook)
plot(et_list, [eul3_ck1, eul3_ck2], yaxis_name=['Euler Angle 3 CK1',
'Euler Angle 3 CK2'],
title='Euler Angle 3 {}'.format(eul3_name),
format=plot_style,
notebook=notebook)
# Generate reports
if report:
eul_angle_report(et_list, eul1_ck1, eul1_ck2, 1, tolerance, name=eul1_name)
eul_angle_report(et_list, eul2_ck1, eul2_ck2, 2, tolerance, name=eul2_name)
eul_angle_report(et_list, eul3_ck1, eul3_ck2, 3, tolerance, name=eul3_name)
return
def ckdiff(ck1, ck2, spacecraft_frame, target_frame, resolution, tolerance,
utc_start='', utc_finish='', mk='', output='boresight', boresight = [0,0,1],
plot_style='line', report=False, notebook=False):
"""
Provides time coverage summary for a given object for a given CK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param ck: CK file to be used
:type mk: str
:param support_ker: Support kernels required to run the function. At least
it should be a leapseconds kernel (LSK) and a Spacecraft clock kernel
(SCLK) optionally a meta-kernel (MK) which is highly recommended.
:type support_ker: Union[str, list]
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB
in calendar format) or 'TDB'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the
coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
if mk:
spiceypy.furnsh(mk)
windows_ck1 = cov_ck_ker(ck1, object=spacecraft_frame, time_format='SPICE')
spiceypy.unload(ck1)
windows_ck2 = cov_ck_ker(ck2, object=spacecraft_frame, time_format='SPICE')
spiceypy.unload(ck2)
windows_intersected = spiceypy.wnintd(windows_ck1, windows_ck2)
number_of_intervals = list(range(spiceypy.wncard(windows_intersected)))
et_boundaries_list = []
for element in number_of_intervals:
et_boundaries = spiceypy.wnfetd(windows_intersected, element)
et_boundaries_list.append(et_boundaries[0])
et_boundaries_list.append(et_boundaries[1])
    start = True
    for et_start, et_finish in zip(et_boundaries_list[0::2], et_boundaries_list[1::2]):
        if start:
            et_list = numpy.arange(et_start, et_finish, resolution)
            start = False
        else:
            # Append only the subsequent intervals; without this else branch
            # the first interval would be sampled twice.
            et_list = numpy.append(et_list, numpy.arange(et_start, et_finish, resolution))
if utc_start:
et_start = spiceypy.utc2et(utc_start)
if utc_finish:
et_finish = spiceypy.utc2et(utc_finish)
et_list = numpy.arange(et_start, et_finish, resolution)
spiceypy.furnsh(ck1)
eul1_ck1 = []
eul2_ck1 = []
eul3_ck1 = []
bsight_ck1 = []
for et in et_list:
rot_mat = spiceypy.pxform(spacecraft_frame, target_frame, et)
euler = (spiceypy.m2eul(rot_mat, 1, 2, 3))
eul1_ck1.append(math.degrees(euler[0]))
eul2_ck1.append(math.degrees(euler[1]))
eul3_ck1.append(math.degrees(euler[2]))
bsight = spiceypy.mxv(rot_mat, boresight)
bsight_ang = spiceypy.vsep(bsight, boresight)
bsight_ck1.append(bsight_ang*spiceypy.dpr())
spiceypy.unload(ck1)
spiceypy.furnsh(ck2)
eul1_ck2 = []
eul2_ck2 = []
eul3_ck2 = []
bsight_ck2 = []
for et in et_list:
rot_mat = spiceypy.pxform(spacecraft_frame, target_frame, et)
euler = (spiceypy.m2eul(rot_mat, 1, 2, 3))
eul1_ck2.append(math.degrees(euler[0]))
eul2_ck2.append(math.degrees(euler[1]))
eul3_ck2.append(math.degrees(euler[2]))
bsight = spiceypy.mxv(rot_mat, boresight)
bsight_ang = spiceypy.vsep(bsight, boresight)
bsight_ck2.append(bsight_ang*spiceypy.dpr())
spiceypy.unload(ck2)
ck1_filename = ck1.split('/')[-1].split('.')[0]
ck2_filename = ck2.split('/')[-1].split('.')[0]
title_name = '{}_{}'.format(ck1_filename, ck2_filename)
if output == 'euler_angles':
plot(et_list, [eul1_ck1,eul1_ck2], yaxis_name=['Degrees', 'Degrees'],
title='Euler Angle 1 {}'.format(title_name),
format=plot_style,
notebook=notebook)
plot(et_list, [eul2_ck1,eul2_ck2], yaxis_name=['Degrees', 'Degrees'],
title='Euler Angle 2 {}'.format(title_name),
format=plot_style,
notebook=notebook)
plot(et_list, [eul3_ck1,eul3_ck2], yaxis_name=['Degrees', 'Degrees'],
title='Euler Angle 3 {}'.format(title_name),
format=plot_style,
notebook=notebook)
else:
plot(et_list, [bsight_ck1], yaxis_name=['Degrees', 'Degrees'],
title='+Z Axis Angle Difference {}'.format(title_name),
format=plot_style,
notebook=notebook)
if report:
eul_angle_report(et_list, eul1_ck1, eul1_ck2, 1, tolerance, name=title_name)
eul_angle_report(et_list, eul2_ck1, eul2_ck2, 2, tolerance, name=title_name)
eul_angle_report(et_list, eul3_ck1, eul3_ck2, 3, tolerance, name=title_name)
return
def get_euler_boresights_angles(ck, et_list, spacecraft_frame,
target_frame, boresight):
spiceypy.furnsh(ck)
eul1 = []
eul2 = []
eul3 = []
bsights = []
angles = []
for et in et_list:
rot_mat = spiceypy.pxform(spacecraft_frame, target_frame, et)
euler = (spiceypy.m2eul(rot_mat, 1, 2, 3))
eul1.append(math.degrees(euler[0]))
eul2.append(math.degrees(euler[1]))
eul3.append(math.degrees(euler[2]))
bsight = spiceypy.mxv(rot_mat, boresight)
bsight_ang = spiceypy.vsep(bsight, boresight)
bsights.append(spiceypy.convrt(bsight_ang, 'RADIANS', 'ARCSECONDS'))
(rot_axis, rot_angle) = spiceypy.raxisa(rot_mat)
angles.append(spiceypy.convrt(rot_angle, 'RADIANS', 'ARCSECONDS'))
spiceypy.unload(ck)
return eul1, eul2, eul3, bsights, angles
def ckdiff_error(ck1, ck2, spacecraft_frame, target_frame, resolution, tolerance,
mk='', utc_start='', utc_finish='', output='',
boresight=[0,0,1], plot_style='line', report=False,
notebook=False):
"""
Provides time coverage summary for a given object for a given CK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param ck: CK file to be used
:type mk: str
:param support_ker: Support kernels required to run the function. At least
it should be a leapseconds kernel (LSK) and a Spacecraft clock kernel
(SCLK) optionally a meta-kernel (MK) which is highly recommended.
:type support_ker: Union[str, list]
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB
in calendar format) or 'TDB'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the
coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
if mk:
spiceypy.furnsh(mk)
try:
windows_ck1 = cov_ck_ker(ck1, object=spacecraft_frame, time_format='SPICE')
spiceypy.unload(ck1)
windows_ck2 = cov_ck_ker(ck2, object=spacecraft_frame, time_format='SPICE')
spiceypy.unload(ck2)
except:
print('WARNING: No Time Window could be determined')
return None
windows_intersected = spiceypy.wnintd(windows_ck1, windows_ck2)
number_of_intervals = list(range(spiceypy.wncard(windows_intersected)))
if not len(number_of_intervals):
print('WARNING: No Time Windows intersected')
return None
et_boundaries_list = []
for element in number_of_intervals:
et_boundaries = spiceypy.wnfetd(windows_intersected, element)
et_boundaries_list.append(et_boundaries[0])
et_boundaries_list.append(et_boundaries[1])
    start = True
    for et_start, et_finish in zip(et_boundaries_list[0::2], et_boundaries_list[1::2]):
        if start:
            et_list = numpy.arange(et_start, et_finish, resolution)
            start = False
        else:
            # Append only the subsequent intervals; without this else branch
            # the first interval would be sampled twice.
            et_list = numpy.append(et_list, numpy.arange(et_start, et_finish, resolution))
if utc_start:
et_start = spiceypy.utc2et(utc_start)
if utc_finish:
et_finish = spiceypy.utc2et(utc_finish)
et_list = numpy.arange(et_start, et_finish, resolution)
if not len(et_list):
print('WARNING: No valid time period')
return None
eul1_ck1, eul2_ck1, eul3_ck1, bsight_ck1, angle_ck1 = get_euler_boresights_angles(ck1, et_list, spacecraft_frame,
target_frame, boresight)
eul1_ck2, eul2_ck2, eul3_ck2, bsight_ck2, angle_ck2 = get_euler_boresights_angles(ck2, et_list, spacecraft_frame,
target_frame, boresight)
angle_diff = [abs(i - j) for i, j in zip(angle_ck1, angle_ck2)]
if output == 'euler_angles':
eul1_diff = [i - j for i, j in zip(eul1_ck1, eul1_ck2)]
eul2_diff = [i - j for i, j in zip(eul2_ck1, eul2_ck2)]
eul3_diff = [i - j for i, j in zip(eul3_ck1, eul3_ck2)]
plot(et_list, [eul1_diff, eul2_diff, eul3_diff],
yaxis_name=['Degrees', 'Degrees', 'Degrees'],
title='Euler Angle Differences',
format=plot_style, yaxis_units='deg',
notebook=notebook)
elif output == 'boresight':
bsight_diff = [np.abs(i - j) for i, j in zip(bsight_ck1, bsight_ck2)]
plot(et_list, bsight_diff,
yaxis_name='',
title='Boresight Angle Difference',
format=plot_style, yaxis_units='arcsec',
notebook=notebook)
# Attitude Error
else:
plot(et_list, angle_diff,
yaxis_name='ang_diff',
title='Attitude Error',
format=plot_style, yaxis_units='arcsec',
notebook=notebook)
if report:
ck1_filename = ck1.split('/')[-1].split('.')[0]
ck2_filename = ck2.split('/')[-1].split('.')[0]
bsight_name = '{}_{}'.format(ck1_filename, ck2_filename)
attitude_error_report(et_list, bsight_ck1, bsight_ck2, tolerance, name=bsight_name)
if output == 'euler_angles':
eul1_name = '{}_{}'.format(ck1_filename, ck2_filename)
eul2_name = '{}_{}'.format(ck1_filename, ck2_filename)
eul3_name = '{}_{}'.format(ck1_filename, ck2_filename)
eul_angle_report(et_list, eul1_ck1, eul1_ck2, 1, tolerance, name=eul1_name)
eul_angle_report(et_list, eul2_ck1, eul2_ck2, 2, tolerance, name=eul2_name)
eul_angle_report(et_list, eul3_ck1, eul3_ck2, 3, tolerance, name=eul3_name)
elif output == 'rotaxis':
rotaxis_name = '{}_{}'.format(ck1_filename, ck2_filename)
attitude_error_report(et_list, angle_ck1, angle_ck2, tolerance, name=rotaxis_name)
if mk:
spiceypy.unload(mk)
return np.max(angle_diff)
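# Example (hypothetical usage sketch; the file and frame names are
# assumptions):
#
#     max_err = ckdiff_error('predicted.bc', 'measured.bc',
#                            'MPO_SPACECRAFT', 'J2000',
#                            resolution=60.0, tolerance=10.0,
#                            mk='bc_ops.tm', output='boresight',
#                            plot_style='line', notebook=False)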
def ckplot(ck1, spacecraft_frame, target_frame, resolution,
mk = '', utc_start='', utc_finish='', notebook=False,
plot_style='circle'):
"""
Provides time coverage summary for a given object for a given CK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param ck: CK file to be used
:type mk: str
:param support_ker: Support kernels required to run the function. At least
it should be a leapseconds kernel (LSK) and a Spacecraft clock kernel
(SCLK) optionally a meta-kernel (MK) which is highly recommended.
:type support_ker: Union[str, list]
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB
in calendar format) or 'TDB'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the
coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
if mk:
spiceypy.furnsh(mk)
spiceypy.furnsh(ck1)
et_boundaries_list = cov_ck_ker(ck1, support_ker=mk, object=spacecraft_frame,
time_format='TDB')
    start = True
    for et_start, et_finish in zip(et_boundaries_list[0::2], et_boundaries_list[1::2]):
        if start:
            et_list = numpy.arange(et_start, et_finish, resolution)
            start = False
        else:
            # Append only the subsequent intervals; without this else branch
            # the first interval would be sampled twice.
            et_list = numpy.append(et_list, numpy.arange(et_start, et_finish, resolution))
# TODO: if we want to really use start and end times and intersect it with the available intervals we need to develop this
if utc_start:
et_start = spiceypy.utc2et(utc_start)
if utc_finish:
et_finish = spiceypy.utc2et(utc_finish)
et_list = numpy.arange(et_start, et_finish, resolution)
eul1 = []
eul2 = []
eul3 = []
for et in et_list:
rot_mat = spiceypy.pxform(spacecraft_frame, target_frame,et)
euler = spiceypy.m2eul(rot_mat, 1, 2, 3)
eul1.append(math.degrees(euler[0]))
eul2.append(math.degrees(euler[1]))
eul3.append(math.degrees(euler[2]))
spiceypy.unload(ck1)
plot(et_list, [eul1,eul2,eul3],
yaxis_name=['Euler Angle 1', 'Euler Angle 2', 'Euler Angle 3'],
title='Euler Angles for {}'.format(ck1.split('/')[-1]), notebook=notebook, format=plot_style)
return
def spkdiff(mk, spk1, spk2, spacecraft, target, resolution, pos_tolerance,
vel_tolerance, target_frame='', utc_start='', utc_finish='',
plot_style='line', report=True):
"""
Provides time coverage summary for a given object for a given CK file.
Several options are available. This function is based on the following
SPICE API:
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/spiceypy/ckcov_c.html
The NAIF utility CKBRIEF can be used for the same purpose.
:param ck: CK file to be used
:type mk: str
:param support_ker: Support kernels required to run the function. At least
it should be a leapseconds kernel (LSK) and a Spacecraft clock kernel
(SCLK) optionally a meta-kernel (MK) which is highly recommended.
:type support_ker: Union[str, list]
:param object: Ephemeris Object to obtain the coverage from.
:type object: str
:param time_format: Output time format; it can be 'UTC', 'CAL' (for TDB
in calendar format) or 'TDB'. Default is 'TDB'.
:type time_format: str
:param global_boundary: Boolean to indicate whether if we want all the
coverage windows or only the absolute start and finish coverage times.
:type global_boundary: bool
:param report: If True prints the resulting coverage on the screen.
:type report: bool
:param unload: If True it will unload the input meta-kernel.
:type unload: bool
:return: Returns a list with the coverage intervals.
:rtype: list
"""
if not target_frame:
target_frame = 'IAU_{}'.format(target.upper())
spiceypy.furnsh(mk)
windows_spk1 = cov_spk_ker(spk1, object=spacecraft, time_format='SPICE')
spiceypy.unload(spk1)
windows_spk2 = cov_spk_ker(spk2, object=spacecraft, time_format='SPICE')
spiceypy.unload(spk2)
windows_intersected = spiceypy.wnintd(windows_spk1, windows_spk2)
number_of_intervals = list(range(spiceypy.wncard(windows_intersected)))
et_boundaries_list = []
for element in number_of_intervals:
et_boundaries = spiceypy.wnfetd(windows_intersected, element)
et_boundaries_list.append(et_boundaries[0])
et_boundaries_list.append(et_boundaries[1])
    start = True
    for et_start, et_finish in zip(et_boundaries_list[0::2], et_boundaries_list[1::2]):
        if start:
            et_list = numpy.arange(et_start, et_finish, resolution)
            start = False
        else:
            # Append only the subsequent intervals; without this else branch
            # the first interval would be sampled twice.
            et_list = numpy.append(et_list, numpy.arange(et_start, et_finish, resolution))
if utc_start:
et_start = spiceypy.utc2et(utc_start)
if utc_finish:
et_finish = spiceypy.utc2et(utc_finish)
et_list = numpy.arange(et_start, et_finish, resolution)
spiceypy.furnsh(spk1)
state_spk1 = []
state_spk2 = []
pos_spk1 = []
pos_spk2 = []
vel_spk1 = []
vel_spk2 = []
for et in et_list:
state = spiceypy.spkezr(target, et, target_frame, 'NONE', spacecraft)[0]
state_spk1.append(state)
pos_spk1.append(np.sqrt(state[0]*state[0] +
state[1]*state[1] +
state[2]*state[2]))
vel_spk1.append(np.sqrt(state[3]*state[3] +
state[4]*state[4] +
state[5]*state[5]))
spiceypy.unload(spk1)
spiceypy.furnsh(spk2)
for et in et_list:
state = spiceypy.spkezr(target, et, target_frame, 'NONE', spacecraft)[0]
state_spk2.append(state)
pos_spk2.append(np.sqrt(state[0]*state[0] +
state[1]*state[1] +
state[2]*state[2]))
vel_spk2.append(np.sqrt(state[3]*state[3] +
state[4]*state[4] +
state[5]*state[5]))
plot(et_list, [pos_spk1, pos_spk2], yaxis_name=['Position SPK1',
'Position SPK2'],
title='Position of {} w.r.t {} ({})'.format(spacecraft, target, target_frame),
format=plot_style)
spiceypy.unload(spk2)
if report:
spk1_filename = spk1.split('/')[-1].split('.')[0]
spk2_filename = spk2.split('/')[-1].split('.')[0]
state_report(et_list, pos_spk1, pos_spk2, vel_spk1, vel_spk2, pos_tolerance, vel_tolerance,
name='{}_{}'.format(spk1_filename, spk2_filename))
return
def pck_body_placeholder(bodies):
"""
:param bodies:
:type bodies:
:return:
:rtype:
"""
with open('update_to_pck.tpc', 'w+') as f:
pl_id = 517
for body in bodies:
#
# Get body NAIF ID.
#
try:
                id = spiceypy.bodn2c(body.upper())
except:
id = pl_id
pl_id += 1
f.write(' {0} {1} 1 1 1 - Placeholder radii\n'.format(id, body[:1].upper() + body[1:].lower()))
f.write('\n\n')
pl_id = 517
for body in bodies:
#
# Get body NAIF ID.
#
try:
                id = spiceypy.bodn2c(body.upper())
except:
id = pl_id
pl_id += 1
f.write('BODY{}_RADII = (1 1 1 )\n'.format(id))
f.write('\n\n')
pl_id = 517
for body in bodies:
#
# Get body NAIF ID.
#
try:
                id = spiceypy.bodn2c(body.upper())
except:
id = pl_id
pl_id += 1
f.write(" FRAME_IAU_{0} = {1}\n".format(body.upper(), id))
f.write(" FRAME_{0}_NAME = 'IAU_{1}'\n".format(id, body.upper()))
f.write(" FRAME_{}_CLASS = 2\n".format(id))
f.write(" FRAME_{0}_CLASS_ID = {0}\n".format(id))
f.write(" FRAME_{0}_CENTER = {0}\n".format(id))
f.write(" BODY{}_POLE_RA = ( 0. 0. 0. )\n".format(id))
f.write(" BODY{}_POLE_DEC = ( 90. 0. 0. )\n".format(id))
f.write(" BODY{}_PM = ( -90. 0. 0. )\n".format(id))
f.write(" BODY{}_LONG_AXIS = ( 0. )\n\n".format(id))
return
def read_ik_with_sectors(sensor_name):
    #
    # Since all IK variable names contain the NAIF ID of the instrument,
    # the input sensor name is first translated into the sensor's NAIF ID
    # code.
    #
sensnm = sensor_name
secsiz = 0
secsis = 0
try:
sensid = spiceypy.bodn2c(sensnm)
except:
print('Cannot determine NAIF ID for {}'.format(sensnm))
return sensnm, 0, 0, secsiz, secsis, '', []
#
# No IK routines can be used to retrieve loaded data. First,
# retrieve the number of sectors provided in the
# INS-NNNNNN_NUMBER_OF_SECTORS keyword (here -NNNNNN is the NAIF ID
# of the sensor.)
#
ikkwd = 'INS#_NUMBER_OF_SECTORS'
ikkwd = spiceypy.repmi(ikkwd, "#", sensid)
try:
secnum = spiceypy.gipool(ikkwd, 0, 2)
except:
print('Loaded IK does not contain {}.'.format(ikkwd))
return sensnm, sensid, 0, secsiz, secsis, '', []
#
# Second, retrieve the sector size provided in the
# INS-NNNNNN_SECTOR_SIZE or INS-NNNNNN_SECTOR_SIZES keyword.
#
ikkwd = 'INS#_SECTOR_SIZES'
ikkwd = spiceypy.repmi(ikkwd, '#', sensid)
try:
secsis = spiceypy.gdpool(ikkwd, 0, 2)
#
# We need to search for INS-NNNNNN_SECTOR_SIZE in the second place
# for it would also be found by INS-NNNNNN_SECTOR_SIZES
#
except:
ikkwd = 'INS#_SECTOR_SIZE'
ikkwd = spiceypy.repmi(ikkwd, '#', sensid)
try:
room = int(secnum[0]*secnum[1]*2)
secsiz = spiceypy.gdpool(ikkwd, 0, room)
except:
print('Loaded IK does not contain {}.'.format(ikkwd))
return sensnm, sensid, secnum, secsiz, secsis, '', []
#
# Third, retrieve the frame in which sector view direction are
# defined. It is provided in the INS-NNNNNN_FRAME keyword.
#
ikkwd = 'INS#_FRAME'
ikkwd = spiceypy.repmi(ikkwd, '#', sensid)
try:
secfrm = spiceypy.gcpool(ikkwd, 0, 1)
except:
print('Loaded IK does not contain {}.'.format(ikkwd))
return sensnm, sensid, secnum, secsiz, secsis, secfrm, []
#
# Last, retrieve the sector view directions provided in the
# INS-NNNNNN_SECTOR_DIRECTIONS keyword.
#
ikkwd = 'INS#_SECTOR_DIRECTIONS'
ikkwd = spiceypy.repmi(ikkwd, '#', sensid)
try:
room = int(secnum[0]*secnum[1]*3)
secdir = spiceypy.gdpool(ikkwd, 0, room)
#
# Re-arrange the secdir list into a list of lists in which each
# individual list is a sector direction vector
#
secdir_list = []
secdir_line = []
count = 0
        for element in secdir:  # group every three values into one vector
secdir_line.append(element)
count += 1
if count % 3 == 0:
secdir_list.append(secdir_line)
secdir_line = []
count = 0
secdir = secdir_list
except:
print('Loaded IK does not contain {}.'.format(ikkwd))
return sensnm, sensid, secnum, secsiz, secsis, secfrm, []
return sensnm, sensid, secnum, secsiz, secsis, secfrm, secdir
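#
# Example usage of read_ik_with_sectors (hypothetical kernel and sensor
# names; an FK and an IK describing the sensor must be loaded beforehand):
#
# spiceypy.furnsh('ros_v29.tf')
# spiceypy.furnsh('ros_rpc_v15.ti')
# (sensnm, sensid, secnum, secsiz,
#  secsis, secfrm, secdir) = read_ik_with_sectors('ROS_RPC_IES')
#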
#def mex_tgo_occultations(interval, refval):
# (out, radii) = spiceypy.bodvrd('MARS', 'RADII', 3)
#
# # Compute flattening coefficient.
# re = radii[0]
# rp = radii[2]
# f = (re - rp) / re
#
# a = re
# b = radii[1]
# c = rp
#
# MAXIVL = 10000
# MAXWIN = 2 * MAXIVL
# TDBFMT = 'YYYY MON DD HR:MN:SC.### (TDB) ::TDB'
#
# # Initialize the "confinement" window with the interval
# # over which we'll conduct the search.
# cnfine = stypes.SPICEDOUBLE_CELL(2)
# spiceypy.wninsd(interval.start, interval.finish, cnfine)
#
# #
# # In the call below, the maximum number of window
# # intervals gfposc can store internally is set to MAXIVL.
# # We set the cell size to MAXWIN to achieve this.
# #
# riswin = stypes.SPICEDOUBLE_CELL(MAXWIN)
#
# #
# # Now search for the time period, within our confinement
# # window, during which the apparent target has elevation
# # at least equal to the elevation limit.
# #
# # VARIABLE I/O DESCRIPTION
# # --------------- --- -------------------------------------------------
# # SPICE_GF_CNVTOL P Convergence tolerance.
# # occtyp I Type of occultation.
# # front I Name of body occulting the other.
# # fshape I Type of shape model used for front body.
# # fframe I Body-fixed, body-centered frame for front body.
# # back I Name of body occulted by the other.
# # bshape I Type of shape model used for back body.
# # bframe I Body-fixed, body-centered frame for back body.
# # abcorr I Aberration correction flag.
# # obsrvr I Name of the observing body.
# # step I Step size in seconds for finding occultation
# # events.
# # cnfine I-O SPICE window to which the search is restricted.
# # result O SPICE window containing results.
# #
# spiceypy.gfoclt('ANY', 'MARS', 'ELLIPSOID', 'IAU_MARS', 'MEX',
# 'POINT', '', 'NONE', 'TGO', 60, cnfine, riswin)
#
# #
# # Now we perform another search to constrain the number of occultations by a
# # distance criteria
# #
# cnfine = riswin
#
# riswin = stypes.SPICEDOUBLE_CELL(MAXWIN)
#
# #
# # We're not using the adjustment feature, so
# # we set `adjust' to zero.
# #
# adjust = 0.0
#
# #
# # We use a step size of 1 hour
# #
# step = 60 * 60
#
# # nintvls = 2*n + ( m / step )
# #
# # where
# #
# # n is the number of intervals in the confinement
# # window
# #
# # m is the measure of the confinement window, in
# # units of seconds
# #
# # step is the search step size in seconds
# #
# ndays = 100
# nintvls = int(2 * 1 + (ndays * 24 * 60 * 60 / step))
#
# #
# # Now search for the time period, within our confinement
# # window, during which the apparent target has elevation
# # at least equal to the elevation limit.
# #
# # VARIABLE I/O DESCRIPTION
# # --------------- --- ------------------------------------------------
# # SPICE_GF_CNVTOL P Convergence tolerance
# # target I Name of the target body.
# # abcorr I Aberration correction flag.
# # obsrvr I Name of the observing body.
# # relate I Relational operator.
# # refval I Reference value.
# # adjust I Adjustment value for absolute extrema searches.
# # step I Step size used for locating extrema and roots.
# # nintvls I Workspace window interval count.
# #
# # cnfine I-O SPICE window to which the search is confined.
# # result O SPICE window containing results.
# #
# spiceypy.gfdist('MEX', 'NONE', 'TGO', '<', refval, adjust, step, nintvls,
# cnfine, riswin)
#
# #
# # The function wncard returns the number of intervals
# # in a SPICE window.
# #
# winsiz = spiceypy.wncard(riswin)
#
# lat_mid_list = []
# lon_mid_list = []
# dist_mid_list = []
#
# lat_list = []
# lon_list = []
# dist_list = []
#
# x, y, z = [], [], []
#
# if winsiz == 0:
# print('No events were found.')
#
# else:
#
# #
# # Display the visibility time periods.
# #
# print(
# 'Occultation times of {0:s} as seen from {1:s} when the distance is '
# 'less than {2:f} km:\n'.format('MEX', 'TGO', refval))
#
# for i in range(winsiz):
# #
# # Fetch the start and stop times of
# # the ith interval from the search result
# # window riswin.
# #
# [intbeg, intend] = spiceypy.wnfetd(riswin, i)
#
# #
# # Convert the rise time to a TDB calendar string.
# #
# timstr = spiceypy.timout(intbeg, TDBFMT)
# et_rise = intbeg
#
# #
# # Write the string to standard output.
# #
# # if i == 0:
# #
# # print('Occultation start time:'
# # ' {:s}'.format(timstr))
# # else:
# #
# # print('Occultation start time:'
# # ' {:s}'.format(timstr))
# #
# #
# # Convert the set time to a TDB calendar string.
# #
# timstr = spiceypy.timout(intend, TDBFMT)
# et_set = intend
#
# #
# # Write the string to standard output.
# #
# # if i == (winsiz - 1):
# #
# # print('Occultation or window stop time: '
# # ' {:s}'.format(timstr))
# # else:
# #
# # print('Occultation stop time: '
# # ' {:s}'.format(timstr))
# #
# # print(' ')
#
# #
# # Generate a Time Window with the rise and set times
# #
# utc_rise = spiceypy.et2utc(et_rise, 'ISOC', 3)
# utc_set = spiceypy.et2utc(et_set, 'ISOC', 3)
#
# time_window = spiops.TimeWindow(utc_rise, utc_set, resolution=1)
#
# interval = time_window.window
# num = 0
# for et in interval:
#
# num += 1
#
# (linept, lt) = spiceypy.spkpos('MARS', et, 'IAU_MARS', 'NONE',
# 'TGO')
# (linedr, lt) = spiceypy.spkpos('MEX', et, 'IAU_MARS', 'NONE',
# 'TGO')
#
# #
# # Variable I/O Description
# # -------- --- --------------------------------------------------
# # a I Length of ellipsoid's semi-axis in the x direction
# # b I Length of ellipsoid's semi-axis in the y direction
# # c I Length of ellipsoid's semi-axis in the z direction
# # linept I Point on line
# # linedr I Direction vector of line
# # pnear O Nearest point on ellipsoid to line
# # dist O Distance of ellipsoid from line
# #
# (pnear, dist) = spiceypy.npedln(a, b, c, linept, linedr)
#
# (lon, lat, alt) = spiceypy.recpgr('MARS', pnear, re, f)
#
# lon = spiceypy.dpr() * lon
# lat = spiceypy.dpr() * lat
#
# lon_list.append(lon)
# lat_list.append(lat)
# dist_list.append(spiceypy.vnorm(linedr))
#
# if num == int(len(interval) / 2):
# lon_mid_list.append(lon)
# lat_mid_list.append(lat)
# dist_mid_list.append(spiceypy.vnorm(linedr))
#
# spiops.plot(lon_mid_list, [lat_mid_list],
# xaxis_name='Longitude [deg]',
# yaxis_name=['Latitude [deg]'],
# title='TGO-MEX Occultation Groundtrack for MEX-TGO Distance < {}km'.format(
# refval),
# plot_height=500,
# plot_width=900,
# format='circle',
# background_image=True,
# line_width=6)
#
# spiops.plot(lon_list, [lat_list],
# xaxis_name='Longitude [deg]',
# yaxis_name=['Latitude [deg]'],
# title='TGO-MEX Occultation Groundtrack for MEX-TGO Distance < {}km'.format(
# refval),
# plot_height=500,
# plot_width=900,
# format='circle',
# background_image=True,
# line_width=1)
#
# return
def sensor_with_sectors(sensor, mk, fk=''):
#
# Load ROS FK and RPC IK files.
#
spiceypy.furnsh(mk)
if fk:
spiceypy.furnsh(fk)
#
# Get ELS IK data.
#
sensnm, sensid, secnum, secsiz, secsis, secfrm, secdir = read_ik_with_sectors(sensor)
#
# Report ELS IK data.
#
print('SENSOR NAIF NAME: {}'.format(sensnm))
print('SENSOR NAIF ID: {}'.format(sensid))
print('NUMBER OF SECTORS: {}'.format(secnum))
#if secsiz != 0:
# print('SECTOR SIZE: {}'.format(secsiz))
#else:
# print('SECTOR SIZES: {}'.format(secsis))
print('REFERENCE FRAME: {}'.format(secfrm))
print('SECTOR DIRECTIONS: {}'.format(secdir))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for element in secdir:
x = element[0]
y = element[1]
z = element[2]
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('X Axis')
ax.set_ylabel('Y Axis')
ax.set_zlabel('Z Axis')
ax.autoscale(tight=True)
plt.show()
return
def get_angle(frame1, frame2, et):
angle = 0
angle_bool = False
try:
# Get the rotation matrix between two frames
cmat = spiceypy.pxform(frame1, frame2, et)
(angle3, angle2, angle1) = spiceypy.m2eul(cmat, 3, 2, 1)
for tmp_angle in [angle3, angle2, angle1]:
if np.around(tmp_angle, 2) != 0:
angle = np.rad2deg(tmp_angle)
angle_bool = True
except ValueError as e:
print(e)
return angle, angle_bool
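#
# Example usage of get_angle (hypothetical frames and epoch; returns the
# first non-zero 3-2-1 Euler angle between the frames, in degrees, plus a
# flag indicating whether such an angle was found):
#
# et = spiceypy.utc2et('2020-01-01T00:00:00')
# angle, found = get_angle('MPO_HGA_APM', 'MPO_HGA_AZ', et)
#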
def get_earth_angle(frame, et, obs):
try:
(earth_vec, lt) = spiceypy.spkezr('EARTH', et, frame, 'LT+S', obs)
return np.rad2deg(spiceypy.vsep([0, 0, 1], earth_vec[:3]))
except:
return 0
def hga_angles(sc, et):
hga_el_frame = sc + '_HGA_EL'
hga_az_frame = sc + '_HGA_AZ'
hga_frame = sc + '_HGA'
if sc == 'MPO':
# First azimuth and then the elevation
hga_az, hga_az_bool = get_angle('MPO_HGA_APM', hga_az_frame, et)
if hga_az_bool:
hga_az = -hga_az + 180 # Invert azimuth and add half revolution
hga_el, hga_el_bool = get_angle(hga_az_frame, hga_el_frame, et)
elif sc == 'MTM':
return []
else:
hga_zero_frame = sc + '_SPACECRAFT'
# First elevation and then the azimuth
hga_el, hga_el_bool = get_angle(hga_zero_frame, hga_el_frame, et)
hga_az, hga_az_bool = get_angle(hga_el_frame, hga_az_frame, et)
hga_earth = get_earth_angle(hga_frame, et, sc)
return [hga_az, hga_el], hga_earth
def mga_angles(sc, et):
if sc == 'MPO':
# First azimuth and then the elevation
mga_az, mga_az_bool = get_angle('MPO_MGA_BOOM-H', 'MPO_MGA_BOOM', et)
mga_el, mga_el_bool = get_angle('MPO_MGA_ZERO', 'MPO_MGA', et)
mga_earth = get_earth_angle('MPO_MGA', et, 'MPO')
return [mga_az, mga_el], mga_earth
return [0, 0], 0
def solar_aspect_angles(sc, time):
sa_frame = ''
if sc == 'TGO':
sa_p_frame = sc+'_SA+Z'
sa_n_frame = sc+'_SA-Z'
elif sc == 'MPO':
sa_frame = sc+'_SA'
elif sc == 'MTM':
sa_p_frame = sc + '_SA+X'
sa_n_frame = sc + '_SA-X'
else:
sa_p_frame = sc+'_SA+Y'
sa_n_frame = sc+'_SA-Y'
sc_id = spiceypy.bodn2c(sc)
try:
# If there is only one Solar Array e.g.: BEPICOLOMBO MPO
if sa_frame:
(sun_vec, lt) = spiceypy.spkezp(10, time, sa_frame, 'NONE', sc_id)
saa_sa = np.rad2deg(spiceypy.vsep([1, 0, 0], sun_vec))
else:
(sun_vec, lt) = spiceypy.spkezp(10, time, sa_p_frame, 'NONE', sc_id)
saa_sa_p = np.rad2deg(spiceypy.vsep([1, 0, 0], sun_vec))
(sun_vec, lt) = spiceypy.spkezp(10, time, sa_n_frame, 'NONE', sc_id)
saa_sa_n = np.rad2deg(spiceypy.vsep([1, 0, 0], sun_vec))
(sun_vec, lt) = spiceypy.spkezp(10, time, sc+'_SPACECRAFT', 'NONE', sc_id)
saa_sc_x = np.rad2deg(spiceypy.vsep([1, 0, 0], sun_vec))
saa_sc_y = np.rad2deg(spiceypy.vsep([0, 1, 0], sun_vec))
saa_sc_z = np.rad2deg(spiceypy.vsep([0, 0, 1], sun_vec))
except:
#print('No CK information for {}'.format(time))
saa_sa, saa_sa_p, saa_sa_n, saa_sc_x, saa_sc_y, saa_sc_z = 0,0,0,0,0,0
if sa_frame:
return([saa_sa], [saa_sc_x, saa_sc_y, saa_sc_z])
else:
return ([saa_sa_p, saa_sa_n], [saa_sc_x, saa_sc_y, saa_sc_z])
def solar_array_angles(sa_frame, time):
# Rotation axis must be angle 3 to have a range of [-pi, pi], the
# rotation axis is derived from the FK.
if 'MPO' in sa_frame:
sa_zero_frame = 'MPO_SA_SADM'
elif 'MEX' in sa_frame:
sa_zero_frame = sa_frame + '_GIMBAL'
else:
sa_zero_frame = sa_frame + '_ZERO'
try:
#TODO This works for JUICE only in principle.
# Get the rotation matrix between two frames
cmat = spiceypy.pxform(sa_frame, sa_zero_frame, time)
(angle3, angle2, angle1) = spiceypy.m2eul(cmat, 2, 3, 1)
except:
# print('No CK information for {}'.format(time))
angle3 = 0
angle2 = 0
angle1 = 0
return(np.round(angle3*spiceypy.dpr(),3),
np.round(angle2*spiceypy.dpr(),3),
np.round(angle1*spiceypy.dpr(),3))
def structures_position(sc_frame, kernel, time):
return
def body_distance_to_plane(body_distance, body_plane, time):
body_1 = body_plane
body_2 = body_distance
if isinstance(time, str):
time = spiceypy.utc2et(time)
id_1 = spiceypy.bodn2c(body_1)
id_2 = spiceypy.bodn2c(body_2)
mat = spiceypy.pxform('MEX_SIDING_SPRING_PLANE','IAU_MARS', time)
vec1_1 = spiceypy.mxv(mat, [1,0,0])
vec2_1 = spiceypy.mxv(mat, [0,1,0])
state_1 = spiceypy.spkgeo(id_2, time, 'IAU_MARS', id_1)[0]
pos_1 = state_1[0:3]
    vel_1 = state_1[3:6]
pos_2 = [0,0,0]
norm_1 = np.cross(vec1_1,vec2_1)
norm_1 = norm_1/np.linalg.norm(norm_1)
# https://mathinsight.org/distance_point_plane
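    # For a plane a*x + b*y + c*z + d = 0 with unit normal (a, b, c), the
    # distance from a point (x0, y0, z0) is |a*x0 + b*y0 + c*z0 + d|. The
    # general formula divides by sqrt(a^2 + b^2 + c^2), kept below for
    # clarity even though norm_1 has already been normalized.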
a1, b1, c1 = norm_1[0], norm_1[1], norm_1[2]
d1 = -1*norm_1[0]*pos_1[0] - norm_1[1]*pos_1[1] - norm_1[2]*pos_1[2]
dist_1 = abs(a1 * pos_2[0] + b1 * pos_2[1] + c1 * pos_2[2] + d1) / np.sqrt(
np.square(a1) + np.square(b1) + np.square(c1))
dist_real = np.linalg.norm(pos_1)
return dist_1, dist_real
def angle_between_planes(body_1, body_2, time):
if isinstance(time, str):
time = spiceypy.utc2et(time)
mat = spiceypy.pxform('MEX_SIDING_SPRING_PLANE', 'HEE', time)
norm_1 = spiceypy.mxv(mat, [0,0,1])
norm_1 = norm_1 / np.linalg.norm(norm_1)
angle = 180 - spiceypy.dpr()*spiceypy.vsep(norm_1,[0,0,1])
return angle
def plane_ellipsoid(body_1, body_2, time):
id_1 = spiceypy.bodn2c(body_1)
id_2 = spiceypy.bodn2c(body_2)
mat = spiceypy.pxform('MEX_SIDING_SPRING_PLANE','IAU_MARS', time)
vec1 = spiceypy.mxv(mat, [1,0,0])
vec2 = spiceypy.mxv(mat, [0,1,0])
state1 = spiceypy.spkgeo(id_2, time, 'IAU_'+body_2, id_1)[0]
pos1 = state1[0:3]
plane = spiceypy.psv2pl(pos1, vec1, vec2)
    # Get the body semi-axes lengths
(num, semi_axis) = spiceypy.bodvcd(id_2, "RADII", 3)
a = semi_axis[0]
b = semi_axis[1]
c = semi_axis[2]
try:
ellipse = spiceypy.inedpl(a, b, c, plane)
except:
ellipse = 0
return ellipse
def beta_angle(observer, target, time):
# Provided by <NAME>
if not isinstance(time, float):
et = spiceypy.utc2et(time)
else:
et = time
#
# compute the Sun position relative to Mars; vector from Mars to Sun
#
vec_tar_sun, lt = spiceypy.spkpos( 'SUN', et, 'J2000', 'None', target)
#
# beta angle
#
sta_tar_obs, lt = spiceypy.spkezr(observer, et, 'J2000', 'None', target)
#
# orbital plane is defined by the cross-product of position and velocity
# vector
#
vec_orbit = spiceypy.vcrss(sta_tar_obs[:3], sta_tar_obs[3:])
#
# the beta angle can be computed from the orbital plane and Sun vectors
#
beta = abs(90.-spiceypy.vsep(vec_orbit, vec_tar_sun)*spiceypy.dpr())
return beta
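#
# Example usage of beta_angle (hypothetical epoch; the beta angle is the
# complement of the separation between the observer's orbital plane normal
# and the target-to-Sun vector):
#
# beta = beta_angle('MEX', 'MARS', '2020-01-01T00:00:00')
#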
def ck_coverage_timeline(metakernel, frame_list, notebook=True, html_file_name='test',
plot_width=975, plot_height=700):
if notebook:
output_notebook()
else:
output_file(html_file_name + '.html')
cov_start = []
cov_finsh = []
kernels = []
with open(metakernel, 'r') as f:
for line in f:
if '/ck/' in line and 'prelaunch' not in line:
kernels.append(line.split('/ck/')[-1].strip().split("'")[0])
if 'PATH_VALUES' in line and '=' in line:
path = line.split("'")[1] + '/ck/'
kernels = list(reversed(kernels))
ck_kernels = []
colors = []
for kernel in kernels:
for frame in frame_list:
cov = cov_ck_ker(path + kernel, frame, support_ker=metakernel, time_format='TDB')
if cov:
color = "lawngreen"
if 'MPO' in frame or 'MMO' in frame or 'MTM' in frame or 'TGO' in frame:
type = kernel.split('_')[3]
if type[2] == 'p': color = 'orange'
elif type[2] == 'r': color = 'green'
elif type[2] == 't': color = 'red'
elif type[2] == 'c': color = 'purple'
elif type[2] == 'm': color = 'blue'
cov_start.append(cov[0])
cov_finsh.append(cov[-1])
ck_kernels.append(kernel)
colors.append(color)
spiceypy.furnsh(metakernel)
date_format = 'UTC'
start_dt =[]
finsh_dt =[]
for element in cov_start:
start_dt.append(et_to_datetime(element, date_format))
for element in cov_finsh:
finsh_dt.append(et_to_datetime(element, date_format))
source = ColumnDataSource(data=dict(start_dt=start_dt,
finsh_dt=finsh_dt,
ck_kernels=ck_kernels))
title = "CK Kernels Coverage"
if 'ops' in metakernel.lower():
title += ' - OPS Metakernel'
elif 'plan' in metakernel.lower():
title += ' - PLAN Metakernel'
p = figure(y_range=ck_kernels, plot_height=plot_height, plot_width=plot_width, title=title, )
p.hbar(y=ck_kernels, height=0.2, left=start_dt, right=finsh_dt, color=colors)
labels = LabelSet(x='start_dt', y='ck_kernels', text='ck_kernels', level='glyph',
x_offset=-2, y_offset=5, source=source, render_mode='canvas')
p.xaxis.formatter = DatetimeTickFormatter(seconds=["%Y-%m-%d %H:%M:%S"],
minsec=["%Y-%m-%d %H:%M:%S"],
minutes=["%Y-%m-%d %H:%M:%S"],
hourmin=["%Y-%m-%d %H:%M:%S"],
hours=["%Y-%m-%d %H:%M:%S"],
days=["%Y-%m-%d %H:%M:%S"],
months=["%Y-%m-%d %H:%M:%S"],
years=["%Y-%m-%d %H:%M:%S"])
p.xaxis.major_label_orientation = 0#pi/4
p.yaxis.visible = False
p.xaxis.axis_label_text_font_size = "5pt"
p.add_layout(labels)
show(p)
def spk_coverage_timeline(metakernel, sc_list, notebook=True, html_file_name='test',
plot_width=975, plot_height=500):
if notebook:
output_notebook()
else:
output_file(html_file_name + '.html')
cov_start = []
cov_finsh = []
kernels = []
with open(metakernel, 'r') as f:
for line in f:
if '/spk/' in line and 'prelaunch' not in line:
kernels.append(line.split('/spk/')[-1].strip().split("'")[0])
if 'PATH_VALUES' in line and '=' in line:
path = line.split("'")[1] + '/spk/'
kernels = list(reversed(kernels))
spk_kernels = []
colors = []
for kernel in kernels:
for sc in sc_list:
cov = cov_spk_ker(path+kernel, sc.upper(), support_ker=metakernel,
time_format='TDB')
if cov:
color = "lawngreen"
if 'MPO' in sc or 'MMO' in sc or 'MTM' in sc:
type = kernel.split('_')[2]
if type[2] == 'p':
color = 'orange'
elif type[2] == 'r':
color = 'green'
elif type[2] == 't':
color = 'red'
elif type[2] == 'c':
color = 'purple'
elif type[2] == 'm':
color = 'blue'
cov_start.append(cov[0][0])
cov_finsh.append(cov[0][-1])
spk_kernels.append(kernel)
colors.append(color)
spiceypy.furnsh(metakernel)
date_format = 'UTC'
start_dt =[]
finsh_dt =[]
for element in cov_start:
start_dt.append(et_to_datetime(element, date_format))
for element in cov_finsh:
finsh_dt.append(et_to_datetime(element, date_format))
source = ColumnDataSource(data=dict(start_dt=start_dt,
finsh_dt=finsh_dt,
spk_kernels=spk_kernels))
title = "SPK Kernels Coverage"
if 'ops' in metakernel.lower():
title += ' - OPS Metakernel'
elif 'plan' in metakernel.lower():
title += ' - PLAN Metakernel'
p = figure(y_range=spk_kernels, plot_height=plot_height, plot_width=plot_width, title=title, )
p.hbar(y=spk_kernels, height=0.2, left=start_dt, right=finsh_dt, color=colors)
labels = LabelSet(x='start_dt', y='spk_kernels', text='spk_kernels', level='glyph',
x_offset=-2, y_offset=5, source=source, render_mode='canvas')
p.xaxis.formatter = DatetimeTickFormatter(seconds=["%Y-%m-%d %H:%M:%S"],
minsec=["%Y-%m-%d %H:%M:%S"],
minutes=["%Y-%m-%d %H:%M:%S"],
hourmin=["%Y-%m-%d %H:%M:%S"],
hours=["%Y-%m-%d %H:%M:%S"],
days=["%Y-%m-%d %H:%M:%S"],
months=["%Y-%m-%d %H:%M:%S"],
years=["%Y-%m-%d %H:%M:%S"])
p.xaxis.major_label_orientation = 0#pi/4
p.yaxis.visible = False
p.xaxis.axis_label_text_font_size = "5pt"
p.add_layout(labels)
show(p)
spiceypy.unload(metakernel)
#
# The camera has no distortion; the image of a point
# is determined by the intersection of the focal plane
# and the line determined by the point and the camera's
# focal point.
#
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/inrypl_c.html
#
def target_center_pixel(et, camera, target, target_frame):
camera_id = spiceypy.bodn2c(camera)
    focal_length = spiceypy.gdpool('INS{}_FOCAL_LENGTH'.format(camera_id), 0, 80)[0]
    focal_length /= 1000000  # millimetres to kilometres (the original OSIRIS routine used 717.322)
(shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id, 100)
visible = spiceypy.fovtrg(camera, target, 'POINT', target_frame, 'LT+S', camera, et)
if visible:
(ptarg, lt) = spiceypy.spkpos(camera, et, frame, 'LT+S', target)
(ptarg, norm) = spiceypy.unorm(ptarg)
        focus = spiceypy.vscl(focal_length, [0,0,1])
#
# The camera's focal plane contains the origin in
# camera coordinates, and the z-vector is orthogonal
# to the plane. Make a CSPICE plane representing
# the focal plane.
#
focal_plane = spiceypy.nvc2pl([0,0,1], 0.)
#
# The image of the target body's center in the focal
# plane is defined by the intersection with the focal
# plane of the ray whose vertex is the focal point and
# whose direction is dir.
#
(nxpts, image_focal) = spiceypy.inrypl(focus, ptarg, focal_plane)
if nxpts != 1:
print('Something went wrong')
return 0
else:
print('{}: {} Center is not in the image'.format(spiceypy.et2utc(et, 'ISOC', 3, 20),target))
return
pixel_size = spiceypy.gdpool('INS{}_PIXEL_SIZE'.format(camera_id), 0, 80)[0]
ccd_center = spiceypy.gdpool('INS{}_CCD_CENTER'.format(camera_id), 0, 80)
image = image_focal * (1000000000/pixel_size) # Pixel Size in Microns
return (image_focal, (ccd_center[1]+image[1], ccd_center[0]+image[0]))
def pixel_center_distance(et, camera, pixel_x, pixel_y, target='67P/C-G',
                          target_frame='67P/C-G_CK'):
    # target/target_frame default to the Rosetta comet values previously
    # hardcoded below ('67P/C-G_CK' is assumed to be the comet fixed frame).
pix_x = 1024 - pixel_y
pix_y = 1024 - pixel_x
camera_name = camera
camera_id = spiceypy.bodn2c(camera_name)
    focal_length = 717.322/1000000
(shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id, 100)
    (image_focal, target_pixel) = target_center_pixel(et, camera, target, target_frame)
center_x = image_focal[0]
center_y = image_focal[1]
    tar_cent_vec = [center_x, center_y, bsight[2]*focal_length]
    pix_vec = [pix_x*13.5/1000000000, pix_y*13.5/1000000000, bsight[2]*focal_length]
    (ptarg, lt) = spiceypy.spkpos(camera, et, frame, 'LT+S', target)
(ptarg, norm) = spiceypy.unorm(ptarg)
    pixel_vector = [13.5/1000000000, 0, bsight[2]*focal_length]
pix_cent_dist = np.tan(spiceypy.vsep(pix_vec,tar_cent_vec))*norm
    pixel_size = np.tan(spiceypy.vsep([0, 0, bsight[2]*focal_length], pixel_vector))*norm
#print('Comet offset', spiceypy.dpr()*spiceypy.vsep(bsight*focal_lenght,tar_cent_vec))
#print('Pixe-Comet offset', spiceypy.dpr()*spiceypy.vsep(pix_vec,tar_cent_vec))
return pix_cent_dist, target_pixel, pixel_size
def simulate_image(utc, camera, mission_targets, camera_spk=False,
pixel_lines=False, pixel_samples=False, dsks=False,
generate_image=False, report=False, name=False,
illumination=True, metakernel='', unload_kernels=True, log=False):
'''
:param utc: Image acquisition time in UTC format e.g.: 2016-01-01T00:00:00
:type utc: str
:param metakernel: SPICE Kernel Dataset Meta-Kernel
:type metakernel: str
:param camera: Name of the camera to be used. Usually found in the
instrument kernel (IK) e.g.: 'ROS_NAVCAM-A'
:type camera: str
:param mission_targets: Targets of the observation, e.g.:'67P/C-G'
:type mission_targets: list
:param pixel_lines: Number of pixel lines usually provided by the IK.
:type pixel_lines: int
:param pixel_samples: Number of pixel samples per line usually provided by
the IK.
:type pixel_samples: int
    :param dsks: Digital Shape Models to be used for the computation. Not
        required if already included in the Meta-Kernel.
    :type dsks: list
    :param generate_image: Flag to determine whether the image is saved to
        disk or plotted.
    :type generate_image: bool
    :param report: Flag for generating a processing report.
    :type report: bool
    :param name: Name to be given to the output image.
    :type name: str
:return: Name of the output image
:rtype: str
'''
if metakernel:
spiceypy.furnsh(metakernel)
if dsks:
for dsk in dsks:
spiceypy.furnsh(dsk)
et = spiceypy.utc2et(utc)
#
# We retrieve the camera information using GETFOV. More info available:
#
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/getfov_c.html
#
camera_name = camera
camera_id = spiceypy.bodn2c(camera_name)
(shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id, 100)
#
    # TODO: In the future all the sensors should be ephemeris objects, see
# https://issues.cosmos.esa.int/socci/browse/SPICEMNGT-77
#
if not camera_spk:
if camera.split('_')[0] == 'ROS':
observer = 'ROSETTA'
elif camera.split('_')[0] == 'MEX':
observer = 'MEX'
elif camera.split('_')[0] == 'VEX':
observer = 'VEX'
elif camera.split('_')[0] == 'JUICE':
observer = 'JUICE'
elif camera.split('_')[0] == 'HERA':
observer = 'HERA'
elif camera.split('_')[0] == 'MTM':
observer = 'MTM'
else:
if isinstance(camera_spk, str):
observer = camera_spk
else:
observer = camera
#
# We check if the resolution of the camera has been provided as an input
# if not we try to obtain the resolution of the camera from the IK
#
if not pixel_lines or not pixel_samples:
try:
pixel_samples = int(spiceypy.gdpool('INS'+str(camera_id) + '_PIXEL_SAMPLES',0,1))
pixel_lines = int(spiceypy.gdpool('INS' + str(camera_id) + '_PIXEL_LINES',0,1))
        except:
            print("PIXEL_SAMPLES and/or PIXEL_LINES not defined for "
                  "{}".format(camera))
            return
#
# We generate a matrix using the resolution of the framing camera as the
# dimensions of the matrix
#
nx, ny = (pixel_lines, pixel_samples)
x = np.linspace(bounds[0][0], bounds[2][0], nx)
y = np.linspace(bounds[0][1], bounds[2][1], ny)
#
    # We define the matrices that will be used as outputs.
#
phase_matrix = np.zeros((nx, ny))
emissn_matrix = np.zeros((nx, ny))
solar_matrix = np.zeros((nx, ny))
target_matrix = np.zeros((nx, ny))
#
# Now we look for additional targets.
#
targets_frames = []
methods = []
targets = []
for target in mission_targets:
try:
target_frame = target2frame(target)
if dsks:
for dsk in dsks:
ids = spiceypy.dskobj(dsk)
if spiceypy.bodn2c(target) in ids:
method = 'DSK/UNPRIORITIZED'
break
else:
method = 'ELLIPSOID'
else:
method = 'ELLIPSOID'
visible = spiceypy.fovtrg(camera, target, 'POINT', target_frame, 'NONE',
observer, et)
if visible:
print('{} center is visible'.format(target))
targets.append(target)
targets_frames.append(target_frame)
methods.append(method)
except Exception as e:
print(e)
pass
r = []
for target in targets:
r.append(np.linalg.norm(
spiceypy.spkpos(target, et, 'J2000', 'NONE', observer)[
0]))
#
# If we have targets we order the list in target proximity order
#
if len(r) > 1:
targets = np.asarray(targets)
targets = targets[np.argsort(r)]
targets_frames = np.asarray(targets_frames)
targets_frames = targets_frames[np.argsort(r)]
methods = np.asarray(methods)
methods = methods[np.argsort(r)]
#
# For each pixel we compute the possible intersection with the target, if
# the target is intersected we then compute the illumination angles. We
# use the following SPICE APIs: SINCPT and ILLUMF
#
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sincpt_c.html
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/illumf_c.html
#
isvisible, isiluminated = [], []
for i in range(0, len(x), 1):
for j in range(0, len(y), 1):
#
# List of pixel's boresight
#
ibsight = [x[i], y[j], bsight[2]]
#
# We do another loop in order to determine if we have other
# 'targets' in addition to the 'main' target
#
for k in range(0, len(targets), 1):
try:
(spoint, trgepc, srfvec ) = spiceypy.sincpt(methods[k], targets[k], et,
targets_frames[k], 'NONE', observer, frame, ibsight)
target_matrix[i, j] = 255
(trgenpc, srfvec, phase, solar,
emissn, visiblef, iluminatedf) = spiceypy.illumf(methods[k], targets[k], 'SUN', et,
targets_frames[k], 'LT+S', observer, spoint)
emissn_matrix[i, j] = emissn
phase_matrix[i, j] = phase
#
# Add to list if the point is visible to the camera
#
                    if visiblef:
isvisible.append(visiblef)
#
# Add to list if the point is illuminated and seen by the camera
#
                    if iluminatedf:
isiluminated.append(iluminatedf)
solar_matrix[i, j] = solar
else:
#
# And we set the not illuminated pixels with np.pi/2
#
solar_matrix[i, j] = np.pi/2
break
                except:
                    #
                    # If SINCPT raises an error, we set that we see nothing in
                    # the pixel.
                    #
                    emissn_matrix[i, j] = 0
                    phase_matrix[i, j] = np.pi
                    solar_matrix[i, j] = np.pi/2
#
# We transform the matrix from illumination angles to greyscale [0-255]
#
if solar_matrix.max() == solar_matrix.min():
rescaled = solar_matrix
else:
rescaled = (255 / (solar_matrix.max()-solar_matrix.min()) * (solar_matrix - solar_matrix.min())).astype(np.uint8)
rescaled = - np.flip(rescaled, 0) + 255
#
# We generate the plot
#
if generate_image:
if not name:
name_illum = '{}_{}.PNG'.format(camera.upper(),
utc.upper())
name_tar = '{}_{}_TAR.PNG'.format(camera.upper(),
utc.upper())
else:
name_illum = '{}_{}_{}.PNG'.format(name.upper(),
camera.upper(),
utc.upper())
name_tar = '{}_{}_{}_TAR.PNG'.format(name.upper(),
camera.upper(),
utc.upper())
if np.count_nonzero(target_matrix) >= 1.0:
if illumination:
imageio.imwrite(name_illum, rescaled)
else:
imageio.imwrite(name_tar, target_matrix)
if not generate_image:
plt.imshow(rescaled, cmap='gray')
plt.axis('off')
plt.show()
if report:
print('{} {} {} {} {}'.format(utc, pixel_samples*pixel_lines, np.count_nonzero(target_matrix), len(isvisible), camera))
#if report:
# print('Pixel report for {} w.r.t {} @ {}'.format(camera,target,utc))
# print(' Total number of pixels: ', pixel_samples*pixel_lines)
# print(' Illuminated pixels: ', len(isvisible))
# print(' Hidden pixels: ', pixel_samples*pixel_lines - len(isvisible))
# print(' Shadowed points: ', pixel_samples*pixel_lines - len(isiluminated))
if log:
with open(log, "a") as f:
f.write('{} {} {} {} {}\n'.format(utc, pixel_samples*pixel_lines, np.count_nonzero(target_matrix), len(isvisible), camera))
if unload_kernels:
spiceypy.kclear()
return name
def sc_dsk_view(utc,mk, dsks, observer, sc_targets, sc_frames=False,
pixels=150, name=False, generate_image=True, illumination=True,
show3Dplot=False, unload_kernels=False):
if not sc_frames:
sc_frames = sc_targets
mpl.rcParams['figure.figsize'] = (26.0, 26.0)
spiceypy.furnsh(mk)
for dsk in dsks:
spiceypy.furnsh(dsk)
utcstr = utc.replace(':','')
et = spiceypy.utc2et(utc)
nx, ny = (pixels, pixels) # resolution of the image
x = np.linspace(-5, 5, nx)
y = np.linspace(-5, 5, ny)
xv, yv = np.meshgrid(x, y)
solar_matrix = np.zeros((nx, ny))
target_matrix = np.zeros((nx, ny))
isvisible, isiluminated = [], []
r, lt = spiceypy.spkpos(observer, et, 'J2000', 'NONE', sc_targets[0])
#
    # We define a 'Nadir frame' w.r.t. J2000 to make it general regardless of
    # the target body-fixed frame.
    #
zN = r
zN = zN/np.linalg.norm(zN)
xN = np.array([1,0,0]) - np.dot(np.dot([1,0,0], zN), zN)/np.linalg.norm(zN)**2
xN = xN/np.linalg.norm(xN)
yN = np.cross(zN, xN)
yN = yN/np.linalg.norm(yN)
RotM = np.linalg.inv(np.array([xN, yN, zN]))
spoints = []
flag = False
f = 0.5 # Factor for the FOV
for i, x in enumerate(xv):
for j, y in enumerate(yv):
dpxy = [x[i], y[i], -np.linalg.norm(r)*1000*f]
ibsight = spiceypy.mxv(RotM, dpxy)
#
# We do another loop in order to determine if we have other
# 'targets' in addition to the 'main' target
#
spoint_per_target = []
distance_per_target = []
for k in range(0, len(sc_targets), 1):
try:
(spoint, trgepc, srfvec) = spiceypy.sincpt('DSK/UNPRIORITIZED', sc_targets[k], et, sc_frames[k], 'NONE', observer, 'J2000', ibsight)
spoint_per_target.append(spoint)
distance_per_target.append(spiceypy.vnorm(srfvec))
except:
spoint_per_target.append([])
distance_per_target.append([])
pass
for spoint in spoint_per_target:
if not spoint.__class__ == list:
flag = True
if flag:
#
# we get the minimum distance and the range.
#
distance_per_target_floats = []
for element in distance_per_target:
if not isinstance(element, list):
distance_per_target_floats.append(element)
min_dist = min(distance_per_target_floats)
for k in range(0, len(sc_targets), 1):
if min_dist == distance_per_target[k]:
target = sc_targets[k]
tar_frame = sc_frames[k]
spoints.append(spoint_per_target[k])
spoint = spoint_per_target[k]
try:
(trgenpc, srfvec, phase, solar,
emissn, visiblef, iluminatedf) = spiceypy.illumf('DSK/UNPRIORITIZED', target, 'SUN', et, tar_frame, 'NONE', observer, spoint)
                    if visiblef:
isvisible.append(visiblef)
target_matrix[i, j] = 255
                        if iluminatedf:
isiluminated.append(iluminatedf)
if solar > np.pi / 2:
solar_matrix[i, j] = np.pi - solar
else:
solar_matrix[i, j] = solar
else:
solar_matrix[i, j] = np.pi / 2 # not illuminated
except:
solar_matrix[i, j] = np.pi / 2
else:
solar_matrix[i, j] = np.pi / 2
flag = False
if show3Dplot:
spoints = np.asarray(spoints)
fig = plt.figure(figsize=(9, 9))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(spoints[:, 0], spoints[:, 1], spoints[:, 2], marker='.')
plt.xlabel("x position")
plt.ylabel("y position")
plt.title('')
plt.axis('equal')
plt.show()
print('total number of points: ', pixels*pixels)
print('occulted points: ', pixels*pixels - len(isvisible))
    print('not illuminated points: ', pixels*pixels - len(isiluminated))
#
# We transform the matrix from illumination angles to greyscale [0-255]
#
if illumination:
rescaled = (255 / (solar_matrix.max()-solar_matrix.min()) * (solar_matrix - solar_matrix.min())).astype(np.uint8)
rescaled = - np.flip(rescaled, 0) + 255
else:
rescaled = target_matrix
#
# We generate the plot
#
if generate_image:
if not name:
name = '{}_{}.PNG'.format(sc_targets[0].upper(),
utcstr.upper())
else:
name = '{}_{}_{}.PNG'.format(name.upper(),
sc_targets[0].upper(),
utcstr.upper())
imageio.imwrite(name, rescaled)
else:
plt.imshow(rescaled, cmap='gray')
plt.axis('off')
plt.show()
if unload_kernels:
spiceypy.kclear()
return
def getXYforPlanet(time_et, planet, camera, observer=''):
"""
    Compute the (x, y) position of a planet within a camera field-of-view at
    the given time. If the planet is not visible, (False, False, False, False)
    is returned; if it is visible, the apparent size of the planet in pixels
    is returned as well. Routine has been tested for planet='EARTH'.
"""
#
camera_id = spiceypy.bodn2c(camera)
#
# In case we do not have an SPK for the camera
#
if not observer:
observer = camera
r_planet = (spiceypy.bodvrd(planet, 'RADII', 3))[1][0]
#
# get instrument related info
#
(shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id,
100)
mat = spiceypy.pxform(frame, 'J2000', time_et)
for bs in range(0, 4):
bounds[bs, :] = spiceypy.mxv(mat, bounds[bs, :])
[pos, ltime] = spiceypy.spkpos(planet, time_et, 'J2000',
'LT+S', observer)
    visible = spiceypy.fovray(camera, pos, 'J2000', 'S', observer,
                              time_et)
#
# only get detector position, if target is visible
#
x = 0.0
y = 0.0
    size = 0.0
if visible:
hit = []
for p in range(0, 4):
#
# compute the plane that is build up by the coordinate origin and two FOV corner vectors
#
plane = spiceypy.psv2pl([0, 0, 0], bounds[p, :],
bounds[(p + 1) % 4, :])
#
# compute the projection of the target vector onto that plane
#
vout = (spiceypy.unorm(spiceypy.vprjp(pos, plane)))[0]
#
# calculate the angle between this vector and the original corner vectors
#
alpha = spiceypy.vsep(bounds[p, :], vout)
beta = spiceypy.vsep(bounds[(p + 1) % 4, :], vout)
#
            # the ratio of these angles gives, to first approximation, the
            # relative position of the target along that edge of the detector;
            # averaging the two opposite FOV edges yields the x, y coordinates
            # on the detector
hit.append(1024 * alpha / (alpha + beta))
# get intersection of the points
(x, y) = findIntersection(hit[0], 0, hit[1], 1023, 0,
hit[1], 1023, hit[2])
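        #
        # Estimate the apparent planet diameter in pixels. NOTE: the constants
        # below appear to assume a ~1000-pixel detector (half-size 500) and a
        # 35 deg half-angle field of view; adjust them for other cameras.
        #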
size = 2 * r_planet * 500 / (
np.tan(35. * 2 * np.pi / 360.) * spiceypy.vnorm(pos))
else:
print('Planet {} not visible by {} at {}'.format(planet, camera, spiceypy.et2utc(time_et,'ISOC',1,25)))
return (False, False, False, False)
    return (time_et, x, y, size)
def pixel_radec(time, camera, pixel, units='radians'):
"""
Obtain the Right Ascension and Declination in J2000 of a pixel of a given
sensor at a given time.
@param time: Input time in UTC
@type time: str
@param camera: SPICE name for the camera sensor (requires IK kernel)
@type camera: str
@param pixel: Pixel location in the camera sensor CCD. Provided as two
values [x y]
@type pixel: list
@param units: Angular units for Right Ascension and Declination: radians or
degrees
@type units: str
@return: Right Ascension and Declination in the indicated units
@rtype: tuple
"""
et = spiceypy.utc2et(time)
#
# We retrieve the camera information using GETFOV. More info available:
#
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/getfov_c.html
#
camera_name = camera
camera_id = spiceypy.bodn2c(camera_name)
(shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id, 100)
pixel_samples = \
int(spiceypy.gdpool(''.join(('INS', str(camera_id), '_PIXEL_SAMPLES')),
0, 1))
pixel_lines = \
int(spiceypy.gdpool(''.join(('INS', str(camera_id), '_PIXEL_LINES')), 0,
1))
#
# We generate a matrix using the resolution of the framing camera as the
# dimensions of the matrix
#
nx, ny = (pixel_samples, pixel_lines)
x = np.linspace(bounds[0][0], bounds[2][0], nx)
y = np.linspace(bounds[0][1], bounds[2][1], ny)
(i, j) = pixel
#
# List of pixel's boresight
#
ibsight = [x[i], y[j], bsight[2]]
mat = spiceypy.pxform(frame, 'J2000', et)
ibsight = spiceypy.mxv(mat, ibsight)
(r, ra, dec) = spiceypy.recrad(ibsight)
if units == 'degrees':
ra = np.degrees(ra)
dec = np.degrees(dec)
return ra, dec
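#
# Example usage of pixel_radec (hypothetical camera, pixel and epoch; the
# camera IK and frame kernels must be loaded beforehand):
#
# (ra, dec) = pixel_radec('2020-01-01T00:00:00', 'ROS_NAVCAM-A', [512, 512],
#                         units='degrees')
#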
def radec_in_fov(time, ra, dec, camera, observer=False, units='degrees'):
"""
    Determine whether a given Right Ascension and Declination coordinate in
    J2000 (ultimately a given Star) is present in a given sensor Field-of-View
    at a given time.
@param time: Input time in UTC
@type time: str
@param ra: Right Ascension in the indicated units (w.r.t J2000)
@type ra: float
@param dec: Declination in the indicated units (w.r.t J2000)
@type dec: float
@param camera: SPICE name for the camera sensor (requires IK kernel)
@type camera: str
@param observer: SPICE name for the camera sensor position
@type observer: str
@param units: Angular units for Right Ascension and Declination: radians or
degrees
@type units: str
@return: True/False if the RA, DEC is in the Field-of-View
@rtype: bool
"""
#
# If an observer is not provided then it is assumed that the observer is the
    # camera itself. Please note that this then requires the structures SPK to
# be present in the meta-kernel (or the loaded kernels)
#
if not observer:
observer = camera
if units == 'degrees':
ra = np.radians(ra)
dec = np.radians(dec)
et = spiceypy.utc2et(time)
#
# Create a unit direction vector pointing from the given S/C
# to the specified star. For details on corrections such
# as parallax, please see the example in GFRFOV.
#
raydir = spiceypy.radrec(1.0, ra, dec)
#
# Is the star in the field-of-view of the given sensor?
#
visible = spiceypy.fovray(camera, raydir, 'J2000', 'S', observer, et)
if visible:
return True
else:
return False
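#
# Example usage of radec_in_fov (hypothetical values; checks whether a star
# at the given J2000 RA/DEC is inside the camera FOV at that epoch):
#
# in_fov = radec_in_fov('2020-01-01T00:00:00', 90.0, -66.5, 'ROS_NAVCAM-A',
#                       observer='ROSETTA', units='degrees')
#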
def gf_radec_in_fov(start_time, finish_time, ra, dec, camera, step=60,
units='degrees', observer=False):
"""
This functions provides the time windows for which a given Right Ascension
and Declination in J2000 (ultimately a given Star) is present in the given
camera FOV for a given start and finish UTC times.
@param start_time: Search time window start in UTC
@type start_time: str
@param finish_time: Search time window finish in UTC
@type finish_time: str
@param ra: Right Ascension in the indicated units (w.r.t J2000)
@type ra: float
@param dec: Declination in the indicated units (w.r.t J2000)
@type dec: float
@param camera: SPICE name for the camera sensor (requires IK kernel)
@type camera: str
@param step: Step with which the search will be performed in seconds
@type step: float
@param units: Angular units for Right Ascension and Declination: radians or
degrees
@type units: str
@param observer: SPICE name for the camera sensor position
@type observer: str
@return: List of Time Windows
@rtype: list
"""
#
# If an observer is not provided then it is assumed that the observer is the
    # camera itself. Please note that this then requires the structures SPK to
# be present in the meta-kernel (or the loaded kernels)
#
if not observer:
observer = camera
if units == 'degrees':
ra = np.radians(ra)
dec = np.radians(dec)
et_start = spiceypy.utc2et(start_time)
et_finish = spiceypy.utc2et(finish_time)
#
# Create a unit direction vector pointing from the given S/C
# to the specified star. For details on corrections such
# as parallax, please see the example in GFRFOV.
#
raydir = spiceypy.radrec(1.0, ra, dec)
MAXIVL = 10000
MAXWIN = 2 * MAXIVL
TDBFMT = 'YYYY MON DD HR:MN:SC.### (TDB) ::TDB'
# Initialize the "confinement" window with the interval
# over which we'll conduct the search.
cnfine = stypes.SPICEDOUBLE_CELL(2)
spiceypy.wninsd(et_start, et_finish, cnfine)
#
# In the call below, the maximum number of window
# intervals gfposc can store internally is set to MAXIVL.
# We set the cell size to MAXWIN to achieve this.
#
reswin = stypes.SPICEDOUBLE_CELL(MAXWIN)
#
# Now search for the time period, within our confinement
# window, during which the RA, DEC ray is in the camera FOV.
#
# VARIABLE I/O DESCRIPTION
# -------- --- --------------------------------------------------
# INST I Name of the instrument.
# RAYDIR I Ray's direction vector.
# RFRAME I Reference frame of ray's direction vector.
# ABCORR I Aberration correction flag.
# OBSRVR I Name of the observing body.
# STEP I Step size in seconds for finding FOV events.
# CNFINE I SPICE window to which the search is restricted.
# RESULT O SPICE window containing results.
    spiceypy.gfrfov(camera, raydir, 'J2000', 'S', observer, step, cnfine, reswin)
#
# The function wncard returns the number of intervals
# in a SPICE window.
#
winsiz = spiceypy.wncard(reswin)
if winsiz == 0:
print('No events were found.')
else:
#
# Display the event time periods.
#
print('Time Windows for RA={0:f}, DEC={1:f} [DEG] in '
'{2:s} FOV:'.format(np.degrees(ra), np.degrees(dec), camera))
#
# Store the values in a list
#
intervals = []
for i in range(winsiz):
#
# Fetch the start and stop times of the ith interval from the search
# result window reswin.
#
[intbeg, intend] = spiceypy.wnfetd(reswin, i)
intervals.append([intbeg, intend])
#
# Convert the start and finish times to a TDB calendar string.
#
print(spiceypy.timout(intbeg, TDBFMT), ',',
spiceypy.timout(intend, TDBFMT))
return intervals
def radec2pixel(time, ra, dec, camera, observer=False, units='degrees'):
"""
This function determines the pixel location for a given camera of a
Right Ascension and Declination coordinate in J2000 (ultimately a star
position) at a given time.
@param time: Input time in UTC
@type time: str
@param ra: Right Ascension in the indicated units (w.r.t J2000)
@type ra: float
@param dec: Declination in the indicated units (w.r.t J2000)
@type dec: float
@param camera: SPICE name for the camera sensor (requires IK kernel)
@type camera: str
@param observer: SPICE name for the camera sensor position
@type observer: str
@param units: Angular units for Right Ascension and Declination: radians or
degrees
@type units: str
@return: Pixel location of a given Right Ascension and Declination
(if present in the FOV)
@rtype: tuple
"""
#
    # We first check whether the RA, DEC is in the FOV
#
    if radec_in_fov(time, ra, dec, camera, observer=observer, units=units):
(ra_matrix, dec_matrix) = camera_radec(time, camera, units=units)
#
# Now we look for the pixel location of the RA, DEC
#
(ra_idx, ra_value) = findNearest(ra_matrix, ra)
(dec_idx, dec_value) = findNearest(dec_matrix, dec)
#
# If the indexes are not the same, indicate and provide both
#
if ra_idx != dec_idx:
print('RA and DEC indexes are not the same: {}, {}'.format(ra_idx,
dec_idx))
return ra_idx
else:
return 'RA, DEC are not in the FOV'
def camera_radec(time, camera, units='radians', plot=False):
"""
This function provides two mesh grids with Right Ascensions and Declinations
for a given camera at a given time.
@param time: Input time in UTC
@type time: str
@param camera: SPICE name for the camera sensor (requires IK kernel)
@type camera: str
@param units: Angular units for Right Ascension and Declination: radians or
degrees
@type units: str
    @param plot: Indicate whether the mesh grids should be plotted or not
    @type plot: bool
    @return: Right Ascension and Declination mesh grids
    @rtype: tuple
"""
et = spiceypy.utc2et(time)
#
# We retrieve the camera information using GETFOV. More info available:
#
# https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/getfov_c.html
#
camera_name = camera
camera_id = spiceypy.bodn2c(camera_name)
(shape, frame, bsight, vectors, bounds) = spiceypy.getfov(camera_id, 100)
pixel_samples = \
int(spiceypy.gdpool(''.join(('INS', str(camera_id), '_PIXEL_SAMPLES')),
0, 1))
pixel_lines = \
int(spiceypy.gdpool(''.join(('INS', str(camera_id), '_PIXEL_LINES')), 0,
1))
#
# We generate a matrix using the resolution of the framing camera as the
# dimensions of the matrix
#
nx, ny = (pixel_samples, pixel_lines)
x = np.linspace(bounds[0][0], bounds[2][0], nx)
    y = np.linspace(bounds[0][1], bounds[2][1], ny)
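    #
    # NOTE: the source was truncated here; the reconstruction below follows
    # the same per-pixel pattern as pixel_radec and returns the RA/DEC mesh
    # grids announced in the docstring.
    #
    ra_matrix = np.zeros((nx, ny))
    dec_matrix = np.zeros((nx, ny))
    mat = spiceypy.pxform(frame, 'J2000', et)
    for i in range(nx):
        for j in range(ny):
            #
            # Pixel boresight in the camera frame, rotated into J2000.
            #
            ibsight = spiceypy.mxv(mat, [x[i], y[j], bsight[2]])
            (r, ra, dec) = spiceypy.recrad(ibsight)
            if units == 'degrees':
                ra = np.degrees(ra)
                dec = np.degrees(dec)
            ra_matrix[i, j] = ra
            dec_matrix[i, j] = dec
    if plot:
        plt.imshow(ra_matrix, cmap='gray')
        plt.title('Right Ascension [{}]'.format(units))
        plt.show()
        plt.imshow(dec_matrix, cmap='gray')
        plt.title('Declination [{}]'.format(units))
        plt.show()
    return (ra_matrix, dec_matrix)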
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 13:16:54 2020
@author: pfm
"""
import numpy as np
def Dlp(A, B, p=2):
cost = np.sum(np.power(np.abs(A - B), p))
return np.power(cost, 1 / p)
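# Example: with the default p=2, Dlp is the Euclidean distance, e.g.
# Dlp(np.array([0, 0]), np.array([3, 4])) returns 5.0.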
def twed(A, timeSA, B, timeSB, nu, _lambda):
# [distance, DP] = TWED( A, timeSA, B, timeSB, lambda, nu )
# Compute Time Warp Edit Distance (TWED) for given time series A and B
#
# A := Time series A (e.g. [ 10 2 30 4])
# timeSA := Time stamp of time series A (e.g. 1:4)
# B := Time series B
# timeSB := Time stamp of time series B
# lambda := Penalty for deletion operation
# nu := Elasticity parameter - nu >=0 needed for distance measure
# Reference :
    # Marteau, P.-F. (2009). "Time Warp Edit Distance with Stiffness Adjustment for Time Series Matching".
# IEEE Transactions on Pattern Analysis and Machine Intelligence. 31 (2): 306–318. arXiv:cs/0703033
# http://people.irisa.fr/Pierre-Francois.Marteau/
    # Check input arguments
    if len(A) != len(timeSA):
        print("The length of A is not equal to the length of timeSA")
        return None, None
    if len(B) != len(timeSB):
        print("The length of B is not equal to the length of timeSB")
        return None, None
if nu < 0:
print("nu is negative")
return None, None
# Add padding
A = np.array([0] + list(A))
timeSA = np.array([0] + list(timeSA))
B = np.array([0] + list(B))
timeSB = np.array([0] + list(timeSB))
n = len(A)
m = len(B)
# Dynamical programming
DP = np.zeros((n, m))
# Initialize DP Matrix and set first row and column to infinity
DP[0, :] = np.inf
DP[:, 0] = np.inf
DP[0, 0] = 0
# Compute minimal cost
for i in range(1, n):
for j in range(1, m):
# Calculate and save cost of various operations
C = np.ones((3, 1)) * np.inf
# Deletion in A
C[0] = (
DP[i - 1, j]
+ Dlp(A[i - 1], A[i])
+ nu * (timeSA[i] - timeSA[i - 1])
+ _lambda
)
# Deletion in B
C[1] = (
DP[i, j - 1]
+ Dlp(B[j - 1], B[j])
+ nu * (timeSB[j] - timeSB[j - 1])
+ _lambda
)
# Keep data points in both time series
C[2] = (
DP[i - 1, j - 1]
+ Dlp(A[i], B[j])
+ Dlp(A[i - 1], B[j - 1])
+ nu * (abs(timeSA[i] - timeSB[j]) + abs(timeSA[i - 1] - timeSB[j - 1]))
)
# Choose the operation with the minimal cost and update DP Matrix
DP[i, j] = np.min(C)
distance = DP[n - 1, m - 1]
return distance, DP
def backtracking(DP):
# [ best_path ] = BACKTRACKING ( DP )
# Compute the most cost efficient path
# DP := DP matrix of the TWED function
x = np.shape(DP)
i = x[0] - 1
j = x[1] - 1
    # The indices of the path are saved in reverse order
# path = np.ones((i + j, 2 )) * np.inf;
best_path = []
steps = 0
while i != 0 or j != 0:
best_path.append((i - 1, j - 1))
        C = np.ones((3, 1)) * np.inf
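        #
        # NOTE: the source was truncated here; the rest of the loop and the
        # return below are reconstructed from the reference TWED
        # implementation (match, deletion-in-A and deletion-in-B moves).
        #
        # Keep data points in both time series
        C[0] = DP[i - 1, j - 1]
        # Deletion in A
        C[1] = DP[i - 1, j]
        # Deletion in B
        C[2] = DP[i, j - 1]
        # Find the index of the lowest-cost move and step accordingly
        idx = np.argmin(C)
        if idx == 0:
            i = i - 1
            j = j - 1
        elif idx == 1:
            i = i - 1
        else:
            j = j - 1
        steps = steps + 1
    best_path.append((i - 1, j - 1))
    best_path.reverse()
    return best_path[1:]


# Minimal usage sketch for twed/backtracking (toy series; the nu and _lambda
# values are illustrative only):
#
# A, timeSA = [1, 2, 3, 4], [1, 2, 3, 4]
# B, timeSB = [1, 3, 4, 10], [1, 2, 3, 4]
# distance, DP = twed(A, timeSA, B, timeSB, nu=0.5, _lambda=1.0)
# path = backtracking(DP)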
import random
import time
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import h5py
import pywt
import torch
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
import utils_graph
import utils_io
import utils_nn
from cnn import CNN
from gram_dataset import GramDataset
from hyperparameters import Hyperparameters
from signal_data import SignalData
PLOTS_FOLDER = 'plots'
USE_CUDA = torch.cuda.is_available()
SPECTROGRAMS_IMAGES_FOLDER = 'spectrograms/images'
SPECTROGRAMS_DATA_FOLDER = 'spectrograms/data'
SPECTROGRAMS_TRAIN_FILE_NAME = 'train_spectrograms.hdf5'
SPECTROGRAMS_TEST_FILE_NAME = 'test_spectrograms.hdf5'
SCALEOGRAMS_IMAGES_FOLDER = 'scaleograms/images'
SCALEOGRAMS_DATA_FOLDER = 'scaleograms/data'
SCALEOGRAMS_TRAIN_FILE_NAME = 'train_scaleograms.hdf5'
SCALEOGRAMS_TEST_FILE_NAME = 'test_scaleograms.hdf5'
def _save_grams(signals: np.ndarray, file_name: str, gram_type: str):
"""Computes and saves spectrograms or scaleograms for all the signals.
"""
if gram_type == 'spectrograms':
data_folder = SPECTROGRAMS_DATA_FOLDER
create_gram_func = _create_spectrogram
elif gram_type == 'scaleograms':
data_folder = SCALEOGRAMS_DATA_FOLDER
create_gram_func = _create_scaleogram
else:
raise Exception('gram_type must be "spectrograms" or "scaleograms"')
gram_path = Path(data_folder, file_name)
if not gram_path.exists():
print(f' Generating and saving {gram_type} to {file_name}.')
Path(data_folder).mkdir(exist_ok=True, parents=True)
# 2947 x 9 x 128
(num_instances, num_components, num_timesteps) = signals.shape
# 2947 x 9 x 128 x 128
grams = np.zeros((num_instances, num_components,
num_timesteps, num_timesteps))
graph_gaussian_signal = True
for instance in range(num_instances):
for component in range(num_components):
signal = signals[instance, component, :]
# 128 x 128
gram = create_gram_func(signal, graph_gaussian_signal)
grams[instance, component, :, :] = gram
graph_gaussian_signal = False
with h5py.File(gram_path, 'w') as group:
group.create_dataset(name=gram_type, shape=grams.shape,
dtype='f', data=grams)
def _save_gram_images(labels: np.ndarray, activity_names: dict,
gram_type: str) -> None:
"""Saves a few spectrogram or scaleogram images for each component if this
hasn't been done already, for debugging purposes.
Number of images saved: number of activities (6) x number of sets per
activity (3) x number of components (9).
"""
if gram_type == 'spectrograms':
data_path = Path(SPECTROGRAMS_DATA_FOLDER, SPECTROGRAMS_TEST_FILE_NAME)
images_folder = Path(SPECTROGRAMS_IMAGES_FOLDER)
elif gram_type == 'scaleograms':
data_path = Path(SCALEOGRAMS_DATA_FOLDER, SCALEOGRAMS_TEST_FILE_NAME)
images_folder = Path(SCALEOGRAMS_IMAGES_FOLDER)
else:
raise Exception('gram_type must be "spectrograms" or "scaleograms"')
# Create images folder if it doesn't exist.
images_folder.mkdir(exist_ok=True, parents=True)
# Open data file.
with h5py.File(data_path, 'r') as gram_file:
# If there are no images in the folder:
images = [item for item in images_folder.iterdir() if item.suffix == '.png']
if len(images) == 0:
print(' Saving images.')
num_sets_per_activity = 3
# Find all the unique activity numbers in our labels.
activities = np.unique(labels)
# For each activity present in the labels:
for activity in activities:
instance_indices = np.nonzero(labels == activity)[0][0:num_sets_per_activity]
# For each instance of that activity:
for instance_index in instance_indices:
# Read the image values from data file.
activity_grams = gram_file[gram_type][instance_index, :, :, :]
# For each of the 9 components:
num_components = activity_grams.shape[0]
for component in range(num_components):
gram = activity_grams[component, :, :]
activity_name = activity_names[activity]
file_name = f'{activity_name}_{instance_index + 1}_{component + 1}.png'
# Save the spectrogram or scaleogram.
utils_io.save_image(gram, images_folder, file_name)
def _normalize(my_array: np.ndarray) -> np.ndarray:
"""Normalizes an ndarray to values between 0 and 1.
The max value maps to 1, but the min value may not hit 0.
"""
return np.abs(my_array)/np.max(np.abs(my_array))
def _train_cnn_network(hyperparameter_dict: dict, full_train_labels: np.ndarray,
gram_type: str) -> Tuple[CNN, List, List, List, List]:
"""Trains a CNN using the specified hyperparameters.
"""
# Ensure reproducibility by giving PyTorch the same seed every time we train.
torch.manual_seed(1)
# Choose the data path.
if gram_type == 'spectrograms':
full_train_data_path = Path(SPECTROGRAMS_DATA_FOLDER, SPECTROGRAMS_TRAIN_FILE_NAME)
elif gram_type == 'scaleograms':
full_train_data_path = Path(SCALEOGRAMS_DATA_FOLDER, SCALEOGRAMS_TRAIN_FILE_NAME)
else:
raise Exception('gram_type must be "spectrograms" or "scaleograms"')
# Print hyperparameters.
print(f'Hyperparameters: {hyperparameter_dict}')
# Get hyperparameters.
learning_rate = hyperparameter_dict['learning_rate']
batch_size = hyperparameter_dict['batch_size']
optimizer_str = hyperparameter_dict['optimizer']
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
full_train_labels = full_train_labels - 1
# Get generators.
full_training_data = GramDataset(full_train_data_path, full_train_labels)
(training_generator, validation_generator) = utils_nn.get_trainval_generators(
full_training_data, batch_size, num_workers=0, training_fraction=0.8)
    # Create CNN.
cnn = CNN()
# Parameters should be moved to GPU before constructing the optimizer.
device = torch.device('cuda:0' if USE_CUDA else 'cpu')
cnn = cnn.to(device)
# Get optimizer.
optimizer = None
if optimizer_str == 'adam':
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
elif optimizer_str == 'sgd':
optimizer = torch.optim.SGD(cnn.parameters(), lr=learning_rate)
else:
raise Exception(f'Specified optimizer not valid: {optimizer_str}')
training_accuracy_list = []
training_loss_list = []
validation_accuracy_list = []
validation_loss_list = []
max_epochs = 10
for epoch in range(max_epochs):
print(f'Epoch {epoch}')
# Training data.
(training_accuracy, training_loss) = utils_nn.fit(cnn,
training_generator, optimizer, USE_CUDA)
training_accuracy_list.append(training_accuracy)
training_loss_list.append(training_loss)
# Validation data.
(validation_accuracy, validation_loss) = utils_nn.evaluate(cnn,
validation_generator, 'Validation', USE_CUDA)
validation_accuracy_list.append(validation_accuracy)
validation_loss_list.append(validation_loss)
return (cnn, training_accuracy_list, training_loss_list,
validation_accuracy_list, validation_loss_list)
def _get_cnn_hyperparameters() -> Hyperparameters:
"""Returns hyperparameters used to tune the network.
"""
# Spectrograms
# First pass:
# hyperparameter_values = Hyperparameters({
# 'learning_rate': [0.1, 0.01, 0.001],
# 'batch_size': [32, 64, 128],
# 'optimizer': ['adam', 'sgd']
# })
# Results:
# optimizer: adam, batch size: 64, learning rate: 0.001
# Adam with learning rate 0.001 seems to work best, regardless of batch size.
# Second pass:
# hyperparameter_values = Hyperparameters({
# 'learning_rate': [0.001],
# 'batch_size': [8, 16, 32, 64, 256],
# 'optimizer': ['adam']
# })
# Best:
# optimizer: adam, batch size: 64, learning rate: 0.001
# Scaleograms
# First pass:
# hyperparameter_values = Hyperparameters({
# 'learning_rate': [0.1, 0.01, 0.001],
# 'batch_size': [32, 64, 128],
# 'optimizer': ['adam', 'sgd']
# })
# Results:
# optimizer: adam, batch size: 32, learning rate: 0.001
# Adam with learning rate 0.001 seems to work best, regardless of batch size.
# Second pass:
hyperparameter_values = Hyperparameters({
'learning_rate': [0.001],
'batch_size': [8, 16, 32, 256],
'optimizer': ['adam']
})
# Best:
# optimizer: adam, batch size: 32, learning rate: 0.001
return hyperparameter_values
def _tune_cnn_hyperparameters(full_train_labels: np.ndarray,
gram_type: str) -> None:
"""Classifies spectrograms or scaleograms using a CNN.
"""
print(' Tuning hyperparameters.')
start_time = time.time()
# Hyperparameters to tune.
hyperparameter_values = _get_cnn_hyperparameters()
hyperparameter_combinations = hyperparameter_values.sample_combinations()
# Create Tensorboard writer.
with SummaryWriter(f'runs/{gram_type}', filename_suffix='') as writer:
# Hyperparameter loop.
for hyperparameter_dict in hyperparameter_combinations:
(_, _, _, validation_accuracy_list, _) = _train_cnn_network(
hyperparameter_dict, full_train_labels, gram_type)
writer.add_hparams(hyperparameter_dict,
{f'hparam/{gram_type}/validation_accuracy': validation_accuracy_list[-1]})
utils_io.print_elapsed_time(start_time, time.time())
def _test_cnn_network(cnn: CNN, test_labels: np.ndarray, hyperparameter_dict: dict,
gram_type: str) -> Tuple[float, float]:
"""Returns accuracy and loss of specified CNN for specified test data and
specified hyperparameters.
"""
if gram_type == 'spectrograms':
test_data_path = Path(SPECTROGRAMS_DATA_FOLDER, SPECTROGRAMS_TEST_FILE_NAME)
elif gram_type == 'scaleograms':
test_data_path = Path(SCALEOGRAMS_DATA_FOLDER, SCALEOGRAMS_TEST_FILE_NAME)
else:
raise Exception('gram_type must be "spectrograms" or "scaleograms"')
# There are 6 labels, and Pytorch expects them to go from 0 to 5.
test_labels = test_labels - 1
# Get test generator.
batch_size = hyperparameter_dict['batch_size']
test_data = GramDataset(test_data_path, test_labels)
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 0}
test_generator = data.DataLoader(test_data, **params)
(test_avg_accuracy, test_avg_loss) = utils_nn.evaluate(cnn, test_generator,
'Test', USE_CUDA)
return (test_avg_accuracy, test_avg_loss)
def _test_best_cnn_hyperparameters(full_train_labels: np.ndarray,
test_labels: np.ndarray, gram_type: str) -> None:
"""Use CNN with best hyperparameters to predict labels for test data.
Produces accuracy and loss graphs for training and validation data, as
well as accuracy and loss values for test data.
"""
hyperparameter_dict = {}
if gram_type == 'spectrograms':
hyperparameter_dict = {
'learning_rate': 0.001,
'batch_size': 64,
'optimizer': 'adam',
}
elif gram_type == 'scaleograms':
hyperparameter_dict = {
'learning_rate': 0.001,
'batch_size': 32,
'optimizer': 'adam',
}
else:
raise Exception('gram_type must be "spectrograms" or "scaleograms"')
(cnn, training_accuracy_list,
training_loss_list,
validation_accuracy_list,
validation_loss_list) = _train_cnn_network(hyperparameter_dict,
full_train_labels, gram_type)
utils_graph.graph_nn_results(training_accuracy_list, validation_accuracy_list,
f'Training and validation accuracy of classification of {gram_type}',
'Accuracy', PLOTS_FOLDER, f'{gram_type}_accuracy.html')
utils_graph.graph_nn_results(training_loss_list, validation_loss_list,
f'Training and validation loss of classification of {gram_type}',
'Loss', PLOTS_FOLDER, f'{gram_type}_loss.html')
_test_cnn_network(cnn, test_labels, hyperparameter_dict, gram_type)
with SummaryWriter(f'runs/{gram_type}', filename_suffix='') as writer:
num_epochs_train_val = len(training_accuracy_list)
for i in range(num_epochs_train_val):
writer.add_scalars(f'{gram_type}/accuracy', {
'training': training_accuracy_list[i],
'validation': validation_accuracy_list[i]
}, i)
writer.add_scalars(f'{gram_type}/loss', {
'training': training_loss_list[i],
'validation': validation_loss_list[i]
}, i)
# Spectrograms
# Test accuracy: 87.49%
# Test loss: 0.36
# Scaleograms
# Test accuracy: 89.26%
# Test loss: 0.44
def _get_gaussian_filter(b: float, b_list: np.ndarray,
sigma: float) -> np.ndarray:
"""Returns the values of a Gaussian filter centered at b, with standard
deviation sigma.
"""
a = 1/(2*sigma**2)
return np.exp(-a*(b_list-b)**2)
def _graph_gaussian_signal(signal: np.ndarray, g: np.ndarray) -> None:
"""Saves a graph containing a signal and the Gaussian function used to
filter it.
"""
# Plot Gaussian filter and signal overlayed in same graph.
time_list = np.arange(len(signal))
signal = _normalize(signal)
x = np.append([time_list], [time_list], axis=0)
y = np.append([g], [signal], axis=0)
utils_graph.graph_overlapping_lines(x, y,
['Gaussian filter', 'Signal'],
'Time', 'Amplitude',
'Example of a signal and corresponding Gaussian filter',
PLOTS_FOLDER, 'sample_gaussian_signal.html')
def _create_spectrogram(signal: np.ndarray,
graph_gaussian_signal: bool) -> np.ndarray:
"""Creates spectrogram for signal.
"""
n = len(signal)
# Times of the input signal.
time_list = np.arange(n)
# Horizontal axis of the output spectrogram (times where we will center the
# Gabor filter).
time_slide = np.arange(n)
# The vertical axis is the frequencies of the FFT, which is the same size
# as the input signal.
spectrogram = np.zeros((n, n), dtype=complex)
    for (i, center_time) in enumerate(time_slide):
        # sigma sets the Gaussian window width: smaller values give finer
        # time resolution at the cost of frequency resolution. The loop
        # variable is named center_time to avoid shadowing the time module.
        sigma = 3
        g = _get_gaussian_filter(center_time, time_list, sigma)
ug = signal * g
ugt = np.fft.fftshift(
|
np.fft.fft(ug)
|
numpy.fft.fft
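# --- Illustrative aside (not part of the original script) ---
# A minimal, self-contained sketch of the Gabor-transform step above: each
# spectrogram column is the shifted FFT of the signal multiplied by a
# Gaussian window centered at one time instant. The helper name gabor_column
# is an assumption for illustration only.
import numpy as np

def gabor_column(signal, center, sigma=3):
    times = np.arange(len(signal))
    window = np.exp(-(times - center) ** 2 / (2 * sigma ** 2))
    # FFT of the windowed signal, with zero frequency moved to the center.
    return np.fft.fftshift(np.fft.fft(signal * window))

demo_signal = np.sin(2 * np.pi * 0.1 * np.arange(64))
print(gabor_column(demo_signal, center=32).shape)  # (64,): one column per window center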
|
import numpy as np
import os
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
from utils.miscellaneous import get_GT_IoUs
import copy
def voc_ap(rec, prec, score, sc_part, use_07_metric=False):
"""
average precision calculations
[precision integrated to recall]
    :param rec: recall
    :param prec: precision
    :param score: detection scores (carried along; not used in the AP value here)
    :param sc_part: score partition (unused in this function)
    :param use_07_metric: 2007 metric is 11-recall-point based AP
:return: average precision
"""
if use_07_metric:
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
else:
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
mscore = np.concatenate(([0.], score, [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap =
|
np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
|
numpy.sum
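# --- Illustrative aside (toy numbers, not from the original evaluation) ---
# Worked example of the AP integration above: flatten the precision envelope,
# then sum (recall step) * (interpolated precision).
import numpy as np

rec = np.array([0.2, 0.4, 0.4, 0.8])
prec = np.array([1.0, 0.5, 0.67, 0.5])
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
for i in range(mpre.size - 1, 0, -1):
    mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])  # precision envelope
i = np.where(mrec[1:] != mrec[:-1])[0]              # recall change points
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
print(round(float(ap), 3))  # 0.534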
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 11 12:08:22 2017
@author: yazar
"""
import numpy as np
import pandas as pd
from scipy import linalg
from sklearn import preprocessing
from matplotlib import pyplot as plt
from scipy import optimize
from sklearn import metrics
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def _RSS(theta, X, y):
# number of training examples
m = len(y)
theta = theta.reshape(-1, 1)
y = y.reshape(-1, 1)
prediction = np.dot(X, theta)
mean_error = prediction - y
return 1/(2*m) * np.sum(np.power(mean_error, 2))
def _logisticCostFunc(theta, X, y):
""" compute cost for logistic regression
Parameters:
-----------
theta : ndarray, shape (n_features,)
Regression coefficients
X : {array-like}, shape (n_samples, n_features)
Training data. Should include intercept.
y : ndarray, shape (n_samples,)
Target values
Returns
-------
cost : float
cost evaluation using logistic cost function
"""
# number of training examples
m = len(y)
y = y.reshape(-1, 1)
theta = theta.reshape(-1, 1)
J = 1/m * (np.dot(-y.T, np.log(sigmoid(np.dot(X, theta)))) -
np.dot((1-y.T), np.log(1-sigmoid(
|
np.dot(X, theta)
|
numpy.dot
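# --- Illustrative aside (toy data, not from the original module) ---
# Sanity check of the logistic cost above: with theta = 0 every prediction
# is 0.5, so the cost must equal log(2) exactly.
import numpy as np

def _sigmoid(x):
    return 1 / (1 + np.exp(-x))

X = np.array([[1., 0.], [1., 1.], [1., 2.]])  # first column is the intercept
y = np.array([0., 0., 1.])
theta = np.zeros(2)
m = len(y)
J = 1/m * (np.dot(-y.T, np.log(_sigmoid(np.dot(X, theta)))) -
           np.dot(1 - y.T, np.log(1 - _sigmoid(np.dot(X, theta)))))
print(np.isclose(J, np.log(2)))  # True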
|
#!/usr/env/bin python
import glob
import numpy as np
import os
import subprocess
import sys
import random
import re
data_desc_lookup = {
'circles': 'Filled circles, no more than 1 circle removed by mask, at most 50\% of area remains',
'circles_non_filled_mask_loc': 'Non-filled circles, at least 10 pixels guaranteed in mask region',
'circles_non_filled_mixed': 'Non-filled circles, 50\% with at least 10 pixels guaranteed in mask region, 50\% with nothing in mask region',
'circles_non_filled': 'First 10000 guaranteed maximum 1 circle missing, 10 pixels minimum. Remaining 90000, masks placed arbitrarily',
'ellipses': 'Ten pixels minimum in masked region, ellipses with major/minor axes ranging from 3 to 20 pixels',
'ellipses_overfit': 'First 1000 data points from ellipses set, for testing whether a model can overfit'
}
# assert(len(sys.argv[1:]))
# models = sys.argv[1:]
models = open('models_to_test').read().split('\n')
models = [m for m in models if m]  # drop empty entries (e.g. a trailing newline)
weight_fun_re = re.compile(r'diff_in_numerator: (\w+)')
local_loss_re = re.compile(r'local_loss: (\w+)')
grad_loss_re = re.compile(r'grad_loss: (\w+)')
weighted_grad_re = re.compile(r'weighted_grad: (\w+)')
num_discrims_re = re.compile(r'num_discrims: (\d+)')
batch_size_re = re.compile(r'batchSize: (\d+)')
learning_rate_re = re.compile(r'lr: ([\d\.e-]+)')
dataroot_re = re.compile(r'dataroot: ([\w/-]+)')
with open('circle_results_report.tex', 'w') as out_file:
out_file.write('\n'.join([r'\documentclass{article}',
r'\usepackage{graphicx}',
r'\usepackage{subcaption}',
r'\usepackage{amsmath}',
r'\usepackage[table]{xcolor}',
'\\usepackage{geometry}\n'
'\\geometry{\n'
'a4paper,\n'
'total={170mm,257mm},\n'
'left=20mm,\n'
'top=20mm,\n'
'}\n',
'\\begin{document}',
'\\tableofcontents\n'
]))
out_file.write('\n')
out_file.write('\n'.join(
[
r'\section{Conventions}',
'The dataset description refers to the training dataset. All tests are conducted on a dataset where the mask region has been placed arbitrarily\n',
'$F_k$ refers to pixels of a class $k$ in the ground truth image.\n',
"$F_{k'}$ refers to pixels of a class $k$ in the output image.\n",
'$T$ refers to the total number of pixels\n',
'Global loss means L2 loss is taken on the whole image, local loss is just within the mask region\n',
'Gradient loss is L2 loss on the gradient images, and the region of interest is the same as the overall L2 loss (i.e. global or local)\n',
'Reported L2 error is averaged over 1000 images\n'
]
))
out_file.write('\\clearpage\n')
model_params = [{} for _ in range(len(models))]
smallest_l2_global = np.Inf
smallest_l2_global_idx = -1
smallest_l2_local = np.Inf
smallest_l2_local_idx = -1
biggest_l2_global = -np.Inf
biggest_l2_global_idx = -1
biggest_l2_local = -np.Inf
biggest_l2_local_idx = -1
for j, model in enumerate(models):
results_text = open(os.path.join('results', model, 'test_latest', '{}_results'.format(model))).read()
global_losses, local_losses = zip(*[[float(y) for y in x.split(', ')] for x in results_text.splitlines()])
model_params[j]['l2_loss_global'] =
|
np.mean(global_losses)
|
numpy.mean
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import rospy
import cv2
import numpy as np
from std_msgs.msg import String
from std_msgs.msg import Int32
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
class image_converter:
def __init__(self):
self.image_pub = rospy.Publisher("image_topic", Image, queue_size=1)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/camera/color/image_raw",Image,self.callback)
def callback(self,data):
        # NOTE: better practice would be to create this Publisher once in
        # __init__ rather than on every callback.
        pub = rospy.Publisher("bottle_size", Int32, queue_size=1)
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
        # Convert from the RGB color space to the HSV color space
hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
        # Threshold setting 1 (here we extract red)
color_min = np.array([0,200,50])
color_max =
|
np.array([30,255,255])
|
numpy.array
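# --- Illustrative aside (synthetic image, no ROS required) ---
# Standalone sketch of the HSV-thresholding pattern above; the bounds mirror
# the snippet and cv2.inRange produces the binary mask.
import cv2
import numpy as np

bgr = np.zeros((10, 10, 3), dtype=np.uint8)
bgr[:5, :5] = (0, 0, 255)  # a pure-red block in BGR
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
color_min = np.array([0, 200, 50])
color_max = np.array([30, 255, 255])
mask = cv2.inRange(hsv, color_min, color_max)  # 255 where pixels fall in range
print(int(np.count_nonzero(mask)))  # 25 red pixels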
|
'''
Adjustments to the pysptools library to support linear spectral mixture
analysis (LSMA). Includes classes and functions:
* `PPI`
* `NFINDR`
* `FCLSAbundanceMapper`
* `combine_endmembers_and_normalize()`
* `convex_hull_graham()`
* `endmembers_by_maximum_angle()`
* `endmembers_by_maximum_area()`
* `endmembers_by_maximum_volume()`
* `endmembers_by_query()`
* `hall_rectification()`
* `iterate_endmember_combinations()`
* `normalize_reflectance_within_image()`
* `predict_spectra_from_abundance()`
* `point_to_pixel_geometry()`
* `ravel()`
* `ravel_and_filter()`
* `report_raster_dynamic_range()`
* `subtract_endmember_and_normalize()`
'''
import itertools
import json
import os
import re
import numpy as np
from concurrent.futures import ProcessPoolExecutor
from functools import reduce, partial, wraps
from unmixing.transform import mnf_rotation
from unmixing.utils import array_to_raster, as_array, as_raster, dump_raster, partition, xy_to_pixel, pixel_to_xy, spectra_at_xy, rmse
from lxml import etree
from osgeo import gdal, ogr, osr
from pykml.factory import KML_ElementMaker as KML
import pysptools.eea as sp_extract
import pysptools.abundance_maps as sp_abundance
import pysptools.classification as sp_classify
import pysptools.material_count as sp_matcount
class AbstractAbundanceMapper(object):
def __init__(self, mixed_raster, gt, wkt, nodata=-9999, processes=1):
assert np.all(np.greater(mixed_raster.shape, 0)), 'Raster array cannot have any zero-length axis'
self.shp = mixed_raster.shape
self.mixed_raster = mixed_raster
self.hsi = mixed_raster.T # HSI form: (p x m x n) as (n x m x p)
self.gt = gt
self.wkt = wkt
self.nodata = nodata
self.num_processes = processes
class AbstractExtractor(object):
def get_idx_as_kml(self, path, gt, wkt, data_dict=None):
'''
Exports a KML file containing the locations of the extracted endmembers
as point markers.
'''
# Despite that the HSI cube is the transpose of our raster array, the
# coordinates returned by `get_idx()` are already in the right order
# (longitude, latitude) because the (m by n) == (y by x) order
# transposed is (n by m) == (x by y); the row index is the latitude
# and the column index is the longitude.
coords = pixel_to_xy(self.get_idx(), gt=gt, wkt=wkt, dd=True)
if any(map(lambda x: x[0] == 0 and x[1] == 0, self.get_idx())):
print('Warning: Target endmember chosen at (0,0)')
print('One or more endmembers may be photometric shade')
if data_dict is None:
data_dict = {
'wavelength': range(1, len(coords) + 1),
'wavelength units': 'MNF Component',
'z plot titles': ['', '']
}
ico = 'http://maps.google.com/mapfiles/kml/paddle/%i.png'
pmarks = []
for i, pair in enumerate(coords):
pmarks.append(KML.Placemark(
KML.Style(
KML.IconStyle(
KML.Icon(KML.href(ico % (i + 1))))),
KML.name(data_dict['wavelength units'] + ' %d' % (i + 1)),
KML.Point(KML.coordinates('%f,%f' % pair))))
doc = KML.kml(KML.Folder(*pmarks))
with open(path, 'wb') as source:
source.write(etree.tostring(doc, pretty_print=True))
def get_idx_as_shp(self, path, gt, wkt):
'''
Exports a Shapefile containing the locations of the extracted
endmembers. Assumes the coordinates are in decimal degrees.
'''
coords = pixel_to_xy(self.get_idx(), gt=gt, wkt=wkt, dd=True)
driver = ogr.GetDriverByName('ESRI Shapefile')
ds = driver.CreateDataSource(path)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
layer = ds.CreateLayer(path.split('.')[0], srs, ogr.wkbPoint)
for pair in coords:
feature = ogr.Feature(layer.GetLayerDefn())
# Create the point from the Well Known Text
point = ogr.CreateGeometryFromWkt('POINT(%f %f)' % pair)
feature.SetGeometry(point) # Set the feature geometry
layer.CreateFeature(feature) # Create the feature in the layer
feature.Destroy() # Destroy the feature to free resources
# Destroy the data source to free resources
ds.Destroy()
class PPI(sp_extract.PPI, AbstractExtractor):
pass
class NFINDR(sp_extract.NFINDR, AbstractExtractor):
pass
class FCLSAbundanceMapper(AbstractAbundanceMapper):
'''
A class for generating an abundance map, containing both the raw spectral
(mixed) data and the logic to unmix the data into an abundance map with
the fully constrained least-squares (FCLS) approach. The "full"
constraints are the sum-to-one and non-negativity constraints. Given
q endmembers and p spectral bands, the mapper is forced to find
abundances within a simplex in a (q-1)-dimensional subspace.
    NOTE: The mixed_raster provided at instantiation is assumed to correspond
exactly to the mixing space used to induce endmembers; if mixed_raster
was MNF- or PCA-transformed, for instance, the endmember spectra provided
in, e.g., map_abundances(), must match this transformation exactly.
Arguments:
mixed_raster The raster to be unmixed; should NOT be in HSI form
but should be MNF- or PCA-transformed to match any
endmember spectra provided
gt A GDAL GeoTransform tuple for the mixed_raster
wkt Projection information as Well-Known Text for the
mixed_raster
nodata The NoData value for mixed_raster
processes The number of processes to create in mixture analysis
'''
def __init__(self, *args, **kwargs):
super(FCLSAbundanceMapper, self).__init__(*args, **kwargs)
self.mapper = sp_abundance.FCLS()
def __lsma__(self, cases, endmembers):
# For regular LSMA with single endmember spectra
# c is number of pixels, k is number of bands
c, k = cases.shape if len(cases.shape) > 1 else (1, cases.shape[0])
return self.mapper.map(cases.reshape((1, c, k)), endmembers,
normalize = False)
def __mesma__(self, array_pairs):
# For multiple endmember spectra, in chunks
cases, endmembers = array_pairs
# c is number of pixels, k is number of bands
c, k = cases.shape if len(cases.shape) > 1 else (1, cases.shape[0])
return [
self.mapper.map(cases[i,...].reshape((1, 1, k)), endmembers[i,...],
normalize = False) for i in range(0, c)
]
def __mesma2__(self, array_pairs):
# For multiple endmember spectra, pixel-wise
# NOTE: This pixel-wise implementation might be slower than __mesma__
# for large arrays
cases, endmembers = array_pairs
# c is number of pixels, k is number of bands
c, k = cases.shape if len(cases.shape) > 1 else (1, cases.shape[0])
return self.mapper.map(cases.reshape((1, c, k)), endmembers,
normalize = False)
def map_abundance(self, endmembers, pixelwise=False):
'''
Arguments:
endmembers A numpy.ndarray of endmembers; either (q x p) array
of q endmembers and p bands (for regular LSMA) or a
(c x q x p) array, where c = m*n, for multiple
endmember spectra for each pixel.
Returns: An (m x n x q) numpy.ndarray (in HSI form) that contains
the abundances for each of q endmember types.
'''
q = endmembers.shape[-2]
# FCLS with the sum-to-one constraint has an extra degree of freedom so it
# is able to form a simplex of q corners in (q-1) dimensions:
# q <= n (Settle and Drake, 1993)
k = q - 1 # Find q corners of simplex in (q-1) dimensions
endmembers = endmembers[...,0:k]
shp = self.hsi.shape
base_array = self.hsi[:,:,0:k].reshape((shp[0] * shp[1], k))
# Get indices for each process' work range
work = partition(base_array, self.num_processes, axis=0)
with ProcessPoolExecutor(max_workers = self.num_processes) as executor:
# We're working with multiple endmembers
if endmembers.ndim == 3 and pixelwise:
result = executor.map(self.__mesma2__, [ # Work done pixel-wise
(base_array[i,...], endmembers[i,...]) for i in range(0, base_array.shape[0])
])
elif endmembers.ndim == 3:
result = executor.map(self.__mesma__, [
(base_array[i:j,...], endmembers[i:j,...]) for i, j in work
])
# We're working with a single endmember per class
else:
# Curry an unmixing function with the present endmember array
unmix = partial(self.__lsma__, endmembers = endmembers)
result = executor.map(unmix, [
base_array[i:j,...] for i, j in work
])
combined_result = list(result) # Executes the multiprocess suite
if endmembers.ndim == 3 and not pixelwise:
# When chunking with multiple endmembers, we get list of lists
ext_array = [y for x in combined_result for y in x] # Flatten once
return np.concatenate(ext_array, axis = 1)\
.reshape((shp[0], shp[1], q))
return np.concatenate(combined_result, axis = 1)\
.reshape((shp[0], shp[1], q))
def validate_by_forward_model(
self, ref_image, abundances, ref_spectra=None,
ref_em_locations=None, dd=False, nodata=-9999, r=10000,
as_pct=True, convert_nodata=False):
'''
Validates LSMA result in the forward model of reflectance, i.e.,
compares the observed reflectance in the original (mixed) image to the
abundance predicted by a forward model of reflectance using the
provided endmember spectra. NOTE: Does not apply in the case of
multiple endmember spectra; requires only one spectral profile per
endmember type.
Arguments:
ref_image A raster array of the reference spectra (not MNF-
transformed data).
abundances A raster array of abundances; a (q x m x n) array for
q abundance types (q endmembers).
ref_spectra With single endmember spectra, user can provide the
reference spectra, e.g., the observed reflectance for
each endmember (not MNF spectra).
ref_em_locations With single endmember spectra, user can provide
the coordinates of each endmember, so that reference
spectra can be extracted for validation.
dd True if ref_em_locations provided and the coordinates
are in decimal degrees.
nodata The NoData value to use.
r The number of random samples to take in calculating
RMSE.
as_pct Report normalized RMSE (as a percentage).
convert_nodata
True to convert all NoData values to zero (as in zero
reflectance)
'''
rastr = ref_image.copy()
assert (ref_spectra is not None) or (ref_em_locations is not None), 'When single endmember spectra are used, either ref_spectra or ref_em_locations must be provided'
if ref_spectra is not None:
assert ref_spectra.shape[0] == abundances.shape[0], 'One reference spectra must be provided for each endmember type in abundance map'
else:
# Get the spectra for each endmember from the reference dataset
ref_spectra = spectra_at_xy(ref_image, ref_em_locations,
self.gt, self.wkt, dd = dd)
# Convert the NoData values to zero reflectance
if convert_nodata:
rastr[rastr == nodata] = 0
ref_spectra[ref_spectra == nodata] = 0
shp = rastr.shape # Reshape the arrays
arr = rastr.reshape((shp[0], shp[1]*shp[2]))
# Generate random sampling indices
idx = np.random.choice(np.arange(0, arr.shape[1]), r)
# Get the predicted reflectances
preds = predict_spectra_from_abundance(ravel(abundances), ref_spectra)
assert preds.shape == arr.shape, 'Prediction and observation matrices are not the same size'
# Take the mean RMSE (sum of RMSE divided by number of pixels), after
# the residuals are normalized by the number of endmembers
rmse_value = rmse(arr, preds, idx, n = ref_spectra.shape[0], nodata = nodata).sum() / r
norm = 1
if as_pct:
# Divide by the range of the measured data; minimum is zero
norm = arr.max()
return str(round(rmse_value / norm * 100, 2)) + '%'
return round(rmse_value / norm, 2)
def combine_endmembers_and_normalize(
abundances, es=(1, 2), at_end=True, nodata=-9999):
'''
Combines two endmembers from a fraction image into a single endmember.
If the original endmember abundances summed to one, they will sum to one
in the resulting image as well. Arguments:
abundances The raster array of endmember abundances
es A two-element tuple of the endmembers indices to combine
at_end Place the combined endmembers at the end of the array?
nodata The NoData value to ignore
'''
shp = abundances.shape
rast = abundances.copy() # Copy raster array
rast[rast == nodata] = 0 # Replace NoData values
c0 = rast[es[0], ...] # Get the endmembers to be combined
c1 = rast[es[1], ...]
# Stack the remaining bands
abunds = []
for e in range(0, shp[0]):
if e not in es:
abunds.append(rast[e, ...])
if at_end:
comps = (abunds, c0 + c1.reshape(1, shp[1], shp[2]))
else:
comps = (c0 + c1.reshape(1, shp[1], shp[2]), abunds)
rast = None
return np.vstack(comps)
def convex_hull_graham(points, indices=False):
'''
Returns points on convex hull of an array of points in CCW order according
to Graham's scan algorithm. By <NAME> <<EMAIL>>.
Arguments:
points The points for which a convex hull is sought
indices True to return a tuple of (indices, hull)
'''
TURN_LEFT, TURN_RIGHT, TURN_NONE = (1, -1, 0)
def cmp(a, b):
return (a > b) - (a < b)
def turn(p, q, r):
return cmp((q[0] - p[0])*(r[1] - p[1]) - (r[0] - p[0])*(q[1] - p[1]), 0)
def keep_left(hull, r):
while len(hull) > 1 and turn(hull[-2], hull[-1], r) != TURN_LEFT:
hull.pop()
if not len(hull) or hull[-1] != r:
hull.append(r)
return hull
    pts_sorted = sorted(points)
    lower = reduce(keep_left, pts_sorted, [])
    upper = reduce(keep_left, reversed(pts_sorted), [])
    # Join the two chains, dropping the duplicated endpoints of the upper one.
    lower.extend(upper[i] for i in range(1, len(upper) - 1))
    hull = lower
if indices:
return ([points.index(h) for h in hull], hull)
return hull
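# Example (illustrative input, not from the original module): the interior
# point is dropped and the hull is returned in CCW order.
#
#   >>> convex_hull_graham([(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)])
#   [(0, 0), (2, 0), (2, 2), (0, 2)]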
def endmembers_by_maximum_angle(
rast, targets, ref_target, gt=None, wkt=None, dd=False):
'''
Locates endmembers in (2-dimensional) feature space as the triad (3-corner
simplex) that maximizes the angle formed with a reference endmember target.
Returns the endmember coordinates in feature (not geographic) space.
Arguments:
rast The raster that describes the feature space
ref_target The coordinates (in feature space) of a point held fixed
targets The coordinates (in feature space) of all other points
gt The GDAL GeoTransform
wkt The GDAL WKT projection
dd True for coordinates in decimal degrees
Angle calculation from:
http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python/13849249#13849249
'''
def unit_vector(vector):
# Returns the unit vector of the vector.
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
# Returns the angle in radians between vectors 'v1' and 'v2'
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
# Can accept either a gdal.Dataset or numpy.array instance
if not isinstance(rast, np.ndarray):
rastr = rast.ReadAsArray()
gt = rast.GetGeoTransform()
wkt = rast.GetProjection()
else:
assert gt is not None and wkt is not None, 'gt and wkt arguments required'
rastr = rast.copy()
# Get the spectra for these targets; this works in two dimensions only
ref_spec = spectra_at_xy(rast, (ref_target,), gt, wkt)[...,0:2].reshape((2,))
target_specs = spectra_at_xy(rast, targets, gt, wkt)[...,0:2]
# All combinations of 2 of the targets
combos = list(itertools.combinations(range(max(target_specs.shape)), 2))
spec_map = [
[target_specs[i,:] for i in triad] for triad in combos
]
coord_map = [
[targets[i] for i in triad] for triad in combos
]
# Find vectors from ref_spec, not from origin (by vector subtraction)
# If (cx) is the ref_spec vector (line from origin to ref_spec),
# and (ca) and (cb) are the vectors to the points that form the angle
# (axb), then [(cx) - (ca)] and [(cx) - (cb)] are the vectors from point
# x to the points a and b, respectively.
vectors = [(ref_spec - a, ref_spec - b) for a, b in spec_map]
angles = [angle_between(v1, v2) for v1, v2 in vectors]
idx = angles.index(max(angles))
specs = spec_map[idx] # The optimized spectra
locs = coord_map[idx] # The optimized coordinates
specs.insert(0, ref_spec) # Add the reference target
locs.insert(0, ref_target) # Add the reference coordinates
return (
|
np.array(specs)
|
numpy.array
|
import numpy as np
def accuracy_score(Y_true, Y_pred, sample_weight=None):
assert len(Y_true) == len(Y_pred)
n_samples = len(Y_true)
    sample_weight = np.full(n_samples, 1 / n_samples) if sample_weight is None else np.asarray(sample_weight)
return np.sum((np.array(Y_true) == np.array(Y_pred)) * sample_weight)
def f1_score(Y_true, Y_pred, average: str = None):
'''
    :param Y_true: ground-truth labels
    :param Y_pred: predicted labels
    :param average: averaging mode; optional values are 'micro', 'macro', 'weighted'.
        When this parameter is None, the per-class F1 scores are returned.
:return:
'''
uni_labels, label_weight = np.unique(Y_true, return_counts=True)
    label_weight = label_weight / len(Y_true)  # class distribution, used as label weights
    total_TP = total_FP = total_FN = 0  # running totals for computing micro F1
f1_scores = list()
for label in uni_labels:
TP = np.sum((np.array(Y_pred) == label) * (np.array(Y_true) == label))
FP = np.sum((np.array(Y_pred) == label) * (np.array(Y_true) != label))
FN = np.sum((np.array(Y_pred) != label) * (np.array(Y_true) == label))
total_TP += TP
total_FP += FP
total_FN += FN
        cur_precision = TP / (TP + FP) if TP + FP > 0 else 0  # guard against empty predictions
        cur_recall = TP / (TP + FN) if TP + FN > 0 else 0
cur_f1 = 0 if cur_precision == 0 or cur_recall == 0 else 2 * cur_precision * cur_recall / (
cur_precision + cur_recall)
f1_scores.append(cur_f1)
f1_scores = np.array(f1_scores)
if average == 'micro':
precision = total_TP / (total_TP + total_FP)
recall = total_TP / (total_TP + total_FN)
return 0 if precision == 0 or recall == 0 else 2 * precision * recall / (precision + recall)
elif average == 'macro':
return np.sum(f1_scores) / len(uni_labels)
elif average == 'weighted':
return
|
np.sum(f1_scores * label_weight)
|
numpy.sum
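# --- Illustrative usage (toy labels, not from the original module) ---
import numpy as np

Y_true = [0, 0, 1, 1, 2, 2]
Y_pred = [0, 1, 1, 1, 2, 0]
print(f1_score(Y_true, Y_pred, average='micro'))     # 2/3
print(f1_score(Y_true, Y_pred, average='macro'))
print(f1_score(Y_true, Y_pred, average='weighted'))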
|
from __future__ import division, print_function, absolute_import
import numpy as np
from .alphas import alpha_soave, alpha_sv, alpha_rk
from ..constants import R, r
from scipy.optimize import brentq, newton
def psat(T, cubic, P0=None):
"""
Computes saturation pressure with cubic eos
Parameters
----------
T : float,
Absolute temperature [K]
cubic : object
eos object
Returns
-------
P : float
saturation pressure [bar]
"""
a = cubic.a_eos(T)
b = cubic.b
c1 = cubic.c1
c2 = cubic.c2
emin = cubic.emin
c = cubic.c
e = a/(b*R*T)
if P0 is None:
if e > emin: # Zero pressure initiation
U = (e-c1-c2-np.sqrt((e-c1-c2)**2-4*(c1*c2+e)))/2
if c1 == 0 and c2 == 0:
S = -1-np.log(U-1)-e/U
else:
S = -1-np.log(U-1)-e*np.log((U+c1)/(U+c2))/(c1-c2)
P = np.exp(S)*R*T/b # bar
else: # Pmin Pmax initiation
a1 = -R*T
a2 = -2*b*R*T*(c1+c2)+2*a
a3 = -R*T*b**2*(c1**2+4*c1*c2+c2**2)+a*b*(c1+c2-4)
a4 = -R*T*2*b**3*c1*c2*(c1+c2)+2*a*b**2*(1-c1-c2)
a5 = -R*T*b**4*c1*c2+a*b**3*(c1+c2)
V = np.roots([a1, a2, a3, a4, a5])
V = V[np.isreal(V)]
V = V[V > b]
P = cubic(T, V)
P[P < 0] = 0.
P = P.mean()
else:
P = P0
itmax = 20
RT = R * T
for k in range(itmax):
A = a*P/RT**2
B = b*P/RT
C = c*P/RT
Z = cubic._Zroot(A, B, C)
Zl = min(Z)
Zv = max(Z)
fugL = cubic._logfug_aux(Zl, A, B, C)
fugV = cubic._logfug_aux(Zv, A, B, C)
FO = fugV-fugL
dFO = (Zv-Zl)/P
dP = FO/dFO
P -= dP
if abs(dP) < 1e-8:
break
vl = Zl*RT/P
vv = Zv*RT/P
return P, vl, vv
def fobj_tsat(T, P, cubic):
a = cubic.a_eos(T)
b = cubic.b
c = cubic.c
RT = R*T
A = a*P/(RT)**2
B = b*P/(RT)
C = c*P/RT
Z = cubic._Zroot(A, B, C)
Zl = min(Z)
Zv = max(Z)
fugL = cubic._logfug_aux(Zl, A, B, C)
fugV = cubic._logfug_aux(Zv, A, B, C)
FO = fugV-fugL
return FO
def tsat(cubic, P, T0=None, Tbounds=None):
"""
Computes saturation temperature with cubic eos
Parameters
----------
cubic: object
cubic eos object
P: float
saturation pressure [bar]
T0 : float, optional
Temperature to start iterations [K]
Tbounds : tuple, optional
(Tmin, Tmax) Temperature interval to start iterations [K]
Returns
-------
T : float
saturation temperature [K]
vl: float
saturation liquid volume [cm3/mol]
vv: float
saturation vapor volume [cm3/mol]
"""
bool1 = T0 is None
bool2 = Tbounds is None
if bool1 and bool2:
raise Exception('You must provide either Tbounds or T0')
if not bool1:
        # newton returns the root directly when full_output=False
        Tsat = newton(fobj_tsat, x0=T0, args=(P, cubic),
                      full_output=False)
elif not bool2:
sol = brentq(fobj_tsat, Tbounds[0], Tbounds[1], args=(P, cubic),
full_output=False)
Tsat = sol
vl = 1./cubic.density(Tsat, P, 'L')
vv = 1./cubic.density(Tsat, P, 'V')
out = (Tsat, vl, vv)
return out
class vtcpure():
'''
Pure component Cubic EoS Object
    This object implements methods for phase equilibrium as well as
    interfacial property calculations.
Parameters
----------
pure : object
pure component created with component class
c1, c2 : float
constants of cubic EoS
oma, omb : float
constants of cubic EoS
alpha_eos : function
        function that gives temperature dependence to the attractive term of the EoS
Attributes
----------
Tc: float
        critical temperature [K]
Pc: float
critical pressure [bar]
w: float
acentric factor
cii : array_like
influence factor for SGT polynomial [J m5 mol-2]
Mw : float
molar weight of the fluid [g mol-1]
Methods
-------
a_eos : computes the attractive term of cubic eos.
psat : computes saturation pressure.
tsat : computes saturation temperature
density : computes density of mixture.
logfug : computes fugacity coefficient.
    a0ad : computes dimensionless Helmholtz energy density.
    muad : computes dimensionless chemical potential.
    dOm : computes dimensionless Thermodynamic Grand Potential.
    ci : computes influence parameters matrix for SGT.
    sgt_adim : computes dimensionless factors for SGT.
EntropyR : computes residual Entropy.
EnthalpyR: computes residual Enthalpy.
CvR : computes residual isochoric heat capacity.
CpR : computes residual isobaric heat capacity.
speed_sound : computes the speed of sound.
'''
def __init__(self, pure, c1, c2, oma, omb, alpha_eos):
self.c1 = c1
self.c2 = c2
self.oma = oma
self.omb = omb
self.alpha_eos = alpha_eos
self.emin = 2+self.c1+self.c2+2*np.sqrt((1+self.c1)*(1+self.c2))
self.Mw = pure.Mw
self.Tc = np.array(pure.Tc, ndmin=1) # Critical temperature in K
self.Pc = np.array(pure.Pc, ndmin=1) # Critical Pressure in bar
self.w = np.array(pure.w, ndmin=1)
self.cii = np.array(pure.cii, ndmin=1)
self.b = self.omb*R*self.Tc/self.Pc
self.c = np.array(pure.c, ndmin=1)
def __call__(self, T, v):
b = self.b
a = self.a_eos(T)
c1 = self.c1
c2 = self.c2
return R*T/(v - b) - a/((v+c1*b)*(v+c2*b))
def a_eos(self, T):
"""
a_eos(T)
        Method that computes the attractive term of the cubic EoS at fixed T (in K)
Parameters
----------
T : float
absolute temperature [K]
Returns
-------
a : float
            attractive term [bar cm6 mol-2]
"""
        # NOTE: self.k (the alpha-function parameter) is assumed to be set on
        # the object before a_eos is called.
        alpha = self.alpha_eos(T, self.k, self.Tc)
return self.oma*(R*self.Tc)**2*alpha/self.Pc
def psat(self, T, P0=None):
"""
psat(T, P0)
Method that computes saturation pressure at given temperature
Parameters
----------
T : float
absolute temperature [K]
P0 : float, optional
initial value to find saturation pressure [bar], None for automatic
initiation
Returns
-------
psat : float
saturation pressure [bar]
"""
p0, vl, vv = psat(T, self, P0)
return p0, vl, vv
def tsat(self, P, T0=None, Tbounds=None):
"""
tsat(P, T0, Tbounds)
Method that computes saturation temperature at given pressure
Parameters
----------
P : float
pressure [bar]
T0 : float, optional
Temperature to start iterations [K]
Tbounds : tuple, optional
(Tmin, Tmax) Temperature interval to start iterations [K]
Returns
-------
tsat : float
saturation pressure [bar]
vl: float
saturation liquid volume [cm3/mol]
vv: float
saturation vapor volume [cm3/mol]
"""
Tsat, vl, vv = tsat(self, P, T0, Tbounds)
return Tsat, vl, vv
def _Zroot(self, A, B, C):
a1 = (self.c1+self.c2-1)*B-1 + 3 * C
a2 = self.c1*self.c2*B**2-(self.c1+self.c2)*(B**2+B)+A
a2 += 3*C**2 + 2*C*(-1 + B*(-1 + self.c1 + self.c2))
a3 = A*(-B+C)+(-1-B+C)*(C+self.c1*B)*(C+self.c2*B)
Zpol = [1., a1, a2, a3]
Zroots = np.roots(Zpol)
Zroots = np.real(Zroots[np.imag(Zroots) == 0])
Zroots = Zroots[Zroots > (B - C)]
return Zroots
def _volume_solver(self, P, RT, D, B, C, state):
Dr = D*P/RT**2
Br = B*P/RT
Cr = C*P/RT
if state == 'L':
Z = np.min(self._Zroot(Dr, Br, Cr))
elif state == 'V':
Z = np.max(self._Zroot(Dr, Br, Cr))
else:
raise Exception('Valid states: L for liquids and V for vapor ')
V = (RT*Z)/P
return V
def density(self, T, P, state):
"""
density(T, P, state)
Method that computes the density of the mixture at given temperature
and pressure.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [bar]
state : string
'L' for liquid phase and 'V' for vapour phase
Returns
-------
density: float
molar density [mol/cm3]
"""
RT = R * T
A = self.a_eos(T)*P/(RT)**2
B = self.b*P/(RT)
C = self.c*P/(RT)
        if state == 'L':
            Z = min(self._Zroot(A, B, C))
        elif state == 'V':
            Z = max(self._Zroot(A, B, C))
        else:
            raise Exception('Valid states: L for liquids and V for vapor')
return P/(R*T*Z)
def _logfug_aux(self, Z, A, B, C):
c1 = self.c1
c2 = self.c2
logfug = Z-1-np.log(Z+C-B)
logfug -= (A/(c2-c1)/B)*np.log((Z+C+c2*B)/(Z+C+c1*B))
return logfug
def logfug(self, T, P, state):
"""
logfug(T, P, state)
Method that computes the fugacity coefficient at given temperature
and pressure.
Parameters
----------
T : float
absolute temperature [K]
P : float
pressure [bar]
state : string
'L' for liquid phase and 'V' for vapour phase
Returns
-------
logfug: float
fugacity coefficient
v : float
volume of the fluid [cm3/mol]
"""
RT = R * T
A = self.a_eos(T)*P/(RT)**2
B = self.b*P/(RT)
C = self.c*P/(RT)
        if state == 'L':
            Z = min(self._Zroot(A, B, C))
        elif state == 'V':
            Z = max(self._Zroot(A, B, C))
        else:
            raise Exception('Valid states: L for liquids and V for vapor')
logfug = self._logfug_aux(Z, A, B, C)
return logfug
def a0ad(self, ro, T):
"""
a0ad(ro, T)
        Method that computes the dimensionless Helmholtz energy density at
        given density and temperature.
        Parameters
        ----------
        ro : float
            dimensionless density [rho = rho * b]
        T : float
            dimensionless absolute temperature [Adim]
        Returns
        -------
        a0ad: float
            dimensionless Helmholtz energy density [Adim]
"""
c1 = self.c1
c2 = self.c2
cro = self.c * ro / self.b
Pref = 1
a0 = -T*ro*np.log(1-ro+cro)
a0 += -T*ro*np.log(Pref/(T*ro))
a0 += -ro*np.log((1+c2*ro + cro)/(1+c1*ro+cro))/((c2-c1))
return a0
def muad(self, ro, T):
"""
muad(ro, T)
        Method that computes the dimensionless chemical potential at given
        density and temperature.
        Parameters
        ----------
        ro : float
            dimensionless density [rho = rho * b]
        T : float
            dimensionless absolute temperature [Adim]
        Returns
        -------
        muad: float
            dimensionless chemical potential [Adim]
"""
c1 = self.c1
c2 = self.c2
cro = self.c * ro / self.b
Pref = 1
mu = - ro/((1+cro+c1*ro)*(1+cro+c2*ro))
mu += T / (1-ro+cro)
mu += np.log((1+cro+c2*ro)/(1+cro+c1*ro))/(c1-c2)
mu -= T * np.log(1-ro+cro)
mu -= T * np.log(Pref/T/ro)
return mu
def dOm(self, roa, Tad, mu, Psat):
r"""
        dOm(roa, Tad, mu, Psat)
        Method that computes the dimensionless Thermodynamic Grand Potential
        at given density and temperature.
        Parameters
        ----------
        roa : float
            dimensionless density [rho = rho * b]
        Tad : float
            dimensionless absolute temperature [Adim]
        mu : float
            dimensionless chemical potential at equilibrium [Adim]
        Psat : float
            dimensionless pressure at equilibrium [Adim]
        Returns
        -------
        Out: float
            dimensionless Thermodynamic Grand Potential [Adim]
"""
return self.a0ad(roa, Tad)-roa*mu+Psat
def ci(self, T):
'''
ci(T)
Method that evaluates the polynomial for the influence parameters used
in the SGT theory for surface tension calculations.
Parameters
----------
T : float
absolute temperature [K]
Returns
-------
ci: float
influence parameters [J m5 mol-2]
'''
return np.polyval(self.cii, T)
def sgt_adim_fit(self, T):
a = self.a_eos(T)
b = self.b
Tfactor = R*b/a
Pfactor = b**2/a
rofactor = b
tenfactor = 1000*np.sqrt(a)/b**2*(np.sqrt(101325/1.01325)*100**3)
return Tfactor, Pfactor, rofactor, tenfactor
def sgt_adim(self, T):
'''
sgt_adim(T)
        Method that evaluates dimensionless factors for temperature, pressure,
        density, tension and distance for interfacial property computations
with SGT.
Parameters
----------
T : float
absolute temperature [K]
Returns
-------
Tfactor : float
            factor to obtain dimensionless temperature (K -> adim)
        Pfactor : float
            factor to obtain dimensionless pressure (bar -> adim)
        rofactor : float
            factor to obtain dimensionless density (mol/cm3 -> adim)
        tenfactor : float
            factor to obtain dimensionless surface tension (mN/m -> adim)
        zfactor : float
            factor to obtain dimensionless distance (Angstrom -> adim)
'''
a = self.a_eos(T)
b = self.b
ci = self.ci(T)
Tfactor = R*b/a
Pfactor = b**2/a
rofactor = b
tenfactor = 1000*np.sqrt(a*ci)/b**2*(np.sqrt(101325/1.01325)*100**3)
zfactor = np.sqrt(a/ci*10**5/100**6)*10**-10
return Tfactor, Pfactor, rofactor, tenfactor, zfactor
def ares(self, V, T, D, B, C):
c1 = self.c1
c2 = self.c2
VCc1B = V + C + c1 * B
VCc2B = V + C + c2 * B
VCB = V + C - B
g = np.log(VCB / V)
f = (1. / (R*B*(c1 - c2))) *
|
np.log(VCc1B / VCc2B)
|
numpy.log
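# --- Illustrative aside (toy coefficients, not EoS-derived) ---
# Standalone sketch of the compressibility-root pattern used by _Zroot above:
# solve the cubic in Z with numpy.roots, keep only real roots above the
# physical lower bound, then take min (liquid) or max (vapour).
import numpy as np

def z_roots(coeffs, lower_bound=0.0):
    roots = np.roots(coeffs)
    roots = np.real(roots[np.imag(roots) == 0])  # keep real roots only
    return roots[roots > lower_bound]            # discard unphysical roots

Z = z_roots([1.0, -1.1, 0.3, -0.02])  # factors as (Z-0.1)(Z^2-Z+0.2)
print(min(Z), max(Z))  # liquid-like (0.1) and vapour-like (~0.724) roots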
|
import os
import nengo
import nengo_dl
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.models import Sequential, Model
from keras import Input
from keras import layers, models
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten
from keras.regularizers import l2
from keras.optimizers import Adam, RMSprop
from keras.losses import sparse_categorical_crossentropy, categorical_crossentropy
from keras.callbacks import TensorBoard, Callback
import argparse
import logging
import json
import nni
from sklearn.preprocessing import MinMaxScaler
from scipy.signal import butter, freqz
from nni.tuner import Tuner
from nni.experiment import Experiment
from nni.algorithms.hpo.hyperopt_tuner import HyperoptTuner
from nni.tools.nnictl import updater, nnictl_utils
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = 'true'
seed = 0
os.environ['PYTHONHASHSEED'] = str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
rng =
|
np.random.RandomState(seed)
|
numpy.random.RandomState
|
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
import sys
import datetime
from scipy import interpolate, gradient, integrate
from ROSCO_toolbox.utilities import list_check
# Some useful constants
now = datetime.datetime.now()
pi = np.pi
rad2deg = np.rad2deg(1)
deg2rad = np.deg2rad(1)
rpm2RadSec = 2.0*(np.pi)/60.0
RadSec2rpm = 60/(2.0 * np.pi)
class Controller():
"""
Class Controller used to calculate controller tunings parameters
Methods:
-------
tune_controller
Parameters:
-----------
controller_params: dict
        Dictionary containing controller parameters that need to be defined
"""
def __init__(self, controller_params):
'''
Load controller tuning parameters from input dictionary
'''
print('-----------------------------------------------------------------------------')
print(' Tuning a reference wind turbine controller using NREL\'s ROSCO toolbox ')
# print(' Developed by <NAME> for collaborative research purposes. ')
print('-----------------------------------------------------------------------------')
# Controller Flags
self.LoggingLevel = controller_params['LoggingLevel']
self.F_LPFType = controller_params['F_LPFType']
self.F_NotchType = controller_params['F_NotchType']
self.IPC_ControlMode = controller_params['IPC_ControlMode']
self.VS_ControlMode = controller_params['VS_ControlMode']
self.PC_ControlMode = controller_params['PC_ControlMode']
self.Y_ControlMode = controller_params['Y_ControlMode']
self.SS_Mode = controller_params['SS_Mode']
self.WE_Mode = controller_params['WE_Mode']
self.PS_Mode = controller_params['PS_Mode']
self.SD_Mode = controller_params['SD_Mode']
self.Fl_Mode = controller_params['Fl_Mode']
self.Flp_Mode = controller_params['Flp_Mode']
# Necessary parameters
self.U_pc = list_check(controller_params['U_pc'], return_bool=False)
self.zeta_pc = list_check(controller_params['zeta_pc'], return_bool=False)
self.omega_pc = list_check(controller_params['omega_pc'], return_bool=False)
self.zeta_vs = controller_params['zeta_vs']
self.omega_vs = controller_params['omega_vs']
self.interp_type = controller_params['interp_type']
# Optional parameters with defaults
self.min_pitch = controller_params['min_pitch']
self.max_pitch = controller_params['max_pitch']
self.vs_minspd = controller_params['vs_minspd']
self.ss_vsgain = controller_params['ss_vsgain']
self.ss_pcgain = controller_params['ss_pcgain']
self.ss_cornerfreq = controller_params['f_ss_cornerfreq']
self.ps_percent = controller_params['ps_percent']
self.sd_cornerfreq = controller_params['sd_cornerfreq']
self.sd_maxpit = controller_params['sd_maxpit']
self.WS_GS_n = controller_params['WS_GS_n']
self.PC_GS_n = controller_params['PC_GS_n']
self.flp_maxpit = controller_params['flp_maxpit']
# Optional parameters without defaults
if self.Flp_Mode > 0:
try:
self.zeta_flp = controller_params['zeta_flp']
self.omega_flp = controller_params['omega_flp']
            except KeyError:
raise Exception('ROSCO_toolbox:controller: zeta_flp and omega_flp must be set if Flp_Mode > 0')
if self.Fl_Mode > 0:
try:
self.twr_freq = controller_params['twr_freq']
self.ptfm_freq = controller_params['ptfm_freq']
            except KeyError:
raise Exception('ROSCO_toolbox:controller: twr_freq and ptfm_freq must be set if Fl_Mode > 0')
# Kp_float direct setting
if 'Kp_float' in controller_params:
self.Kp_float = controller_params['Kp_float']
else:
self.Kp_float = 0
self.tune_Fl = controller_params['tune_Fl']
else:
self.twr_freq = 0
self.ptfm_freq = 0
# Use critical damping if LPFType = 2
if controller_params['F_LPFType'] == 2:
self.F_LPFDamping = 0.7
else:
self.F_LPFDamping = 0.0
# Error checking: number of breakpoints
if self.WS_GS_n <= self.PC_GS_n:
raise Exception('Number of WS breakpoints is not greater than pitch control breakpoints')
# Error checking: pitch controller inputs
if list_check(self.U_pc) and \
(list_check(self.omega_pc) or list_check(self.zeta_pc)) and \
not len(self.U_pc) == len(self.omega_pc) == len(self.zeta_pc):
raise Exception(
'U_pc, omega_pc, and zeta_pc are all list-like and are not of equal length')
def tune_controller(self, turbine):
"""
Given a turbine model, tune a controller based on the NREL generic controller tuning process
Parameters:
-----------
turbine : class
Turbine class containing necessary turbine information to accurately tune the controller.
"""
# -------------Load Parameters ------------- #
# Re-define Turbine Parameters for shorthand
        J = turbine.J                           # Total rotor inertia (kg-m^2)
rho = turbine.rho # Air density (kg/m^3)
R = turbine.rotor_radius # Rotor radius (m)
Ar = np.pi*R**2 # Rotor area (m^2)
Ng = turbine.Ng # Gearbox ratio (-)
rated_rotor_speed = turbine.rated_rotor_speed # Rated rotor speed (rad/s)
# -------------Define Operation Points ------------- #
TSR_rated = rated_rotor_speed*R/turbine.v_rated # TSR at rated
# separate wind speeds by operation regions
# add one to above rated because we don't use rated in the pitch control gain scheduling
v_below_rated = np.linspace(turbine.v_min,turbine.v_rated, num=self.WS_GS_n-self.PC_GS_n)[:-1] # below rated
v_above_rated = np.linspace(turbine.v_rated,turbine.v_max, num=self.PC_GS_n+1) # above rated
v = np.concatenate((v_below_rated, v_above_rated))
# separate TSRs by operations regions
TSR_below_rated = [min(turbine.TSR_operational, rated_rotor_speed*R/v) for v in v_below_rated] # below rated
TSR_above_rated = rated_rotor_speed*R/v_above_rated # above rated
# TSR_below_rated = np.minimum(np.max(TSR_above_rated), TSR_below_rated)
TSR_op = np.concatenate((TSR_below_rated, TSR_above_rated)) # operational TSRs
# Find expected operational Cp values
        Cp_above_rated = turbine.Cp.interp_surface(0,TSR_above_rated[0])     # Cp during rated operation (not optimal). Assumes cut-in blade pitch to be 0
Cp_op_br = np.ones(len(v_below_rated)) * turbine.Cp.max # below rated
Cp_op_ar = Cp_above_rated * (TSR_above_rated/TSR_rated)**3 # above rated
Cp_op = np.concatenate((Cp_op_br, Cp_op_ar)) # operational CPs to linearize around
pitch_initial_rad = turbine.pitch_initial_rad
TSR_initial = turbine.TSR_initial
# initialize variables
pitch_op = np.empty(len(TSR_op))
dCp_beta = np.empty(len(TSR_op))
dCp_TSR = np.empty(len(TSR_op))
dCt_beta = np.empty(len(TSR_op))
dCt_TSR = np.empty(len(TSR_op))
Ct_op = np.empty(len(TSR_op))
# ------------- Find Linearized State "Matrices" ------------- #
for i in range(len(TSR_op)):
# Find pitch angle as a function of expected operating CP for each TSR
Cp_TSR = np.ndarray.flatten(turbine.Cp.interp_surface(turbine.pitch_initial_rad, TSR_op[i])) # all Cp values for a given tsr
            Cp_maxidx = Cp_TSR.argmax()  # index of the maximum Cp for this TSR
            Cp_op[i] = np.clip(Cp_op[i], np.min(Cp_TSR[Cp_maxidx:]), np.max(Cp_TSR[Cp_maxidx:]))  # saturate Cp values to lie on the Cp surface
            f_cp_pitch = interpolate.interp1d(Cp_TSR[Cp_maxidx:],pitch_initial_rad[Cp_maxidx:])  # interpolate pitch as a function of Cp at this TSR
# expected operation blade pitch values
if v[i] <= turbine.v_rated and isinstance(self.min_pitch, float): # Below rated & defined min_pitch
pitch_op[i] = min(self.min_pitch, f_cp_pitch(Cp_op[i]))
elif isinstance(self.min_pitch, float):
pitch_op[i] = max(self.min_pitch, f_cp_pitch(Cp_op[i]))
else:
pitch_op[i] = f_cp_pitch(Cp_op[i])
dCp_beta[i], dCp_TSR[i] = turbine.Cp.interp_gradient(pitch_op[i],TSR_op[i]) # gradients of Cp surface in Beta and TSR directions
            dCt_beta[i], dCt_TSR[i] = turbine.Ct.interp_gradient(pitch_op[i],TSR_op[i]) # gradients of Ct surface in Beta and TSR directions
# Thrust
            Ct_TSR = np.ndarray.flatten(turbine.Ct.interp_surface(turbine.pitch_initial_rad, TSR_op[i])) # all Ct values for a given TSR
f_ct = interpolate.interp1d(pitch_initial_rad,Ct_TSR)
Ct_op[i] = f_ct(pitch_op[i])
Ct_op[i] = np.clip(Ct_op[i],
|
np.min(Ct_TSR)
|
numpy.min
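# --- Illustrative aside (toy turbine numbers, not NREL values) ---
# Self-contained sketch of the operating-point construction above: below
# rated wind speed the rotor tracks the (capped) operational TSR; above
# rated the rotor speed is fixed, so TSR falls as 1/v.
import numpy as np

R = 60.0                 # rotor radius (m)
rated_rotor_speed = 1.0  # rad/s
v_rated, v_min, v_max = 11.0, 4.0, 25.0
TSR_operational = 7.5

v_below = np.linspace(v_min, v_rated, 5)[:-1]
v_above = np.linspace(v_rated, v_max, 5)
TSR_below = [min(TSR_operational, rated_rotor_speed * R / v) for v in v_below]
TSR_above = rated_rotor_speed * R / v_above
print(np.concatenate((TSR_below, TSR_above)).round(2))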
|
import numpy as np
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
Y = np.array([1, 1, 1, 2, 2, 2])
Xtest =
|
np.array([[-1, -0.5], [-1.5, -1], [-2.5, -2], [0.5, 1], [1.7, 1], [3, 2]])
|
numpy.array
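# --- Plausible continuation (an assumption; the original snippet stops here) ---
# X and Y match scikit-learn's classic GaussianNB documentation example, so
# fitting that classifier shows how Xtest would be used.
import numpy as np
from sklearn.naive_bayes import GaussianNB

X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
Y = np.array([1, 1, 1, 2, 2, 2])
Xtest = np.array([[-1, -0.5], [-1.5, -1], [-2.5, -2], [0.5, 1], [1.7, 1], [3, 2]])
clf = GaussianNB().fit(X, Y)
print(clf.predict(Xtest))  # expected: [1 1 1 2 2 2]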
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import copy
import os
import numpy as np
from collections import Counter, OrderedDict
from tfbldr.datasets import notes_to_midi
from tfbldr.datasets import midi_to_notes
from functools import reduce
basedir = "/u/kastner/music_npz_jos"
"""
cnt = Counter()
for fnpz in sorted(os.listdir(basedir)):
print(fnpz)
d = np.load(basedir + os.sep + fnpz)
if len(d['centered']) < 1:
print(fnpz + " had zero length")
continue
for mi in range(len(d['centered'])):
measure = d['centered'][mi]
cnt.update(measure.ravel())
"""
all_filenames = []
all_measurenums = []
all_piano_rolls = []
all_pitch_duration = []
all_functional_notes = []
all_functional_notes_idx = []
all_functional_notes_kv = []
all_functional_voicings_idx = []
all_functional_voicings_kv = []
all_keyframes = []
all_chords_names = []
all_indexed = []
all_absolutes = []
all_keys = []
all_modes = []
all_scalenotes = []
for fnpz in sorted(os.listdir(basedir)):
print(fnpz)
try:
d = np.load(basedir + os.sep + fnpz)
    except:
        print("Unable to load {}, continuing".format(fnpz))
        continue
if len(d["centered"]) < 1 or 'keyname' not in d:
print(fnpz + " had zero length or no key")
continue
prs = copy.deepcopy(d["piano_rolls"])
pds = copy.deepcopy(d["pitch_duration"])
loaded_chords_names = copy.deepcopy(d["chords_names"])
key = d["keyname"]
mode = d["keymode"]
# last note is octave of the root, skip it
notes = d["keynotes"][:-1]
assert sorted(list(set(d["keynotes"]))) == sorted(list(notes))
scale_lu = {}
scale_lu["R"] = 0
ordered_scale = ["R"]
counter = 1
for octave in ["1", "2", "3", "4", "5"]:
for note in notes:
ordered_scale.append(note + octave)
scale_lu[note + octave] = counter
counter += 1
norm_lu = {v: k for k, v in scale_lu.items()}
notes_lu = {os: notes_to_midi([[os]])[0][0] for os in ordered_scale}
notes_lu["R"] = notes_to_midi([["R"]])[0][0]
midi_lu = {v: k for k, v in notes_lu.items()}
filename = fnpz
keyframe_lu = {v: k for k, v in enumerate(np.arange(-13, 14 + 1))}
diff_lu = {v: k for k, v in keyframe_lu.items()}
piano_rolls = []
pitch_duration = []
measurenums = []
keyframes = []
indexed = []
absolutes = []
keys = []
modes = []
chords_names = []
scalenotes = []
functional_notes = []
functional_notes_idx = []
functional_voicings_idx = []
func_notes_lu = {}
func_notes_lu["R"] = 0
# R is always in the lowest voicing -> R0
counter = 1
notes = [n for n in notes]
for n1 in notes + ["R"]:
for n2 in notes + ["R"]:
for n3 in notes + ["R"]:
for n4 in notes + ["R"]:
# hack to represent it in the form we get from midi_to_notes
# basically changing E-3 -> Eb3 , etc
if n1 != "R":
nnn1 = midi_to_notes(notes_to_midi([[n1 + octave]]))[0][0][:-1]
else:
nnn1 = n1
if n2 != "R":
nnn2 = midi_to_notes(notes_to_midi([[n2 + octave]]))[0][0][:-1]
else:
nnn2 = n2
if n3 != "R":
nnn3 = midi_to_notes(notes_to_midi([[n3 + octave]]))[0][0][:-1]
else:
nnn3 = n3
if n4 != "R":
nnn4 = midi_to_notes(notes_to_midi([[n4 + octave]]))[0][0][:-1]
else:
nnn4 = n4
func_notes_lu[tuple([nnn1, nnn2, nnn3, nnn4])] = counter
counter += 1
func_voicings_lu = {}
# hardcode for 4 voices for now
count = 0
for o1 in [0, 1, 2, 3, 4, 5]:
for o2 in [0, 1, 2, 3, 4, 5]:
for o3 in [0, 1, 2, 3, 4, 5]:
for o4 in [0, 1, 2, 3, 4, 5]:
oo = [o1, o2, o3, o4]
nz = [ooi for ooi in oo if ooi != 0]
                    # the ordering check below only applies with at least 2 non-rest voices
if len(nz) == 0 or len(nz) == 1:
func_voicings_lu[tuple(oo)] = count
count += 1
else:
rr = range(len(nz))
ordered = True
maxv = 5
for i in rr:
if nz[i] <= maxv:
maxv = nz[i]
else:
ordered = False
# allow voice crossing in the middle 2 voices?
if ordered:
func_voicings_lu[tuple(oo)] = count
count += 1
inv_func_voicings = {v: k for k, v in func_voicings_lu.items()}
last_non_rest = [0, 0, 0, 0]
for n in range(len(prs)):
# key and mode delta normalized repr
# 0 rest, 1:28 is [-13, 14]
pr_i = prs[n]
pd_i = pds[n]
chords_names_i = loaded_chords_names[n]
if len(set([lcn for lcn in loaded_chords_names[n]])) != 1:
print("got multiple chords")
from IPython import embed; embed(); raise ValueError()
if pr_i.shape[-1] != 4:
#print("3 voices, skip for now")
continue
if pr_i.shape[0] != 48:
new_pr_i = np.zeros((48, pr_i.shape[-1]))
if pr_i.shape[0] == 32:
# 32 into 48 is 4 into 6
ii = 0
oi = 0
while True:
nt = pr_i[ii:ii + 4]
for v in range(pr_i.shape[-1]):
if len(np.unique(nt[:, v])) != 1:
if len(np.unique(nt[:, v])) == 2:
mn = np.min(nt[:, v])
mx = np.max(nt[:, v])
if np.sum(nt[:, v] == mn) == 2:
nt[:, v] = mn
else:
nt[:, v] = mx
else:
print("note changed :|")
from IPython import embed; embed(); raise ValueError()
new_pr_i[oi:oi + 6] = nt[0][None] # ii:ii + 3 all forced the same above
oi = oi + 6
ii = ii + 4
if ii >= 32:
break
pr_i = new_pr_i
else:
#print("not length 48, needs normalization")
continue
loop_reset = False
for unote in np.unique(pr_i):
if unote not in midi_lu:
#print("note not in key!")
last_non_rest = [0, 0, 0, 0]
loop_reset = True
break
if loop_reset:
continue
for v in range(pr_i.shape[-1]):
non_rest = pr_i[pr_i[:, v] > 0, v]
if last_non_rest[v] == 0:
if len(non_rest) > 0:
last_non_rest[v] = scale_lu[midi_lu[non_rest[0]]]
func = midi_to_notes(pr_i)
func_notes = copy.deepcopy(func)
func_notes_i = np.zeros_like(pr_i[:, 0])
func_voicings_i = np.zeros_like(pr_i[:, 0])
"""
loop_reset = False
for iii in range(len(pr_i)):
fvi = [int(fi[-1]) if fi != "R" else 0 for fi in func[iii]]
if tuple(fvi) not in func_voicings_lu:
print("unknown voicing {}".format(fvi))
loop_reset = True
break
fni = [fi[:-1] if fi != "R" else "R" for fi in func[iii]]
fni_idx = func_notes_lu[tuple(fni)]
fvi_idx = func_voicings_lu[tuple(fvi)]
func_notes_i[iii] = fni_idx
func_voicings_i[iii] = fvi_idx
if loop_reset:
continue
"""
# put the scale notes in
out = np.zeros_like(pr_i)
for unote in np.unique(pr_i):
out[pr_i == unote] = scale_lu[midi_lu[unote]]
absolute = copy.deepcopy(out)
# calculate their offset relative to the keyframe
        # resulting values: 0 = rest, 1 = "same as keyframe", other positive
        # or negative values are offsets above or below the keyframe
for v in range(pr_i.shape[-1]):
subs = out[out[:, v] > 0, v]
# shift the positive ones up, the others will be negative
subs[subs >= last_non_rest[v]] += 1
subs -= last_non_rest[v]
out[out[:, v] > 0, v] = subs
loop_reset = False
for uni in np.unique(out):
if uni not in keyframe_lu:
#print("note not in key!")
last_non_rest = [0, 0, 0, 0]
loop_reset = True
break
if loop_reset:
continue
# finally, give each of these an index value so we can softmax it
final = np.zeros_like(pr_i)
for uni in np.unique(out):
final[out == uni] = keyframe_lu[uni]
indexed.append(final)
keyframes.append(copy.deepcopy(last_non_rest))
chords_names.append(chords_names_i)
measurenums.append(n)
absolutes.append(absolute)
piano_rolls.append(pr_i)
pitch_duration.append(pd_i)
functional_notes.append(func_notes)
functional_notes_idx.append(func_notes_i)
functional_voicings_idx.append(func_voicings_i)
for v in range(pr_i.shape[-1]):
non_rest = pr_i[pr_i[:, v] > 0, v]
if len(non_rest) > 0:
last_non_rest[v] = scale_lu[midi_lu[non_rest[-1]]]
filenames = [fnpz] * len(indexed)
keys = [key] * len(indexed)
modes = [mode] * len(indexed)
scalenotes = [notes] * len(indexed)
if len(keyframes) > 0:
all_piano_rolls.append(piano_rolls)
all_pitch_duration.append(pitch_duration)
all_indexed.append(indexed)
all_keyframes.append(keyframes)
all_chords_names.append(chords_names)
all_absolutes.append(absolutes)
all_functional_notes.append(functional_notes)
all_functional_notes_idx.append(functional_notes_idx)
all_functional_notes_kv.append([(k, v) for k, v in func_notes_lu.items()])
all_functional_voicings_idx.append(functional_voicings_idx)
all_functional_voicings_kv.append([(k, v) for k, v in func_voicings_lu.items()])
all_measurenums.append(measurenums)
all_filenames.append(filenames)
all_keys.append(keys)
all_modes.append(modes)
all_scalenotes.append(scalenotes)
final = {}
final["piano_rolls"] = all_piano_rolls
final["pitch_duration"] = all_pitch_duration
final["indexed"] = all_indexed
final["absolutes"] = all_absolutes
final["keyframes"] = all_keyframes
final["chords_names"] = all_chords_names
final["functional_notes"] = all_functional_notes
final["functional_notes_idx"] = all_functional_notes_idx
final["functional_notes_kv"] = all_functional_notes_kv
final["functional_voicings_idx"] = all_functional_voicings_idx
final["functional_voicings_kv"] = all_functional_voicings_kv
final["measurenums"] = all_measurenums
final["filenames"] = all_filenames
final["keys"] = all_keys
final["modes"] = all_modes
final["scalenotes"] = all_scalenotes
|
np.savez("music_data_1d.npz", **final)
|
numpy.savez
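# --- Illustrative aside (toy values, mirroring the offset encoding above) ---
# Scale degrees are shifted so the keyframe maps to +1, notes above keep
# positive offsets, notes below go negative, and 0 stays "rest".
import numpy as np

column = np.array([0, 5, 7, 5, 3])  # scale-degree indices, 0 = rest
keyframe = 5
subs = column[column > 0]
subs[subs >= keyframe] += 1
subs -= keyframe
out = column.copy()
out[out > 0] = subs
print(out)  # [ 0  1  3  1 -2]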
|
"""
Galaxy Size Distributions
=========================
This example demonstrates how to sample sizes for early and late type galaxies
in SkyPy.
"""
# %%
# Size-Magnitude Relation
# -------------------------
#
# In Shen et al. 2003 [1]_, the observed sizes, :math:`R`, of galaxies
# were shown to follow simple analytic relations as a function of their absolute
# magnitudes, :math:`M`.
# For early-type galaxies, their mean radius follows Equation 14:
#
# .. math::
#
# \log_{10} (\bar{R}/{\rm kpc}) = -0.4aM + b,
#
# with :math:`a` and :math:`b` fitting constants. Likewise, late-type galaxies
# follow Equation 15:
#
# .. math::
#
# \log_{10}(\bar{R}/{\rm kpc})=-0.4\alpha M+
# (\beta -\alpha)\log \left[1+10^{-0.4(M-M_0)}\right]+\gamma \, .
#
# The dispersion on these relations is given by Equation 16:
#
# .. math::
#
# \sigma_{ln R} = \sigma_2 + \frac{\sigma_1 - \sigma_2}{1 + 10^{-0.8(M - M_0)}}
#
# where :math:`\alpha`, :math:`\beta`, :math:`\gamma`, :math:`\sigma_1`, :math:`\sigma_2` and
# :math:`M_0` are fitting parameters.
#
# In SkyPy, we can sample physical sizes for each galaxy type from lognormal distributions,
# with median :math:`\bar{R}` and width :math:`\sigma_{ln R}`, using the functions
# :func:`skypy.galaxies.morphology.early_type_lognormal_size()` and
# :func:`skypy.galaxies.morphology.late_type_lognormal_size()`.
#
# In this example, we simulate the sizes of galaxies with random magnitudes using the
# values for the parameters
# given in Shen et al. 2003 Table 1 [1]_ :
import numpy as np
import matplotlib.pyplot as plt
from skypy.galaxies.morphology import (early_type_lognormal_size,
late_type_lognormal_size)
# Parameters for the late-type and early-type galaxies
alpha, beta, gamma = 0.21, 0.53, -1.31
a, b = 0.6, -4.63
M0 = -20.52
sigma1, sigma2 = 0.48, 0.25
# SkyPy late sample
M_late = np.random.uniform(-16, -24, size=10000)
R_late = late_type_lognormal_size(M_late, alpha, beta, gamma, M0, sigma1, sigma2).value
# SkyPy early sample
M_early = np.random.uniform(-18, -24, size=10000)
R_early = early_type_lognormal_size(M_early, a, b, M0, sigma1, sigma2).value
# %%
# Validation against SDSS Data
# ----------------------------
# Here we reproduce Figure 4 from [1]_, comparing our simulated galaxy sizes
# against observational data from SDSS. You can download the data files for
# :download:`early-type <../../../examples/galaxies/Shen+03_early.txt>` and
# :download:`late-type <../../../examples/galaxies/Shen+03_late.txt>` SDSS
# galaxies which have the following columns: magnitudes, median radius, minus
# error, and plus error.
# Load data from figure 4 in Shen et al 2003
sdss_early = np.loadtxt('Shen+03_early.txt')
sdss_late = np.loadtxt('Shen+03_late.txt')
error_late = (sdss_late[:, 2], sdss_late[:, 3])
error_early = (sdss_early[:, 2], sdss_early[:, 3])
# Bins for median radii
M_bins_late = np.arange(-16, -24.1, -0.5)
M_bins_early = np.arange(-18, -24.1, -0.5)
# Center bins
center_late = (M_bins_late[:-1] + M_bins_late[1:]) / 2
center_early = (M_bins_early[:-1] + M_bins_early[1:]) / 2
# Median sizes for SkyPy late- and early-type galaxies
R_bar_early = [np.median(R_early[(M_early <= Ma) & (M_early > Mb)])
for Ma, Mb in zip(M_bins_early, M_bins_early[1:])]
R_bar_late = [
|
np.median(R_late[(M_late <= Ma) & (M_late > Mb)])
|
numpy.median
|
# This script performs analysis of photoluminescence spectra of nanoscale Si measured by
# a Perkin Elmer FL 8500 or 6500 using Spectrum FL Software
# (https://www.perkinelmer.com/product/fl-8500-with-spectrum-fl-software-n4200030).
# The measurement can be done with powder or liquid samples, with different filters to
# get rid of excitation light in the emission spectra. The script combines data measured in 3D mode,
# i.e. an emission spectrum is measured for each excitation wavelength. Depending on the excitation
# wavelengths and emission filters used, the script combines the spectra into one graph. For example,
# consider the same sample first measured with excitation wavelengths from 300 to 400 nm and an emission
# filter at 430 nm, and then measured with excitation wavelengths from 400 to 500 nm with an emission
# filter at 515 nm. The script will combine those measurements into one data set and plot relevant
# graphs for the combined data.
# The script works by setting a sample id and the folder in which the measurement folders are located.
# These folders must start with the sample id followed by a '_' character and an additional measurement
# description. The folder name must contain the filter wavelength in nm somewhere after the '_' character.
# It may end with '_' followed by a measurement index in case the measurement was repeated.
# However, there is no way to select an exact measurement repeat; the selection is determined
# by the directory search function glob.glob().
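# Illustration (hypothetical folder names): for sample id 'S01' with emission
# filters [430, 515], folders such as 'S01_powder_430' and 'S01_powder_515_2'
# would both be picked up -- each starts with the sample id, contains the
# filter wavelength after a '_', and the trailing '_2' marks a repeated
# measurement.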
import argparse
import chardet
import glob
import os
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xlsxwriter
from matplotlib import cm
from matplotlib.ticker import LinearLocator
def load_csv(meas_folder: str, encoding: str) -> pd.DataFrame:
    '''Loads the csv into a pandas dataframe.'''
csv_path = glob.glob(os.path.join(meas_folder, 'Administrator*'))[0]
# get file encoding
if encoding == '':
with open(csv_path, 'rb') as raw:
encoding = chardet.detect(raw.read())
encoding = encoding['encoding']
# open file and replace , with .
with open(csv_path, 'r', encoding=encoding) as f:
csv = f.read().replace(',', '.')
with open(csv_path, 'w', encoding=encoding) as f:
f.write(csv)
# get dataframe
meas_df = pd.read_csv(csv_path, sep=';', skiprows=1, encoding=encoding)
meas_df.drop(meas_df.columns[len(meas_df.columns)-1], axis=1, inplace=True)
    meas_df = meas_df.astype(float)
return meas_df
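# Usage sketch (hypothetical path): load_csv('S01_powder_430', '') returns a
# float dataframe whose first column is the emission wavelength in nm and
# whose remaining columns (named like 'INT(300.0)') each hold one emission
# spectrum per excitation wavelength.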
def get_sample_data(measure_dir: str,
sample_id: str,
emission_filters: list,
excitation_wavelengths: list,
encoding: str) -> list:
'''Get sample data for sample with sample_id, excitation wavelengths and emission filters'''
# get all folders with specified
all_sample_paths = [x for x in glob.glob(os.path.join(measure_dir, sample_id + '*')) if os.path.isdir(x)]
print(all_sample_paths)
if not all_sample_paths:
print('error: sample with specified id was not found: ' + sample_id)
return
    # loop through emission filters and sample paths
    # and select one measurement for each filter and excitation range
x_nm = []
sample_data = []
sample_excit_wls = []
if emission_filters:
# if there are emission filters, select measurement for each folder
for i, ef in enumerate(emission_filters):
meas_path = ''
for path in all_sample_paths:
if str(ef) in path:
meas_path = path
if meas_path == '':
# no measurement with such filter found
print('error: no measurement for specified emission filter was found: ' + str(ef) + ' nm')
return
# load the sample data into dataframe
print(f'info: using measurement {meas_path} for emission filter {ef} nm and range {excitation_wavelengths[i]}')
meas_df = load_csv(meas_path, encoding)
# select the first column which is wavelength in nm
x_nm = meas_df.iloc[:, 0].to_numpy()
# get excitation wavelengths from the column
meas_excit_wls = np.array([float(x.strip(')').strip('INT(')) for x in list(meas_df.columns[1:])])
meas_data = meas_df.iloc[:,1:].to_numpy()
excitation_filter_mask = ((meas_excit_wls >= excitation_wavelengths[i][0]) & (meas_excit_wls < excitation_wavelengths[i][1]))
meas_data = meas_data[:, excitation_filter_mask]
meas_excit_wls = meas_excit_wls[excitation_filter_mask]
if len(sample_data) == 0:
                # sample data is empty: initialize it with meas data
sample_data = meas_data
sample_excit_wls = meas_excit_wls
else:
# sample data is not empty, so it can be joined with meas data
sample_data =
|
np.concatenate((sample_data, meas_data), axis=1)
|
numpy.concatenate
|
## Copyright (c) 2001-2012, <NAME>
## October 2012 (CSDMS Standard Names with BMI)
## January 2009 (converted from IDL)
## May, July, August, October 2009
## May 2010 (changes to initialize() and read_cfg_file()
## NB! TF expects d8.codes vs. d8.flow_grid
#############################################################
## Note: channel_base.py calls many of these but then
## saves the results among its state variables.
#############################################################
## Note: It might be more clear to refer to flow_grids
## and flow_codes as d8_grids and d8_codes, etc.
## (e.g. d8_code_list, d8_code_map, d8_width_grid)
#############################################################
from numpy import *
import numpy
import os, os.path
import time
from . import BMI_base
from . import cfg_files as cfg
from . import pixels
from . import rtg_files
from . import tf_utils
from .model_output import *
#---------------------------------------------------------------------
#
# unit_test()
#
# class d8_base # (inherits from BMI_base)
#
# get_attribute()
# get_input_var_names()
# get_output_var_names()
# get_var_names()
# get_var_units()
#----------------------------------
# set_constants()
# initialize()
# update()
# read_cfg_file() #### not finished yet ####
# set_computed_input_vars()
# initialize_computed_vars()
#----------------------------------
# get_pixel_dimensions()
# get_flow_code_list()
# get_flow_code_list_opps()
# get_valid_code_map()
# get_ID_grid()
# get_parent_inc_map()
# get_edge_IDs()
# get_not_edge_grid()
#----------------------------------
# update_parent_ID_grid() # (can handle periodic BCs)
# update_parent_IDs() # (used by erosion_base.update_slope_grid())
# update_non_parent_IDs() # (not working or needed yet)
# update_flow_from_IDs()
# update_flow_to_IDs()
# update_noflow_IDs()
#----------------------------------
# read_flow_grid()
# update_flow_grid()
# start_new_flow_grid()
# resolve_array_cycle()
# get_resolve_array()
# break_flow_grid_ties()
# link_flats()
#----------------------------------
# update_flow_width_grid()
# update_flow_length_grid()
# update_area_grid() # (added on 10/28/09)
# update_area_grid_OLD() # (can't handle periodic BCs)
#----------------------------------
# get_flow_width_grid() # OBSOLETE ?
# get_flow_length_grid() # OBSOLETE ?
#-----------------------------------------------------------------------
def unit_test(SILENT=False, REPORT=True):
#---------------------------------------------------------------
# NOTE! The tests will appear to fail if the existing flow
# grid used for comparison was computed using a flat
# resolution method other than "Iterative linking".
#
# The KY_Sub and Beaver DEMs were processed using RiverTools
# 3.0 using the WGS_1984 ellipsoid model for computing lengths
# and areas. The "Iterative linking" method was used for both
# as the "Flat resolution method", to make them comparable to
# the ones generated by functions in d8_base.py and
# fill_pits.py. Older version of these data sets used other
# methods and can't be compared directly.
#
# Make sure that LINK_FLATS=True, LR_PERIODIC=False, and
# TB_PERIODIC=False in CFG file.
#---------------------------------------------------------------
start = time.time()
#------------------------------------------------------
# Example of DEM with fixed-angle pixels (Geographic)
# min(da) = 6802.824074169645 [m^2]
# max(da) = 6837.699120083246 [m^2]
# min(A) = 0.000000000000 [km^2]
# max(A) = 807.063354492188 [km^2]
#------------------------------------------------------
## directory = '/Applications/Erode/Data/KY_Sub2/'
## site_prefix = 'KY_Sub'
## case_prefix = 'Test1'
#------------------------------------------------
# Example of DEM with fixed-length pixels (UTM)
# min(da) = 900.000 [m^2]
# max(da) = 900.000 [m^2]
# min(A) = 0.000000000000 [km^2]
# max(A) = 681.914184570312 [km^2]
#------------------------------------------------
## directory = '/Applications/Erode/Data/Beaver2/'
## site_prefix = 'Beaver'
## case_prefix = 'Test1'
#-----------------------------------------
# This function adjusts for the platform
# and can be changed in "tf_utils.py".
#-------------------------------------------------
# NOTE: The Treynor_Iowa DEM has no depressions!
#-------------------------------------------------
## in_directory = tf_utils.TF_Test_Directory()
d8 = d8_component()
d8.CCA = False
d8.DEBUG = True # (check flow and area grid against existing)
d8.SILENT = SILENT
d8.REPORT = REPORT
cfg_prefix = 'Test1'
d8.site_prefix = 'KY_Sub'
d8.initialize(cfg_prefix=cfg_prefix, mode="driver",
SILENT=SILENT, REPORT=REPORT)
d8.A_units = 'km^2' # (override setting in CFG file)
d8.update(SILENT=SILENT, REPORT=REPORT)
print('grid nx =', d8.nx)
print('grid ny =', d8.ny)
print('Run time =', (time.time() - start), ' [secs]')
print('Finished with unit_test().')
print(' ')
# unit_test()
#---------------------------------------------------------------------
class d8_component(BMI_base.BMI_component):
#-------------------------------------------------------------------
_att_map = {
'model_name': 'd8_component class', ###########
'version': '3.1',
'author_name': '<NAME>',
'grid_type': 'uniform',
'time_step_type': 'fixed',
'step_method': 'explicit', ##### or 'none' ??
#------------------------------------------------------
'comp_name': 'TF_D8_component',
'model_family': 'TopoFlow',
'cfg_template_file': 'None',
'cfg_extension': '_d8.cfg',
'cmt_var_prefix': 'None',
'gui_xml_file': 'None',
'dialog_title': 'None',
'time_units': 'seconds' }
_input_var_names = ['land_surface__elevation']
#------------------------------------------
# Maybe use "step" instead of "increment"
# and/or incorporate "grid_cell".
#------------------------------------------
_output_var_names = [
'land_surface__d8_total_contributing_area', ## 'A'
'land_surface__d8_flow_direction_code', ## 'flow_grid'
'land_surface__d8_flow_length_increment', ## 'ds'
'land_surface__d8_flow_width_increment' ] ## 'dw'
## 'ID_grid' = "grid_cell__row_major_id_number"
## 'ID_grid', 'parent_IDs', 'edge_IDs', 'noflow_IDs',
## 'w1', 'w2', 'w3', 'w4', 'w5', 'w6', 'w7', 'w8',
## 'p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8'
## nx, ny, dx, dy, dd, da
_var_name_map = {
'land_surface__elevation':'DEM',
'land_surface__d8_total_contributing_area':'area_grid', ## CHECK
'land_surface__d8_flow_direction_code':'flow_grid',
'land_surface__d8_flow_length_increment':'ds',
'land_surface__d8_flow_width_increment':'dw' }
_var_units_map = {
'land_surface__elevation':'m',
'land_surface__d8_total_contributing_area':'m2', ## CHECK
'land_surface__d8_flow_direction_code':'1',
'land_surface__d8_flow_length_increment':'m',
'land_surface__d8_flow_width_increment':'m' }
#------------------------------------------------
# Return NumPy string arrays vs. Python lists ?
#------------------------------------------------
## _input_var_names = numpy.array( _input_var_names )
## _output_var_names = numpy.array( _output_var_names )
#-------------------------------------------------------------------
def get_attribute(self, att_name):
#----------------
# New. 10/26/11
#----------------
## map = {'comp_name': 'TF_D8_component',
## 'version': '3.1',
## 'model_name': 'd8_component class',
## 'model_family': 'TopoFlow',
## 'cfg_template_file': 'None',
## 'cfg_extension': '_d8.cfg',
## 'cmt_var_prefix': 'None',
## 'gui_xml_file': 'None',
## 'dialog_title': 'None',
## 'time_step_type': 'fixed',
## 'time_units': 'seconds',
## 'grid_type': 'uniform',
## 'author_name': '<NAME>'}
try:
return self._att_map[ att_name.lower() ]
except:
print('###################################################')
print(' ERROR: Could not find attribute: ' + att_name)
print('###################################################')
print(' ')
# get_attribute()
#-------------------------------------------------------------------
def get_input_var_names(self):
return self._input_var_names
# get_input_var_names()
#-------------------------------------------------------------------
def get_output_var_names(self):
return self._output_var_names
# get_output_var_names()
#-------------------------------------------------------------------
def get_var_name(self, long_var_name):
return self._var_name_map[ long_var_name ]
# get_var_name()
#-------------------------------------------------------------------
def get_var_units(self, long_var_name):
return self._var_units_map[ long_var_name ]
# get_var_units()
#-------------------------------------------------------------------
def set_constants(self):
#------------------------
# Define some constants
#------------------------
self.dt = 1.0 # (needs to be defined)
self.nodata = -float32(9999)
# set_constants()
#-----------------------------------------------------------------
def initialize(self, cfg_prefix='Case1', mode="nondriver",
SILENT=False, REPORT=True):
#--------------------------------------------------------
# Note: This function calls functions that compute a
# variety of D8 variables and saves them in its
# state. This "d8 state" can be embedded within
# another class such as the "channels_base" or
# "erosion_base" class.
#--------------------------------------------------------
if not(SILENT):
print(' ')
print('D8 component: Initializing...')
## start_init = time.time()
self.status = 'initializing'
self.mode = mode
#------------------------------------------------------
# Note: A run_model() call or a driver's initialize()
# call calling initialize_config_vars() will set
# CWD to the location of CFG files.
#------------------------------------------------------
# Note: If directories and prefixes are not set in
# initialize_config_vars(), then they will
# default to CWD and cfg_prefix.
#------------------------------------------------------
cfg_extension = self.get_cfg_extension()
filename = cfg_prefix + cfg_extension
self.cfg_file = os.path.join( os.getcwd(), filename )
self.cfg_prefix = cfg_prefix
#-----------------------------------------------
# Load component parameters from a config file
#-----------------------------------------------
self.set_constants()
self.initialize_config_vars()
self.read_grid_info() # (also gets & stores self.da)
#-----------------------------------------
# This must come before "Disabled" test.
#-----------------------------------------
self.initialize_time_vars()
#--------------------------------------------
# Convert units for da from m^2 to km^2 ??
#--------------------------------------------
# Better to do this in "update_area_grid()"
# See "unit_test()".
#--------------------------------------------
## if ('km' in self.A_units.lower()):
## self.da = self.da / 1e6
#-----------------------------------------------
# These return values that don't depend on the
# flow grid and don't change, so they should
# simply be stored for subsequent use.
#-----------------------------------------------
# Values that do depend on the flow grid are
# computed by calls in the update() method.
#-----------------------------------------------
self.get_pixel_dimensions(SILENT=SILENT, REPORT=REPORT)
self.get_flow_code_list()
self.get_flow_code_list_opps()
self.get_ID_grid(SILENT=SILENT)
self.get_parent_inc_map()
self.get_edge_IDs(SILENT=SILENT)
self.get_not_edge_grid(SILENT=SILENT)
#-------------------------------------------------
self.get_resolve_array(SILENT=SILENT) ######
self.get_valid_code_map(SILENT=SILENT)
#-----------------------------
# Initialize dw, ds, A, etc.
#-----------------------------
self.initialize_computed_vars()
## self.initialize_required_components(mode) ##### NOT READY YET ###
self.status = 'initialized'
## finish_init = time.time()
## run_time = (finish_init - start_init)
## print 'Run time for initialize =', run_time, ' [secs]'
# initialize()
#-------------------------------------------------------------------
def update(self, time=None, DEM=None,
SILENT=True, REPORT=False):
self.status = 'updating' # (OpenMI 2.0 convention)
if not(SILENT):
print('D8 component: Updating...')
# if (self.mode == 'driver'):
# self.print_time_and_value(self.z_outlet, 'z_out', '[m]')
#-------------------------
# Update computed values
#-------------------------
## fill_pits.fill_pits_in_dem() ## pass DEM to here? ##
self.update_flow_grid(DEM, SILENT=SILENT, REPORT=REPORT)
self.update_parent_ID_grid()
self.update_parent_IDs() # (needed for gradients)
self.update_flow_from_IDs()
self.update_flow_to_IDs()
self.update_noflow_IDs()
# OLD_WAY = True # (Treynor run time = 0.11 seconds)
OLD_WAY = False # (Treynor run time = 0.092 seconds)
if not(OLD_WAY):
self.update_flow_width_grid(SILENT=SILENT, REPORT=REPORT)
self.update_flow_length_grid(SILENT=SILENT, REPORT=REPORT)
self.update_area_grid(SILENT=SILENT, REPORT=REPORT)
else:
self.get_flow_width_grid(SILENT=SILENT, REPORT=REPORT)
self.get_flow_length_grid(SILENT=SILENT, REPORT=REPORT)
#-------------------------------------------
# Read from files as needed to update vars
#-------------------------------------------
# if (self.time_index > 0):
# self.read_input_files()
#----------------------------------------------
# Use this for saving D8 flow and area grids
#------------------------------------------------
# Write user-specified data to output files ?
#------------------------------------------------
## self.write_output_files( time )
#------------------------
# Check computed values
#------------------------
## if (OK):
## self.status = 'updated' # (OpenMI 2.0 convention)
## else:
## self.status = 'failed'
## self.DONE = True
#------------------------
# Update internal clock
#------------------------
self.update_time()
self.status = 'updated' # (OpenMI 2.0 convention)
# update()
#-------------------------------------------------------------------
def get_cfg_extension(self):
return '_d8.cfg'
# get_cfg_extension()
#---------------------------------------------------------------------
def read_cfg_file(self):
#------------------------------------------
# Read parameters from a CFG file that is
# in the current working directory.
#------------------------------------------
print('D8 component: Reading config file...')
file_unit = open(self.cfg_file, 'r')
#-------------------------
# Skip over header lines
#-------------------------
cfg.skip_header( file_unit, n_lines=4 )
#------------------------
# Read the channel vars
#------------------------
self.method = cfg.read_value( file_unit, dtype='int16' )
self.method_name = cfg.read_value( file_unit, dtype='string' )
#---------------------------------------------------------------
dt_info = cfg.read_input_option( file_unit )
# self.dt_type = dt_info[0]
self.dt = dt_info[1]
#-----------------------------------------------------------
# Read: LR_PERIODIC, TB_PERIODIC, DEM_file_name, etc.
#-----------------------------------------------------------
self.DEM_file = cfg.read_value(file_unit, dtype='string')
self.A_units = cfg.read_value(file_unit, dtype='string')
self.A_units = self.A_units.lower()
self.LINK_FLATS = cfg.read_value(file_unit, dtype='boolean')
self.LR_PERIODIC = cfg.read_value(file_unit, dtype='boolean')
self.TB_PERIODIC = cfg.read_value(file_unit, dtype='boolean')
#---------------------------------------------------------------
        self.ALL_PERIODIC = (self.LR_PERIODIC and self.TB_PERIODIC)
#--------------------------------------------------------------------
save_grid_dt_info = cfg.read_input_option( file_unit )
save_area_grids, area_gs_file = cfg.read_output_option( file_unit )
save_code_grids, code_gs_file = cfg.read_output_option( file_unit )
save_ds_grids, ds_gs_file = cfg.read_output_option( file_unit )
save_dw_grids, dw_gs_file = cfg.read_output_option( file_unit )
#--------------------------------------------------------------------
self.save_grid_dt = save_grid_dt_info[1]
self.SAVE_AREA_GRIDS = save_area_grids
self.SAVE_CODE_GRIDS = save_code_grids
self.SAVE_DS_GRIDS = save_ds_grids
self.SAVE_DW_GRIDS = save_dw_grids
self.area_gs_file = area_gs_file
self.code_gs_file = code_gs_file
self.ds_gs_file = ds_gs_file
self.dw_gs_file = dw_gs_file
#-----------------------------------------------------------------------
save_pixels_dt_info = cfg.read_input_option( file_unit )
save_area_pixels, area_ts_file = cfg.read_output_option( file_unit )
save_code_pixels, code_ts_file = cfg.read_output_option( file_unit )
save_ds_pixels, ds_ts_file = cfg.read_output_option( file_unit )
save_dw_pixels, dw_ts_file = cfg.read_output_option( file_unit )
#-----------------------------------------------------------------------
self.save_pixels_dt = save_pixels_dt_info[1]
self.SAVE_AREA_PIXELS = save_area_pixels
self.SAVE_CODE_PIXELS = save_code_pixels
self.SAVE_DS_PIXELS = save_ds_pixels
self.SAVE_DW_PIXELS = save_dw_pixels
self.code_ts_file = code_ts_file
self.area_ts_file = area_ts_file
self.ds_ts_file = ds_ts_file
self.dw_ts_file = dw_ts_file
#-----------------------
# Close the config file
#-----------------------
file_unit.close()
#---------------------------------------------------------
# Make sure that all "save_dts" are larger or equal to
# the specified process dt. There is no point in saving
# results more often than they change.
# Issue a message to this effect if any are smaller ??
#---------------------------------------------------------
self.save_grid_dt = maximum(self.save_grid_dt, self.dt)
self.save_pixels_dt = maximum(self.save_pixels_dt, self.dt)
# read_cfg_file()
#-------------------------------------------------------------------
def set_computed_input_vars(self):
self.ALL_PERIODIC = (self.LR_PERIODIC and self.TB_PERIODIC)
#---------------------------------------------------------
# Make sure that all "save_dts" are larger or equal to
# the specified process dt. There is no point in saving
# results more often than they change.
# Issue a message to this effect if any are smaller ??
#---------------------------------------------------------
self.save_grid_dt = maximum(self.save_grid_dt, self.dt)
self.save_pixels_dt = maximum(self.save_pixels_dt, self.dt)
# set_computed_input_vars()
#-------------------------------------------------------------------
def embed_child_components(self):
#------------------------------------------------
# Instantiate and embed "process components"
# in the place of the CCA ports.
#------------------------------------------------
import erosion_base
self.ep = erosion_base.erosion_component()
# embed_child_components()
#-------------------------------------------------------------------
def add_child_ports(self):
#-------------------------------------------------------
# Note: Erosion component does not have any ports yet.
#-------------------------------------------------------
pass
# add_child_ports()
#-------------------------------------------------------------------
def initialize_ports(self):
#-------------------------------------------------
# Initialize the process objects/components
# This is also where output files are opened.
#-------------------------------------------------
DEBUG = True
if (self.ep.get_status() != 'initialized'): # erosion vars
self.ep.initialize( sync_file=self.sync_file )
if (DEBUG): print('\nD8 component initialized EROSION.')
# initialize_ports()
#---------------------------------------------------------------------
def initialize_computed_vars(self, DOUBLE=False):
nx = self.nx # (Local synonyms)
ny = self.ny
if (DOUBLE):
self.dw = zeros([ny, nx], dtype='Float64')
self.ds = zeros([ny, nx], dtype='Float64')
self.A = zeros([ny, nx], dtype='Float64')
else:
self.dw = zeros([ny, nx], dtype='Float32')
self.ds = zeros([ny, nx], dtype='Float32')
self.A = zeros([ny, nx], dtype='Float32')
# initialize_computed_vars()
#---------------------------------------------------------------------
def get_pixel_dimensions(self, DOUBLE=False,
SILENT=True, REPORT=False):
if not(SILENT):
print('Computing pixel dimensions...')
dx, dy, dd = pixels.get_sizes_by_row(self.rti, METERS=True)
self.dx = dx
self.dy = dy
self.dd = dd
if not(DOUBLE):
self.dx = float32(self.dx)
self.dy = float32(self.dy)
self.dd = float32(self.dd)
#------------------------------------------------
# Get grid cell areas, "da", which is either a
# scalar (if same for all grid cells) or a grid
#------------------------------------------------
# self.da = pixels.get_da( self.rti )
#---------------------------------------
# Note that "da" is saved into self by
# self.read_grid_info().
#---------------------------------------
if (REPORT):
w = 8
dx_str = str(self.dx.min()).ljust(w) + ', '
dx_str += str(self.dx.max()).ljust(w) + ' [m]'
print(' min(dx), max(dx) = ' + dx_str)
#--------------------------------------------------
dy_str = str(self.dy.min()).ljust(w) + ', '
dy_str += str(self.dy.max()).ljust(w) + ' [m]'
print(' min(dy), max(dy) = ' + dy_str)
#--------------------------------------------------
dd_str = str(self.dd.min()).ljust(w) + ', '
dd_str += str(self.dd.max()).ljust(w) + ' [m]'
print(' min(dd), max(dd) = ' + dd_str)
#--------------------------------------------------
da_str = str(self.da.min()).ljust(w) + ', '
da_str += str(self.da.max()).ljust(w) + ' [m^2]'
print(' min(da), max(da) = ' + da_str)
## print ' min(dx) =', self.dx.min(), ' [m]'
## print ' max(dx) =', self.dx.max(), ' [m]'
## #------------------------------------------------
## print ' min(dy) =', self.dy.min(), ' [m]'
## print ' max(dy) =', self.dy.max(), ' [m]'
## #------------------------------------------------
## print ' min(dd) =', self.dd.min(), ' [m]'
## print ' max(dd) =', self.dd.max(), ' [m]'
## #------------------------------------------------
## print ' min(da) =', self.da.min(), ' [m^2]'
## print ' max(da) =', self.da.max(), ' [m^2]'
# get_pixel_dimensions()
#---------------------------------------------------------------------
def get_flow_code_list(self, ARC=False):
#-------------------------------------------
# Notes: RT flow codes = | 64 128 1 |
# | 32 x 2 |
# | 16 8 4 |
# ARC/INFO codes = | 32 64 128 |
# | 16 x 1 |
# | 8 4 2 |
#-------------------------------------------
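        #-------------------------------------------
        # Example (sketch): RT code 2 means "flow
        # east"; its opposite, west, is RT code 32,
        # which is what get_flow_code_list_opps()
        # pairs with it (code_opps[1] == 32 for
        # code_list[1] == 2).
        #-------------------------------------------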
if not(ARC):
self.code_list = int16([1, 2, 4, 8, 16, 32, 64, 128])
else:
self.code_list = int16([128, 1, 2, 4, 8, 16, 32, 64])
# get_flow_code_list()
#---------------------------------------------------------------------
def get_flow_code_list_opps(self, ARC=False):
if not(ARC):
self.code_opps = int16([16, 32, 64, 128, 1, 2, 4, 8])
else:
self.code_opps = int16([8, 16, 32, 64, 128, 1, 2, 4])
# get_flow_code_list_opps()
#---------------------------------------------------------------------
def get_valid_code_map(self, SILENT=True):
#----------------------------------------------------------------
# Notes: This map is used near the end of update_flow_grid()
# to set any invalid flow code to 0, which signifies
        #        that the flow direction for that grid cell is undefined.
#----------------------------------------------------------------
self.valid_code_map = zeros([256], dtype='UInt8')
self.valid_code_map[ self.code_list ] = numpy.uint8( self.code_list )
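        #----------------------------------------------------------------
        # Example (sketch): valid_code_map[2] == 2 but valid_code_map[3]
        # == 0, so flow_grid = valid_code_map[flow_grid] zeroes every
        # invalid code in a single vectorized lookup.
        #----------------------------------------------------------------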
# get_valid_code_map()
#---------------------------------------------------------------------
def get_ID_grid(self, SILENT=True):
#-----------------------------------------------------
# Get a grid which for each grid cell contains the
# calendar-style index (or ID) of that grid cell.
#-----------------------------------------------------
if not(SILENT):
print('Computing pixel IDs...')
nx = self.nx
ny = self.ny
self.ID_grid = reshape(arange(nx*ny, dtype='Int32'), [ny, nx])
# get_ID_grid()
#---------------------------------------------------------------------
def get_parent_inc_map(self):
#-----------------------------------------
# Note: parent_ID = ID + incs[flow_code].
#-----------------------------------------
nx = self.nx
incs = int32(array([-nx + 1, 1, nx + 1, nx, nx - 1,
-1, -nx - 1, -nx]))
MAP = zeros([129], dtype='Int32')
MAP[self.code_list] = incs
self.inc_map = MAP
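        #--------------------------------------------------
        # Example (sketch): with nx columns, a cell whose
        # flow code is 2 (east) has parent_ID = ID +
        # inc_map[2] = ID + 1, and one whose code is 8
        # (south) has parent_ID = ID + nx.
        #--------------------------------------------------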
# get_parent_inc_map()
#-------------------------------------------------------------------
def get_edge_IDs(self, SILENT=True):
if not(SILENT):
print('Computing edge pixel IDs...')
#------------------------------------------
# Get IDs of edge pixels, making sure not
# to double-count the corner pixels
#------------------------------------------
nx = self.nx
ny = self.ny
T_IDs = arange(nx, dtype='Int32')
B_IDs = T_IDs + (ny - 1) * nx
L_IDs = (1 + arange(ny - 2, dtype='Int32')) * nx
R_IDs = L_IDs + (nx - 1)
edge_IDs = concatenate([T_IDs, B_IDs, L_IDs, R_IDs])
#-------------------------------------------
# Save IDs as a tuple of row indices and
# calendar indices, "numpy.where" style
#-------------------------------------------
        self.edge_IDs = (edge_IDs // nx, edge_IDs % nx) ## NB! (row, col)
#------------------------------------------
# Save IDs as a 1D array of long-integer,
# calendar-style indices
#------------------------------------------
# self.edge_IDs = edge_IDs
# get_edge_IDs()
#---------------------------------------------------------------------
def get_not_edge_grid(self, SILENT=True):
if not(SILENT):
print('Computing "not_edge" grid...')
self.not_edge_grid = numpy.ones([self.ny, self.nx],
dtype='UInt8')
self.not_edge_grid[ self.edge_IDs ] = 0
## self.not_edge_grid[:, 0] = 0
## self.not_edge_grid[:, nx - 1] = 0
## self.not_edge_grid[0, :] = 0
## self.not_edge_grid[ny - 1, :] = 0
# get_not_edge_grid()
#---------------------------------------------------------------------
def update_parent_ID_grid(self, SILENT=True):
#-----------------------------------------------------
# Get a grid which for each grid cell contains the
# calendar-style index (or ID) of the grid cell that
# its D8 code says it flows to.
#-----------------------------------------------------
# Note: This version can handle periodic boundaries,
# as can occur in a landscape evolution model.
#-----------------------------------------------------
if not(SILENT):
print('Finding parent pixel IDs...')
nx = self.nx
ny = self.ny
self.parent_ID_grid = self.ID_grid + self.inc_map[self.flow_grid]
#---------------------------------
# Iterators for using 1D indices
#---------------------------------
dirs = self.flow_grid.flat
pIDs = self.parent_ID_grid.flat
#---------------------------------------
# Get IDs for pixels on the four edges
#---------------------------------------
T = arange(nx, dtype='Int32')
B = T + (nx * (ny - 1))
L = nx * arange(ny, dtype='Int32')
R = L + (nx - 1)
#------------------------------------------------
# Remap parent IDs for pixels on left and right
# edges to support periodic boundary conditions
#------------------------------------------------
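        #------------------------------------------------
        # Example (sketch): a right-edge cell with ID =
        # (k * nx) + (nx - 1) flowing east (code 2) gets
        # parent (k + 1) * nx; subtracting nx wraps it to
        # k * nx, the left edge of the same row.
        #------------------------------------------------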
if (self.LR_PERIODIC):
w = where(logical_or(logical_or((dirs[R] == 1), (dirs[R] == 2)), (dirs[R] == 4)))
if (size(w[0]) != 0):
pIDs[R[w]] -= nx # (subtract nx)
#-------------------------------------------------------------------------------------
w = where(logical_or(logical_or((dirs[L] == 16), (dirs[L] == 32)), (dirs[L] == 64)))
if (size(w[0]) != 0):
pIDs[L[w]] += nx
else:
pIDs[R] = 0
pIDs[L] = 0
#------------------------------------------------
# Remap parent IDs for pixels on top and bottom
# edges to support periodic boundary conditions
#------------------------------------------------
if (self.TB_PERIODIC):
w = where(logical_or(logical_or((dirs[T] == 1), (dirs[T] == 64)), (dirs[T] == 128)))
if (size(w[0]) != 0):
pIDs[T[w]] += self.rti.n_pixels ## (DOUBLE CHECK THIS)
#-------------------------------------------------------------------------------------
w = where(logical_or(logical_or((dirs[B] == 4), (dirs[B] == 8)), (dirs[B] == 16)))
if (size(w[0]) != 0):
pIDs[B[w]] -= self.rti.n_pixels # (subtract n_pixels)
else:
pIDs[T] = 0
pIDs[B] = 0
#---------------------------------------
# Pixels with invalid flow directions,
# like edges, get assigned a pID of 0.
#---------------------------------------
wbad = where(self.flow_grid <= 0)
nw = size(wbad[0])
if (nw != 0):
self.parent_ID_grid[wbad] = 0
# update_parent_ID_grid()
#---------------------------------------------------------------------
def update_parent_IDs(self):
#---------------------------------------------------------
# Notes: This version cannot handle periodic boundaries,
# and requires that D8 flow codes be set to zero
# on the four edges. This can be done at the end
# of the update_flow_grid() function.
#---------------------------------------------------------
# NB! The use of 0's here is important.
# If iterating, pID[0]=0.
#---------------------------------------------------------
#-----------------------------------------------------
# Get a grid which for each grid cell contains the
# calendar-style index (or ID) of the grid cell that
# its D8 code says it flows to.
#-----------------------------------------------------
pID_grid = self.parent_ID_grid
#-------------------------------------------
# Save IDs as a tuple of row indices and
# calendar indices, "numpy.where" style
#-------------------------------------------
        self.parent_IDs = (pID_grid // self.nx, pID_grid % self.nx)
# update_parent_IDs()
#-------------------------------------------------------------------
## def update_non_parent_IDs(parent_IDs, flow_grid, rti):
##
## #---------------------------
## # Get flow grid dimensions
## #---------------------------
## nx = rti.ncols
## ny = rti.nrows
##
## #---------------------------------------
## # Return the IDs of non-parent pixels,
## # such as ridges, but exclude pixels
## # with flow code of 0, such as edges
## # and nodata pixels.
## #---------------------------------------
## base = zeros([ny, nx], dtype='UInt8')
## base[parent_IDs] = 1
##
## wbad = no_flow_IDs(flow_grid, rti)
    ## nbad = size(wbad[0])
## if (nbad > 0):
## base[wbad] = 1 ########## Should be 1 or 0 ??
##
## wnot = where(base == 0)
## nnot = size(wnot[0])
## if (nnot != 0):
## non_parent_IDs = wnot
## else:
## non_parent_IDs = -int32(1)
##
## return non_parent_IDs
##
## # update_non_parent_IDs()
#---------------------------------------------------------------------
def update_flow_from_IDs(self):
#----------------------------------------------------------
# Notes: This function returns the 4-byte long-integer
# array IDs of pixels that flow in a particular
# direction. RiverTools flow codes are assumed.
#----------------------------------------------------------
# Notes: Later, rename w1 to w_NE, n1 to n_NE, then
# use self.code_list[0] vs. 1, etc.. This will
# then provide support for ARC flow codes, etc.
#----------------------------------------------------------
self.w1 = where(self.flow_grid == 1)
self.n1 = size(self.w1[0]) # (northeast)
self.w2 = where(self.flow_grid == 2)
self.n2 = size(self.w2[0]) # (east)
self.w3 = where(self.flow_grid == 4)
self.n3 = size(self.w3[0]) # (southeast)
self.w4 = where(self.flow_grid == 8)
self.n4 = size(self.w4[0]) # (south)
self.w5 = where(self.flow_grid == 16)
self.n5 = size(self.w5[0]) # (southwest)
self.w6 = where(self.flow_grid == 32)
self.n6 = size(self.w6[0]) # (west)
self.w7 = where(self.flow_grid == 64)
self.n7 = size(self.w7[0]) # (northwest)
self.w8 = where(self.flow_grid == 128)
self.n8 = size(self.w8[0]) # (north)
##### Same as noflow_IDs ####################
## self.w0 = where(self.flow_grid <= 0)
## self.n0 = size(self.w0[0]) #(undefined)
# print 'n1 = ' + str(n1)
# update_flow_from_IDs()
#---------------------------------------------------------------------
def update_flow_to_IDs(self):
nx = self.nx
#-------------------------------------------------
# Get IDs of "parent cells" that are downstream
# of pixels that flow in a given direction.
#-------------------------------------------------
if (self.n1 != 0): # northeast
p1_IDs = self.parent_ID_grid[self.w1]
            self.p1 = (p1_IDs // nx, p1_IDs % nx)
else:
self.p1 = (-1, -1)
#-------------------------------------------------
if (self.n2 != 0): # east
p2_IDs = self.parent_ID_grid[self.w2]
            self.p2 = (p2_IDs // nx, p2_IDs % nx)
else:
self.p2 = (-1, -1)
#-------------------------------------------------
if (self.n3 != 0): # southeast
p3_IDs = self.parent_ID_grid[self.w3]
            self.p3 = (p3_IDs // nx, p3_IDs % nx)
else:
self.p3 = (-1, -1)
#-------------------------------------------------
if (self.n4 != 0): # south
p4_IDs = self.parent_ID_grid[self.w4]
            self.p4 = (p4_IDs // nx, p4_IDs % nx)
else:
self.p4 = (-1, -1)
#-------------------------------------------------
if (self.n5 != 0): # southwest
p5_IDs = self.parent_ID_grid[self.w5]
            self.p5 = (p5_IDs // nx, p5_IDs % nx)
else:
self.p5 = (-1, -1)
#-------------------------------------------------
if (self.n6 != 0): # west
p6_IDs = self.parent_ID_grid[self.w6]
            self.p6 = (p6_IDs // nx, p6_IDs % nx)
else:
self.p6 = (-1, -1)
#-------------------------------------------------
if (self.n7 != 0): # northwest
p7_IDs = self.parent_ID_grid[self.w7]
            self.p7 = (p7_IDs // nx, p7_IDs % nx)
else:
self.p7 = (-1, -1)
#-------------------------------------------------
if (self.n8 != 0): # north
p8_IDs = self.parent_ID_grid[self.w8]
            self.p8 = (p8_IDs // nx, p8_IDs % nx)
else:
self.p8 = (-1, -1)
#-------------------------------------------------
## print 'p1.shape, size(p1) =', p1.shape, size(p1)
## print 'w1[0].shape, size(w1[0]) =', w1[0].shape, size(w1[0])
#-------------------------------------
# Some flow directions may not occur
#-------------------------------------
        # (Checking nN avoids indexing the (-1, -1) sentinel tuples.)
        self.p1_OK = (self.n1 != 0)
        self.p2_OK = (self.n2 != 0)
        self.p3_OK = (self.n3 != 0)
        self.p4_OK = (self.n4 != 0)
        self.p5_OK = (self.n5 != 0)
        self.p6_OK = (self.n6 != 0)
        self.p7_OK = (self.n7 != 0)
        self.p8_OK = (self.n8 != 0)
# update_flow_to_IDs()
#-------------------------------------------------------------------
def update_noflow_IDs(self):
#--------------------------------------------------
# 1/19/07. Need to set d and u to zero at any ID
# where flow terminates. This includes pixels on
# the edges, those with unresolved flow direction
# and those where elevation is nodata or NaN.
# A RiverTools flow grid will have a flow code of
# zero at all of these places.
#--------------------------------------------------
noflow_IDs = where(self.flow_grid <= 0)
num_IDs = size(noflow_IDs[0])
if (num_IDs != 0):
self.noflow_IDs = noflow_IDs
else:
#----------------------------
# Return IDs of edge pixels
#----------------------------
## self.get_edge_IDs() # (called by initialize())
            self.noflow_IDs = self.edge_IDs
# update_noflow_IDs()
#-------------------------------------------------------------------
def read_flow_grid(self, SILENT=True):
#----------------------------------------------------
# Read a grid of D8 flow codes, same size as DEM.
#----------------------------------------------------
if not(SILENT):
print('Reading D8 flow grid...')
code_file = (self.in_directory +
self.site_prefix + '_flow.rtg')
self.flow_grid = rtg_files.read_grid(code_file, self.rti,
RTG_type='BYTE')
# read_flow_grid()
#-------------------------------------------------------------------
def update_flow_grid(self, DEM=None, SILENT=True, REPORT=False):
#--------------------------------------------------------
# This can be used to test whether "update_area_grid()"
# is working, even if there is a problem with the
# "update_flow_grid()" function.
#--------------------------------------------------------
## print '### TEST: Reading existing flow grid instead'
## print '### of computing one.'
## print '################################################'
## self.read_flow_grid()
## return
#------------------------------------------------------
# NOTES: Direction codes are given by: |64 128 1|
# |32 x 2|
# |16 8 4|
# A RiverTools resolve array is returned by:
# self.get_resolve_array().
#------------------------------------------------------
if not(SILENT):
print('Updating flow grid...')
#----------------------------------------
# Assign directions where well-defined,
# and special codes otherwise
#----------------------------------------
self.start_new_flow_grid(DEM, SILENT=SILENT, REPORT=REPORT)
#----------------------------------
# Break ties with "resolve" array
#----------------------------------
self.break_flow_grid_ties(SILENT=SILENT)
#-------------------
# Link the flats ?
#-------------------
if (self.LINK_FLATS): self.link_flats(SILENT=SILENT)
#-------------------------------------
# Assign zeroes along 4 edges of DEM
#-------------------------------------------------------
# NB! This doesn't allow periodic boundary conditions.
#-------------------------------------------------------
#### self.flow_grid[ self.edge_IDs ] = 0 ##############
#--------------------------------------------------
# Change data type from 2-byte to 1-byte (NEW WAY)
#--------------------------------------------------
w = where( logical_or(self.flow_grid < 0, self.flow_grid > 128) )
if (size(w[0]) > 0):
self.flow_grid[w] = 0
self.flow_grid = self.valid_code_map[ self.flow_grid ]
#--------------------------------------------
# This doesn't work because flow_grid is 2D
#--------------------------------------------
## w = where( setmember1d(self.flow_grid, self.code_list) )
## if (size(w[0]) > 0):
## self.flow_grid[w] = 0
#--------------------
# This doesn't work
#--------------------
## w = where( self.flow_grid not in self.code_list )
## ## if (size(w) > 0):
## if (size(w[0]) > 0):
## self.flow_grid[w] = 0
if not(SILENT):
print(' min(codes), max(codes) =', \
self.flow_grid.min(), self.flow_grid.max())
print('flow grid =')
print(self.flow_grid)
#-------------------------------------------------
# Compare saved flow grid to one just computed
#-------------------------------------------------
# Note that unless self.LR_PERIODIC = False and
# self.TB_PERIODIC = False, the flow grids won't
# agree on the edges. This is because we don't
# set them to zero when using periodic boundary
# conditions.
#-------------------------------------------------
if (self.DEBUG):
code_file = (self.in_directory +
self.site_prefix + '_flow.rtg')
saved_flow_grid = rtg_files.read_grid(code_file, self.rti,
RTG_type='BYTE')
w = where( saved_flow_grid != uint8(self.flow_grid) )
if (size(w[0]) == 0):
print('##### SUCCESS! Flow grids are identical.')
else:
print('##### FAILURE. Flow grids differ at:')
print('number of pixels =', size(w[0]))
#---------------------------------------------------
# Change data type from 2-byte to 1-byte (OLD WAY)
#---------------------------------------------------
## dirs = self.flow_grid
## w = where(logical_and(logical_and(logical_and(logical_and( \
## logical_and(logical_and(logical_and(logical_and( \
## (dirs != 1), (dirs != 2)), (dirs != 4)), \
## (dirs != 8)), (dirs != 16)), (dirs != 32)), \
## (dirs != 64)), (dirs != 128)), (dirs != 0)))
## n_bad = size(w[0])
## if (n_bad > 0):
## dirs[w] = int16(0)
## self.flow_grid = numpy.uint8( dirs )
### dirs = idl_func.byte(dirs)
#----------------------------
# Save dir values to file ?
#----------------------------
## if (file is not None):
## file_unit = open(file, 'wb')
## I2PY_SWAP_ENDIAN = False
## if (I2PY_SWAP_ENDIAN):
## array(dirs, copy=0).byteswap(True)
## dirs.tofile(file_unit)
## file_unit.close()
# update_flow_grid()
#-------------------------------------------------------------------
def start_new_flow_grid(self, DEM=None, SILENT=True, REPORT=False):
#--------------------------------------------------------------
# Notes: In caller, modified so that DEM array has
# type INTEGER when DEM has type BYTE. Need a signed
# type to compute slopes correctly here. For example,
# (100b - 200b) = 156b.
# Use of NumPy's ROLL induces periodic boundaries.
#--------------------------------------------------------------
if not(SILENT):
print(' update_flow_grid(): Initializing grid...')
## print ' Starting new flow grid...'
## print ' Initializing grid in update_flow_grid()...'
#-----------------------------
# Define some local synonyms
#-----------------------------
dx = self.dx[0]
dy = self.dy[0]
dd = self.dd[0] #################
#-----------------------------------------------------
# Get a depression-filled DEM via the "erosion" port
#-----------------------------------------------------
## DEM = self.get_port_data('DEM', self.ep, 'EROSION')
#------------------------------------------------
# Read DEM from file. Used by unit_test() now.
#---------------------------------------------------------
# NOTE! This DEM should have already had the depressions
# filled, e.g. by the tool in fill_pits.py.
#---------------------------------------------------------
        if (DEM is None):
dp = (self.in_directory + self.site_prefix)
DEM_file = (dp + '_2D-z0.rtg')
if not(os.path.exists(DEM_file)):
DEM_file = (dp + '_DEM.rtg')
if not(os.path.exists(DEM_file)):
print('ERROR: Could not find DEM file.')
return
DEM = rtg_files.read_grid( DEM_file, self.rti, SILENT=False )
if not(SILENT):
print(' min(DEM), max(DEM) =', DEM.min(), DEM.max())
#------------------------------
# Slopes to 8 neighbor pixels
#------------------------------
s1 = (DEM - numpy.roll(numpy.roll(DEM, 1, axis=0), -1, axis=1)) / dd # (upper-right)
s2 = (DEM - numpy.roll(DEM, -1, axis=1)) / dx # (right)
s3 = (DEM - numpy.roll(numpy.roll(DEM, -1, axis=0), -1, axis=1)) / dd # (lower-right)
s4 = (DEM - numpy.roll(DEM, -1, axis=0)) / dy # (bottom)
s5 = (DEM - numpy.roll(numpy.roll(DEM, -1, axis=0), 1, axis=1)) / dd # (lower-left)
s6 = (DEM - numpy.roll(DEM, 1, axis=1)) / dx # (left)
s7 = (DEM - numpy.roll(numpy.roll(DEM, 1, axis=0), 1, axis=1)) / dd # (upper-left)
s8 = (DEM - numpy.roll(DEM, 1, axis=0)) / dy # (top)
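        #----------------------------------------------------------------
        # Note: numpy.roll(DEM, 1, axis=0) shifts rows downward, so the
        # rolled value at (row, col) is DEM[row-1, col]; s8 is therefore
        # the slope toward the neighbor above ("top"). The wrap-around
        # at the edges is what induces the periodic boundaries noted
        # above.
        #----------------------------------------------------------------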
#--------------------------
# Find the steepest slope
#--------------------------
max_slope = numpy.maximum(s1, s2)
max_slope = numpy.maximum(max_slope, s3)
max_slope = numpy.maximum(max_slope, s4)
max_slope =
|
numpy.maximum(max_slope, s5)
|
numpy.maximum
|
"""
Copyright 2017 <NAME>, Toyota Technological Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List
from pypvcell.illumination import Illumination
from pypvcell.photocurrent import gen_step_qe, calc_jsc_from_eg, calc_jsc
from .ivsolver import calculate_j01, gen_rec_iv_by_rad_eta, one_diode_v_from_i, \
solve_mj_iv_obj_with_optimization, one_diode_v_from_i_p, \
solve_series_connected_ivs, solve_v_from_j_adding_epsilon
from .fom import max_power
from .spectrum import Spectrum, _energy_to_length
from .detail_balanced_MJ import calculate_j01_from_qe
import numpy as np
import scipy.constants as sc
from scipy.optimize import newton, bisect
from scipy.optimize import bisect
thermal_volt = sc.k / sc.e
def rev_diode(voltage):
rev_j01 = 4.46e-15
rev_bd_v = 0.1
    # ideal-diode form: j01 * (exp(qV/kT) - 1)
    return -rev_j01 * (np.exp(sc.e * (-voltage - rev_bd_v) / (sc.k * 300)) - 1)
def rev_breakdown_diode(voltage):
rev_j01 = 4.46e-17
rev_bd_v = 6
    # ideal-diode form: j01 * (exp(qV/kT) - 1)
    return -rev_j01 * (np.exp(sc.e * (-voltage - rev_bd_v) / (sc.k * 300)) - 1)
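# Both functions above model reverse-bias conduction with a diode oriented
# against the cell: rev_diode is a softer leakage path (onset near -0.1 V),
# while rev_breakdown_diode turns on near -6 V and is used as the default
# plug_in_term added to I(V) in SQCell below.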
def guess_max_volt(rad_eta, jsc, j01, cell_T):
"""
Get an estimate of the maximum voltage by given Jsc
:param rad_eta: radiative efficiency
:param jsc: Jsc in A/m^2
:param j01: J01 in A/m^2
:param cell_T: cell temperature in Kelvin
:return:
"""
return rad_eta * np.log(jsc / j01) * thermal_volt * cell_T
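# Numeric sketch (illustrative values only): with rad_eta = 1,
# jsc = 300 A/m^2, j01 = 1e-17 A/m^2 and cell_T = 300 K,
# guess_max_volt(1, 300, 1e-17, 300) ~ 0.0259 * ln(3e19) ~ 1.16 V.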
class SolarCell(object):
def __init__(self):
self.j01 = None
self.j02 = None
self.jsc = 0
self.j01_r = None
self.j02_r = None
        self.subcell = []
        self.desp = None
def get_iv(self):
raise NotImplementedError()
def set_input_spectrum(self, input_spectrum):
"""
        Set the illumination spectrum of the solar cell
:param input_spectrum: the illumination spectrum.
:type input_spectrum: Illumination
"""
raise NotImplementedError()
def get_transmit_spectrum(self):
"""
:return: The transmitted spectrum of this solar cell
:rtype: Illumination
"""
raise NotImplementedError()
def get_v_from_j(self, current):
raise NotImplementedError()
def get_j_from_v(self, voltage):
raise NotImplementedError()
def set_description(self, desp):
self.desp = desp
def __str__(self):
        if self.desp is None:
return "solar cell"
else:
return self.desp
def set_input_spectrum(subcells: List[SolarCell], input_spectrum: Illumination):
filtered_spectrum = None
# Set spectrum for each subcell
for i, sc in enumerate(subcells):
if i == 0:
sc.set_input_spectrum(input_spectrum)
else:
sc.set_input_spectrum(filtered_spectrum)
filtered_spectrum = sc.get_transmit_spectrum()
return subcells
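# Usage sketch (hypothetical band gaps): stack three SQ cells so that each
# subcell sees only what the cell above it transmits:
#   cells = [SQCell(1.9, 300), SQCell(1.4, 300), SQCell(0.9, 300)]
#   set_input_spectrum(cells, some_illumination)  # some_illumination: an Illumination instance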
class TransparentCell(SolarCell):
def __init__(self):
self.ill = None
def set_input_spectrum(self, input_spectrum):
self.ill = input_spectrum
def get_transmit_spectrum(self):
return self.ill
def get_eta(self):
return 0
class SQCell(SolarCell):
"""
A SolarCell at Shockley-Queisser limit
"""
def __init__(self, eg: float, cell_T, rad_eta=1, n_c=3.5, n_s=1, approx=False,
plug_in_term='default_rev_breakdown'):
"""
Initialize a solar cell with Shockley-Queisser(SQ) Model
It loads the class and sets up J01 of the cell
:param eg: band gap in (eV)
        :param cell_T: cell temperature
:param rad_eta: radiative efficiency in fraction
:param n_c: refractive index of cell
:param n_s: refractive index of ambient
:param approx: Set False to enable higher order terms for calculating J01
        :param plug_in_term: the term that is added into I(V); "default_rev_breakdown" uses the default reverse-breakdown diode
"""
super().__init__()
self.eg = eg
self.cell_T = cell_T
self.n_c = n_c
self.n_s = n_s
self.rad_eta = rad_eta
self.approx = approx
self.jsc = 0
self.j02 = None
self.desp = 'SQCell'
self._construct()
self.subcell = [self]
if (plug_in_term == "default_rev_breakdown"):
self.plug_in_term = rev_breakdown_diode
else:
self.plug_in_term = plug_in_term
def _construct(self):
method = 'ana'
if method == 'ana':
self.j01 = calculate_j01(self.eg, temperature=self.cell_T,
n1=1, n_c=self.n_c, n_s=self.n_s, approx=self.approx)
elif method == 'num':
qe = gen_step_qe(self.eg, 1)
self.j01 = calculate_j01_from_qe(qe, n_c=self.n_c, n_s=self.n_s, T=self.cell_T)
self.j01_r = self.j01 / self.rad_eta
def set_input_spectrum(self, input_spectrum):
self.ill = input_spectrum
self.jsc = calc_jsc_from_eg(input_spectrum, self.eg)
def get_transmit_spectrum(self):
sp = self.ill.get_spectrum(to_x_unit='m')
filter_y = sp[0, :] >= _energy_to_length(self.eg, 'eV', 'm')
filter = Spectrum(sp[0, :], filter_y, x_unit='m')
return self.ill * filter
def get_eta(self):
volt = np.linspace(-0.5, self.eg, num=300)
volt, current = gen_rec_iv_by_rad_eta(self.j01, self.rad_eta, 1, self.cell_T, 1e15, voltage=volt, jsc=self.jsc)
max_p = max_power(volt, current)
return max_p / self.ill.rsum()
def get_iv(self, volt=None):
if volt is None:
max_volt = guess_max_volt(rad_eta=self.rad_eta, jsc=self.jsc, j01=self.j01, cell_T=self.cell_T) + 0.2
volt = np.linspace(-10, max_volt, num=1000)
volt, current = gen_rec_iv_by_rad_eta(self.j01, self.rad_eta, 1, self.cell_T, np.inf, voltage=volt,
jsc=self.jsc, plug_in_term=self.plug_in_term)
return volt, current
def get_v_from_j(self, current):
return one_diode_v_from_i(current, self.j01, rad_eta=self.rad_eta,
n1=1, temperature=self.cell_T, jsc=self.jsc)
def get_v_from_j_p(self, current):
return one_diode_v_from_i_p(current, self.j01, rad_eta=self.rad_eta,
n1=1, temperature=self.cell_T, jsc=self.jsc)
def get_j_from_v(self, volt):
_, current = gen_rec_iv_by_rad_eta(self.j01, self.rad_eta, 1, self.cell_T, np.inf,
voltage=volt, jsc=self.jsc, plug_in_term=self.plug_in_term)
return current
class HighPSQCell(SQCell):
"""
    This calculates a SQ-limit solar cell like SQCell,
    but uses some tricks to deal with the current at negative voltage numerically
"""
def get_iv(self, volt=None):
if volt is None:
max_volt = guess_max_volt(rad_eta=self.rad_eta, jsc=self.jsc, j01=self.j01, cell_T=self.cell_T) + 0.2
volt = np.linspace(-10, max_volt, num=1000)
volt, current = gen_rec_iv_by_rad_eta(self.j01, 1, 1, self.cell_T, np.inf, voltage=volt, jsc=self.jsc)
return volt, current
def get_v_from_j(self, current):
return one_diode_v_from_i(current, self.j01, rad_eta=self.rad_eta, n1=1, temperature=self.cell_T,
jsc=self.jsc, minus_one=False)[0]
def get_v_from_j_numerical(self, current, x0=0.0):
def f(x):
return self.get_j_from_v(x, to_tup=False) - current
v = newton(f, x0=x0, fprime=self.get_j_from_v_p, fprime2=self.get_j_from_v_pp)
return v
def get_v_from_j_p(self, current):
return one_diode_v_from_i_p(current, self.j01, rad_eta=self.rad_eta,
n1=1, temperature=self.cell_T, jsc=self.jsc)
def get_j_from_v(self, volt, to_tup=False):
_, current = gen_rec_iv_by_rad_eta(self.j01, 1, 1, self.cell_T,
np.inf, voltage=volt, jsc=0, minus_one=False)
tot_current = current - self.jsc
if to_tup:
return -self.jsc, current
else:
return tot_current
def get_j_from_v_p(self, volt):
m = sc.e / sc.k / self.cell_T
return 1 / self.rad_eta * m * np.exp(m * volt)
def get_j_from_v_pp(self, volt):
m = sc.e / sc.k / self.cell_T
return 1 / self.rad_eta * m * m * np.exp(m * volt)
def get_single_j_from_v_bisect_fancy(self, voltage, j_tuple):
def f(x):
j_dark = np.power(10, x)
return self.get_v_from_j((j_tuple[0], j_dark)) - voltage
logj = bisect(f, -20, 1)
return np.power(10, logj)
class DBCell(SolarCell):
def __init__(self, qe: Spectrum, rad_eta: float, T: float, n_c=3.5, n_s=1, qe_cutoff=1e-3, eg=None):
"""
Initialize the solar cell object
:param eg: band gap (in eV)
:param T: temperature of the cell
:param qe: quantum efficiency of the cell
:type qe: Spectrum
:param rad_eta: external radiative efficiency of the cell
:param n_c: refractive index of the cell
:param n_s: refractive index of ambient
        :param qe_cutoff: set the qe value to zero if it is lower than qe_cutoff. This avoids a small background in the experimental data becoming large when multiplied by the generalized Planck distribution.
"""
super().__init__()
self.qe = qe
self.rad_eta = rad_eta
self.n_c = n_c
self.n_s = n_s
self.qe_cutoff = qe_cutoff
self.cell_T = T
self.n1 = 1
self.n2 = 2
self.eg = eg
self.ill = None
self.jsc = None
self._check_qe()
self._construct()
self.subcell = [self]
def _construct(self):
self.j01 = calculate_j01_from_qe(self.qe, n_c=self.n_c, n_s=self.n_s, threshold=self.qe_cutoff, T=self.cell_T)
self.j01_r = self.j01 / self.rad_eta
def _check_qe(self):
"""
        Check that all the values of QE are between 0 and 1
:return:
"""
        if not (np.all(self.qe.core_y >= 0) and np.all(self.qe.core_y <= 1)):
raise ValueError("The values of QE should be between 0 and 1.")
def set_input_spectrum(self, input_spectrum):
self.ill = input_spectrum
self.jsc = calc_jsc(self.ill, self.qe)
def get_transmit_spectrum(self):
"""
        :return: the transmitted spectrum
:rtype: Spectrum
"""
filtered_sp = self.ill * (1 - self.qe)
return filtered_sp
def get_iv(self, volt=None):
if volt is None:
volt = np.linspace(-0.5, 5, num=300)
volt, current = gen_rec_iv_by_rad_eta(self.j01, rad_eta=self.rad_eta, n1=1,
temperature=self.cell_T, rshunt=1e15, voltage=volt, jsc=self.jsc)
return volt, current
def get_eta(self):
# Guess the required limit of maximum voltage
volt_lim = self.rad_eta * np.log(self.jsc / self.j01) * thermal_volt * self.cell_T
volt, current = self.get_iv(volt=np.linspace(-0.5, volt_lim + 0.3, 300))
max_p = max_power(volt, current)
return max_p / self.ill.rsum()
class MJCell(SolarCell):
def __init__(self, subcell, connect='2T'):
"""
        :param subcell: A list of SolarCell instances of a multijunction cell from top to bottom, e.g. [top_cell, mid_cell, bot_cell]
        :type subcell: List[SolarCell]
        :param connect: '2T' for a series-connected two-terminal stack, 'MS' to operate each subcell independently (sum of subcell maximum powers)
        """
self.subcell = subcell
self.connect = connect
def set_input_spectrum(self, input_spectrum):
self.ill = input_spectrum
filtered_spectrum = None
# Set spectrum for each subcell
for i, sc in enumerate(self.subcell):
if i == 0:
sc.set_input_spectrum(input_spectrum)
else:
sc.set_input_spectrum(filtered_spectrum)
filtered_spectrum = sc.get_transmit_spectrum()
def get_transmit_spectrum(self):
return self.subcell[-1].get_transmit_spectrum()
def get_iv(self, volt=None, verbose=0):
subcell_voltage = np.linspace(-5, 1.9, num=300)
all_iv = [(sc.get_iv(volt=subcell_voltage)) for sc in self.subcell]
# v, i = new_solve_mj_iv(all_iv)
# v, i = solve_mj_iv_obj_with_optimization(self.subcell, verbose=verbose)
iv_funcs = [cell.get_j_from_v for cell in self.subcell]
v, i = solve_series_connected_ivs(iv_funcs, -3, 3, 100)
return v, i
    def get_j_from_v(self, voltage, verbose=False, max_iter=0):
        # Iteratively refine J(V): solve each subcell's V(J) at the interpolated
        # current, then re-interpolate on the densified IV curve.
        curr_v, curr_i = self.get_iv()
        if isinstance(voltage, (float, int)):
            voltage = np.array([voltage])
iter_num = 0
interped_i = np.interp(voltage, curr_v, curr_i)
while True:
if iter_num >= max_iter:
break
solved_vs = np.empty((len(self.subcell), len(voltage) * 2))
for subcell_idx, cell in enumerate(self.subcell):
if iter_num == 0:
solved_iv = solve_v_from_j_adding_epsilon(cell.get_j_from_v, interped_i, bisect, epsilon=0.1)
else:
solved_iv = solve_v_from_j_adding_epsilon(cell.get_j_from_v, interped_i, bisect, epsilon=0)
solved_v = solved_iv[:, 0]
solved_i = solved_iv[:, 1]
solved_vs[subcell_idx, :] = solved_v
solved_v_sum = np.sum(solved_vs, axis=0)
            # merge the newly solved (V, J) points into the cached IV samples
interped_i = np.interp(solved_v_sum, curr_v, curr_i)
curr_v = np.concatenate((curr_v, solved_v_sum))
curr_i = np.concatenate((curr_i, interped_i))
sorted_index = np.argsort(curr_v)
curr_v = curr_v[sorted_index]
curr_i = curr_i[sorted_index]
iter_num += 1
interped_i = np.interp(voltage, curr_v, curr_i)
return interped_i
def get_eta(self, verbose=0):
if self.connect == '2T':
v, i = self.get_iv(verbose=verbose)
eta = max_power(v, i) / self.ill.rsum()
elif self.connect == 'MS':
mp = 0
for sc in self.subcell:
mp += max_power(*sc.get_iv())
eta = mp / self.ill.rsum()
return eta
def get_subcell_jsc(self):
jsc = np.array([sc.get_iv(0)[1] for sc in self.subcell])
return jsc
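# Not the library's solve_series_connected_ivs, just a minimal illustration of
# the underlying series constraint: at a common current j, the stack voltage is
# the sum of the subcell voltages V_k(j). Assumes each J(V) is monotonic and
# that [v_lo, v_hi] brackets the root for every subcell.
def _series_v_of_j(j_from_v_funcs, j, v_lo=-5.0, v_hi=5.0):
    from scipy.optimize import brentq
    return sum(brentq(lambda v, f=f: f(v) - j, v_lo, v_hi)
               for f in j_from_v_funcs)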
class ResistorCell(SolarCell):
def __init__(self, resistance):
self.r = resistance
def get_v_from_j(self, current):
return current * self.r
def get_j_from_v(self, voltage):
return voltage / self.r
def get_v_from_j_p(self, current):
return self.r
def get_j_from_v_p(self, voltage):
return 1 / self.r
def get_j_from_v_by_newton(self, voltage):
voltage = float(voltage)
def f(x):
return self.get_v_from_j(x) - voltage
j = newton(f, x0=0, fprime=self.get_v_from_j_p)
return j
class SeriesConnect(SolarCell):
def __init__(self, s_list):
self.s_list = s_list
def get_v_from_j(self, current):
result_v = np.zeros_like(current)
for scell in self.s_list:
v = scell.get_v_from_j(current)
result_v += v
return result_v
def get_v_from_j_p(self, current):
result_v = np.zeros_like(current)
for scell in self.s_list:
v = scell.get_v_from_j_p(current)
result_v += v
return result_v
def get_j_from_v(self, voltage):
solved_j = []
for v in voltage:
j = self.get_single_j_from_v(v)
solved_j.append(j)
return np.array(solved_j)
def get_single_j_from_v(self, voltage, x0=0):
def f(x):
return self.get_v_from_j(x) - voltage
j = newton(f, x0=x0, fprime=self.get_v_from_j_p)
return j
def get_single_j_from_v_bisect(self, voltage, a, b):
from scipy.optimize import bisect
def f(x):
return self.get_v_from_j(x) - voltage
return bisect(f, a, b)
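# Quick sanity check with the classes above: two resistors in series act like
# one resistor of summed resistance, so V(J) adds and Newton inverts exactly.
#   stack = SeriesConnect([ResistorCell(1.0), ResistorCell(2.0)])
#   stack.get_v_from_j(np.array([0.5]))  -> array([1.5])  (0.5 A through 3 ohm)
#   stack.get_single_j_from_v(1.5)       -> 0.5  (one Newton step, fprime = 3)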
class ParallelConnect(SolarCell):
def __init__(self, s_list):
self.s_list = s_list
def get_j_from_v(self, current):
result_j = np.zeros_like(current)
for scell in self.s_list:
j = scell.get_j_from_v(current)
result_j += j
return result_j
def get_j_from_v_p(self, voltage):
result_v =
|
np.zeros_like(voltage)
|
numpy.zeros_like
|
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
import warnings
from sklearn.metrics import fbeta_score, precision_score, recall_score, confusion_matrix,f1_score
import itertools
import pickle
from matplotlib import pyplot as plt,style
from multiprocessing import Pool
import json
import os
import sys
warnings.simplefilter("ignore", category=DeprecationWarning)
style.use('ggplot')
np.random.seed(42)
label_file = sys.argv[5]
labels = []
with open(label_file) as ff:
for line in ff.readlines():
line = line.strip()
if line.startswith('#') or line == '':
continue
labels.append(line)
# TODO: Do not hardcode dictionary. Labels need to be taken from the device.
di = {}
reverse_di = {}
for i in range(len(labels)):
    di[labels[i]] = i
    reverse_di[i] = labels[i]
di.update({'anomaly':len(labels)})
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def plot_confusion_matrix(cm, classes,
recall,precision,f2,f1,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=0)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.xticks(rotation=90)
plt.text(12,0, f" Recall:{recall},\n Precision:{precision},\n F2 Score:{f2},\n F1 Score:{f1}", fontsize=12)
return plt
#plt.show()
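# Hypothetical usage sketch (labels and scores below are stand-ins):
#   cm = confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0])
#   fig = plot_confusion_matrix(cm, classes=['Normal', 'Anomalous'],
#                               recall=0.75, precision=0.8, f2=0.76, f1=0.77)
#   fig.savefig('cm_demo.png', bbox_inches='tight')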
def load_data(path):
anomaly_data = pd.read_csv(path)
# anomaly_data = anomaly_data.drop(anomaly_data.columns[0], axis=1)
return anomaly_data
def filter_anomaly(ss,anomaly_data,multivariate_model_dict,model_path):
    mv_model = multivariate_model_dict['mvmodel']
    threshold = multivariate_model_dict['treshold']  # key spelling preserved from the saved model dict
    y_test = anomaly_data['state'].apply(lambda x: 1 if x == 'anomaly' else 0)
    y_predict = (mv_model.logpdf(anomaly_data.drop(['state'], axis=1).values) < threshold).astype(int)
recall = recall_score(y_pred=y_predict, y_true=y_test, average='weighted')
precision = precision_score(y_pred=y_predict, y_true=y_test, average='weighted')
f2 = fbeta_score(y_pred=y_predict, y_true=y_test, average='weighted', beta=2)
f1 = f1_score(y_pred=y_predict, y_true=y_test, average='weighted')
_acc_score = accuracy_score(y_test, y_predict)
cm = confusion_matrix(y_test, y_predict)
plt = plot_confusion_matrix(cm, classes=['Normal', 'Anomalous'],
recall=recall, precision=precision, f2=f2, f1=f1, title='Confusion matrix')
if not os.path.exists(model_path+'/plots'):
os.makedirs(model_path+'/plots')
plt.savefig(model_path+'/plots/anomalous_cm.png',bbox_inches='tight')
anomaly_data['anomalous'] = y_predict
normal_data = anomaly_data[anomaly_data['anomalous'] == 0]
anomalous_data = anomaly_data[anomaly_data['anomalous'] == 1]
output_dict = {'predictions': y_predict, 'recall': recall, 'precision': precision, 'f1': f1, 'f2': f2}
if not os.path.exists(model_path+'/results'):
os.makedirs(model_path+'/results')
with open(model_path+'/results/anomaly_output.txt','w') as file:
file.write(json.dumps(output_dict,cls=NumpyEncoder))
return normal_data,anomalous_data
def action_classification_model(normal_data,action_class_dict):
ss = action_class_dict['standard_scaler']
pca = action_class_dict['pca']
trained_model = action_class_dict['trained_model']
transformed_data = ss.transform(normal_data.drop(['state','anomalous'], axis=1))
#TODO: Fix nan value results from transformations
transformed_data = pca.transform(transformed_data)
transformed_data = pd.DataFrame(transformed_data)
transformed_data = transformed_data.iloc[:, :4]
y_predict = trained_model.predict(transformed_data)
y_predicted_1d =
|
np.argmax(y_predict, axis=1)
|
numpy.argmax
|
import numpy as np
import numpy.linalg
import scipy
import scipy.special as special
def to_sin(cos):
return (1 - cos ** 2) ** .5
def to_log(value, dbm=False, tol=1e-15):
return 10 * np.log10(value) + 30 * int(dbm) if value >= tol else -np.inf
def from_log(value, dbm=False):
return 10 ** (value / 10 - 3 * int(dbm))
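# Worked examples for the dB helpers above:
#   to_log(1e-3, dbm=True) ->  0.0    (1 mW is 0 dBm)
#   to_log(2.0)            ->  3.01   (a factor of 2 is ~3 dB)
#   from_log(30, dbm=True) ->  1.0    (30 dBm is 1 W)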
def vec3D(x, y, z):
return np.array([x, y, z])
def to_power(value, log=True, dbm=False):
power = np.abs(value) ** 2
return to_log(power, dbm=dbm) if log else power
#
# Radiation Pattern
#
def __patch_factor(a_cos, t_cos, wavelen, width, length, tol=1e-9):
a_sin = to_sin(a_cos)
t_sin = to_sin(t_cos)
kw = np.pi / wavelen * width
kl = np.pi / wavelen * length
if a_cos < tol:
return 0
if np.abs(a_sin) < tol:
return 1.
elif np.abs(t_sin) < tol:
return
|
np.cos(kl * a_sin)
|
numpy.cos
|
"""
Module to help convert parameters to our AGN formalism
"""
import bilby
import lalsimulation
import numpy as np
from bilby.gw.conversion import (
    component_masses_to_chirp_mass, total_mass_and_mass_ratio_to_component_masses,
    chirp_mass_and_mass_ratio_to_total_mass, generate_all_bbh_parameters,
    generate_spin_parameters, generate_mass_parameters,
    convert_to_lal_binary_black_hole_parameters, generate_component_spins
)
from bilby_pipe.gracedb import (
determine_duration_and_scale_factor_from_parameters,
)
from .spin_conversions import calculate_relative_spins_from_component_spins
from numpy import cos, sin
REFERENCE_FREQ = 20
def add_kick(df):
from bbh_simulator.calculate_kick_vel_from_samples import Samples
samples = Samples(posterior=df)
samples.calculate_remnant_kick_velocity()
return samples.posterior
def add_signal_duration(df):
df["chirp_mass"] = component_masses_to_chirp_mass(df['mass_1'], df['mass_2'])
duration, roq_scale_factor = np.vectorize(
determine_duration_and_scale_factor_from_parameters
)(chirp_mass=df["chirp_mass"])
df["duration"] = duration
long_signals = [
f"data{i}" for i in range(len(duration)) if duration[i] > 4
]
# print(f"long_signals= " + str(long_signals).replace("'", ""))
return df
def add_snr(df):
required_params = [
"dec",
"ra",
"theta_jn",
"geocent_time",
"luminosity_distance",
"psi",
"phase",
"mass_1",
"mass_2",
"a_1",
"a_2",
"tilt_1",
"tilt_2",
"phi_12",
"phi_jl",
]
df_cols = list(df.keys())
missing_params = set(required_params) - set(df_cols)
if len(missing_params) != 0:
raise ValueError(f"Params missing for SNR calculation: {missing_params}")
h1_snr, l1_snr, network_snr = _get_injection_snr(**df)
df["h1_snr"] = h1_snr
df["l1_snr"] = l1_snr
df["network_snr"] = network_snr
return df
@np.vectorize
def _get_injection_snr(
a_1,
a_2,
dec,
ra,
psi,
phi_12,
phase,
geocent_time,
mass_1,
mass_2,
luminosity_distance,
tilt_1,
tilt_2,
theta_jn,
phi_jl,
**kwargs,
):
"""
:returns H1 snr, L1 snr, network SNR
"""
injection_parameters = dict(
# location params
dec=dec,
ra=ra,
theta_jn=theta_jn,
luminosity_distance=luminosity_distance,
geocent_time=geocent_time,
# phase params
psi=psi,
phase=phase,
# mass params
mass_1=mass_1,
mass_2=mass_2,
# spin params
a_1=a_1,
a_2=a_2,
phi_12=phi_12,
tilt_1=tilt_1,
tilt_2=tilt_2,
phi_jl=phi_jl,
)
chirp_mass = bilby.gw.conversion.component_masses_to_chirp_mass(
mass_1, mass_2
)
duration, _ = determine_duration_and_scale_factor_from_parameters(
chirp_mass
)
sampling_frequency = 2048.0
waveform_generator = bilby.gw.WaveformGenerator(
duration=duration,
sampling_frequency=sampling_frequency,
frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole,
parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters,
waveform_arguments=dict(
waveform_approximant="IMRPhenomPv2",
reference_frequency=REFERENCE_FREQ,
minimum_frequency=20.0,
),
)
# Set up interferometers.
ifos = bilby.gw.detector.InterferometerList(["H1", "L1"])
ifos.set_strain_data_from_power_spectral_densities(
sampling_frequency=sampling_frequency,
duration=duration,
start_time=injection_parameters["geocent_time"] - 2,
)
ifos.inject_signal(
waveform_generator=waveform_generator, parameters=injection_parameters
)
snrs = [ifo.meta_data["optimal_SNR"] for ifo in ifos]
network_snr = np.sqrt(np.sum([i ** 2 for i in snrs]))
return snrs[0], snrs[1], network_snr
@np.vectorize
def get_chi_eff(s1z, s2z, q):
return (s1z + s2z * q) / (1 + q)
@np.vectorize
def get_chi_p(s1x, s1y, s2x, s2y, q):
chi1p = np.sqrt(s1x ** 2 + s1y ** 2)
chi2p = np.sqrt(s2x ** 2 + s2y ** 2)
qfactor = q * ((4 * q) + 3) / (4 + (3 * q))
return np.maximum(chi1p, chi2p * qfactor)
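# Sanity checks for the spin helpers above:
#   get_chi_eff(0.5, 0.5, q=0.8)          -> 0.5  (equal aligned spins give chi_eff = chi)
#   get_chi_p(0.3, 0.4, 0.0, 0.0, q=0.8)  -> 0.5  (in-plane spin of the primary dominates)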
def get_component_mass_from_source_mass_and_z(m1_source, q, z):
m1 = m1_source * (1 +
|
np.array(z)
|
numpy.array
|
import numpy as np
import numexpr as ne
import numba
from scipy import optimize as sio
from scipy import ndimage as scnd
@numba.jit
def get_flat_dpc(data4D_flat):
    CentralDisk = np.mean(data4D_flat, axis=0)
    # `st` is assumed to be the stemtool package, imported elsewhere as `st`
    beam_x, beam_y, _ = st.util.sobel_circle(CentralDisk)
    yy, xx = np.mgrid[0:data4D_flat.shape[1], 0:data4D_flat.shape[2]]
    YCom = np.zeros(data4D_flat.shape[0], dtype=np.float64)
    XCom = np.zeros(data4D_flat.shape[0], dtype=np.float64)
for ii in numba.prange(data4D_flat.shape[0]):
cbed = data4D_flat[ii,:,:]
YCom[ii] = (np.sum(np.multiply(yy,cbed))/np.sum(cbed)) - beam_y
XCom[ii] = (np.sum(np.multiply(xx,cbed))/np.sum(cbed)) - beam_x
return XCom,YCom
def cart2pol(x, y):
rho = ne.evaluate("((x**2) + (y**2)) ** 0.5")
phi = ne.evaluate("arctan2(y, x)")
return (rho, phi)
def pol2cart(rho, phi):
x = ne.evaluate("rho * cos(phi)")
y = ne.evaluate("rho * sin(phi)")
return (x, y)
def angle_fun(angle,rho_dpc,phi_dpc):
x_dpc,y_dpc = pol2cart(rho_dpc,(phi_dpc + (angle*((np.pi)/180))))
charge = np.gradient(x_dpc)[1] + np.gradient(y_dpc)[0]
angle_sum = np.sum(np.abs(charge))
return angle_sum
def optimize_angle(x_dpc,y_dpc,adf_stem):
flips = np.zeros(4,dtype=bool)
flips[2:4] = True
chg_sums = np.zeros(4,dtype=x_dpc.dtype)
angles = np.zeros(4,dtype=x_dpc.dtype)
x0 = 90
for ii in range(2):
to_flip = flips[2*ii]
if to_flip:
xdpcf = np.flip(x_dpc)
else:
xdpcf = x_dpc
rho_dpc,phi_dpc = cart2pol(xdpcf,y_dpc)
x = sio.minimize(angle_fun,x0,args=(rho_dpc,phi_dpc))
min_x = x.x
sol1 = min_x - 90
sol2 = min_x + 90
chg_sums[int(2*ii)] = np.sum(charge_dpc(xdpcf,y_dpc,sol1)*adf_stem)
chg_sums[int(2*ii+1)] = np.sum(charge_dpc(xdpcf,y_dpc,sol2)*adf_stem)
angles[int(2*ii)] = sol1
angles[int(2*ii+1)] = sol2
angle = (-1)*angles[chg_sums==np.amin(chg_sums)][0]
final_flip = flips[chg_sums==np.amin(chg_sums)][0]
return angle, final_flip
def corrected_dpc(x_dpc,y_dpc,angle,flipper):
if flipper:
xdpcf = np.fliplr(x_dpc)
else:
xdpcf = np.copy(x_dpc)
rho_dpc,phi_dpc = cart2pol(xdpcf,y_dpc)
x_dpc2,y_dpc2 = pol2cart(rho_dpc,(phi_dpc - (angle*((np.pi)/180))))
return x_dpc2,y_dpc2
def potential_dpc(x_dpc,y_dpc,angle=0):
if angle==0:
potential = integrate_dpc(x_dpc,y_dpc)
else:
rho_dpc,phi_dpc = cart2pol(x_dpc,y_dpc)
x_dpc,y_dpc = pol2cart(rho_dpc,phi_dpc + (angle*((np.pi)/180)))
potential = integrate_dpc(x_dpc,y_dpc)
return potential
def charge_dpc(x_dpc,y_dpc,angle=0):
if angle==0:
charge = np.gradient(x_dpc)[1] + np.gradient(y_dpc)[0]
else:
rho_dpc,phi_dpc = cart2pol(x_dpc,y_dpc)
x_dpc,y_dpc = pol2cart(rho_dpc,phi_dpc + (angle*((np.pi)/180)))
charge = np.gradient(x_dpc)[1] + np.gradient(y_dpc)[0]
return charge
def integrate_dpc(xshift,
yshift,
fourier_calibration=1):
#Initialize matrices
size_array = np.asarray(np.shape(xshift))
x_mirrored = np.zeros(2*size_array,dtype=np.float64)
y_mirrored = np.zeros(2*size_array,dtype=np.float64)
#Generate antisymmetric X arrays
x_mirrored[0:size_array[0],0:size_array[1]] = np.fliplr(np.flipud(0 - xshift))
x_mirrored[0:size_array[0],size_array[1]:(2*size_array[1])] = np.fliplr(0 - xshift)
x_mirrored[size_array[0]:(2*size_array[0]),0:size_array[1]] = np.flipud(xshift)
x_mirrored[size_array[0]:(2*size_array[0]),size_array[1]:(2*size_array[1])] = xshift
#Generate antisymmetric Y arrays
y_mirrored[0:size_array[0],0:size_array[1]] = np.fliplr(np.flipud(0 - yshift))
y_mirrored[0:size_array[0],size_array[1]:(2*size_array[1])] = np.fliplr(yshift)
y_mirrored[size_array[0]:(2*size_array[0]),0:size_array[1]] = np.flipud(0 - yshift)
y_mirrored[size_array[0]:(2*size_array[0]),size_array[1]:(2*size_array[1])] = yshift
#Calculated Fourier transform of antisymmetric matrices
x_mirr_ft = np.fft.fft2(x_mirrored)
y_mirr_ft = np.fft.fft2(y_mirrored)
#Calculated inverse Fourier space calibration
qx = np.mean(np.diff((np.arange(-size_array[1],size_array[1], 1))/
(2*fourier_calibration*size_array[1])))
qy = np.mean(np.diff((np.arange(-size_array[0],size_array[0], 1))/
(2*fourier_calibration*size_array[0])))
#Calculate mirrored CPM integrand
mirr_ft = (x_mirr_ft + ((1j)*y_mirr_ft))/(qx + ((1j)*qy))
mirr_int = np.fft.ifft2(mirr_ft)
#Select integrand from antisymmetric matrix
integrand = np.abs(mirr_int[size_array[0]:(2*size_array[0]),size_array[1]:(2*size_array[1])])
return integrand
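# A small self-check of integrate_dpc, assuming the shift maps are the x/y
# gradients of a smooth potential: the integration should recover that
# potential up to an overall offset and scale.
def _integrate_dpc_demo(n=64):
    yy, xx = np.mgrid[0:n, 0:n]
    phi = np.exp(-((xx - n / 2) ** 2 + (yy - n / 2) ** 2) / (2 * (n / 8) ** 2))
    gy, gx = np.gradient(phi)
    return integrate_dpc(gx, gy)  # ~ phi, up to offset/scale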
def centerCBED(data4D_flat,
x_cen,
y_cen):
image_size = np.asarray(data4D_flat.shape[1:3])
fourier_cal_y = (np.linspace((-image_size[0]/2), ((image_size[0]/2) - 1), image_size[0]))/image_size[0]
fourier_cal_x = (np.linspace((-image_size[1]/2), ((image_size[1]/2) - 1), image_size[1]))/image_size[1]
[fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
move_pixels = np.flip(image_size/2) - np.asarray((x_cen,y_cen))
move_phase = np.exp((-2) * np.pi * 1j * ((fourier_mesh_x*move_pixels[0]) + (fourier_mesh_y*move_pixels[1])))
FFT_4D = np.fft.fftshift(np.fft.fft2(data4D_flat,axes=(-1,-2)),axes=(-1,-2))
moved_in_fourier = np.abs(np.fft.ifft2(np.multiply(FFT_4D,move_phase),axes=(-1,-2)))
return moved_in_fourier
def wavelength_pm(voltage_kV):
m = 9.109383 * (10 ** (-31)) # mass of an electron
e = 1.602177 * (10 ** (-19)) # charge of an electron
c = 299792458 # speed of light
h = 6.62607 * (10 ** (-34)) # Planck's constant
voltage = voltage_kV * 1000
numerator = (h ** 2) * (c ** 2)
denominator = (e * voltage) * ((2*m*(c ** 2)) + (e * voltage))
wavelength_pm = (10 ** 12) *((numerator/denominator) ** 0.5) #in picometers
return wavelength_pm
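# Example: wavelength_pm(300) -> ~1.9687, i.e. the relativistic electron
# wavelength at 300 kV is about 1.97 pm.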
def get_sampling(datashape,aperture_mrad,voltage,calibration_pm,radius_pixels):
yscanf = (np.linspace((-datashape[0]/2),
((datashape[0]/2) - 1), datashape[0]))/(calibration_pm*datashape[0])
xscanf = (np.linspace((-datashape[1]/2),
((datashape[1]/2) - 1), datashape[1]))/(calibration_pm*datashape[1])
[xscanf_m, yscanf_m] = np.meshgrid(xscanf, yscanf)
scanf_m = 1000*wavelength_pm(voltage)*(((xscanf_m**2) + (yscanf_m)**2)**0.5)
fourier_beam = np.zeros_like(scanf_m)
fourier_beam[scanf_m < aperture_mrad] = 1
real_rad = (np.sum(fourier_beam)/np.pi)**0.5
sampling = radius_pixels/real_rad
return sampling
@numba.jit
def resizer1D(data,N):
M = data.size
res = np.zeros(N,dtype=data.dtype)
carry=0
m=0
for n in range(int(N)):
data_sum = carry
while m*N - n*M < M :
data_sum += data[m]
m += 1
carry = (m-(n+1)*M/N)*data[m-1]
data_sum -= carry
res[n] = data_sum*N/M
return res
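# Illustration: resizer1D is flux-conserving (area-weighted) resampling, so
# downsampling [0, 1, 2, 3, 4, 5] to 3 bins returns the per-bin means:
#   resizer1D(np.arange(6, dtype=np.float64), 3) -> array([0.5, 2.5, 4.5])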
@numba.jit
def resizer1D_numbaopt(data,res,N):
M = data.size
carry=0
m=0
for n in range(int(N)):
data_sum = carry
while m*N - n*M < M :
data_sum += data[m]
m += 1
carry = (m-(n+1)*M/N)*data[m-1]
data_sum -= carry
res[n] = data_sum*N/M
return res
@numba.jit
def resizer2D(data2D,sampling):
data_shape = np.asarray(np.shape(data2D))
sampled_shape = (np.round(data_shape/sampling)).astype(int)
resampled_x = np.zeros((data_shape[0],sampled_shape[1]),dtype=data2D.dtype)
resampled_f = np.zeros(sampled_shape,dtype=data2D.dtype)
for yy in range(data_shape[0]):
resampled_x[yy,:] = resizer1D_numbaopt(data2D[yy,:],resampled_x[yy,:],sampled_shape[1])
for xx in range(sampled_shape[1]):
resampled_f[:,xx] = resizer1D_numbaopt(resampled_x[:,xx],resampled_f[:,xx],sampled_shape[0])
return resampled_f
@numba.jit
def resizer2D_numbaopt(data2D,resampled_x,resampled_f,sampling):
data_shape = np.asarray(np.shape(data2D))
sampled_shape = (np.round(data_shape/sampling)).astype(int)
for yy in range(data_shape[0]):
resampled_x[yy,:] = resizer1D_numbaopt(data2D[yy,:],resampled_x[yy,:],sampled_shape[1])
for xx in range(sampled_shape[1]):
resampled_f[:,xx] = resizer1D_numbaopt(resampled_x[:,xx],resampled_f[:,xx],sampled_shape[0])
return resampled_f
@numba.jit
def resizer4Df(data4D_flat,sampling):
datashape = np.asarray(data4D_flat.shape)
res_shape = np.copy(datashape)
res_shape[1:3] = np.round(datashape[1:3]/sampling)
data4D_res = np.zeros(res_shape.astype(int),dtype=data4D_flat.dtype)
resampled_x = np.zeros((datashape[1],res_shape[2]),data4D_flat.dtype)
resampled_f = np.zeros(res_shape[1:3],dtype=data4D_flat.dtype)
for zz in range(data4D_flat.shape[0]):
data4D_res[zz,:,:] = resizer2D_numbaopt(data4D_flat[zz,:,:],resampled_x,resampled_f,sampling)
return data4D_res
@numba.jit
def resizer4D(data4D,sampling):
data4D_flat = np.reshape(data4D,(data4D.shape[0]*data4D.shape[1],data4D.shape[2],data4D.shape[3]))
datashape = np.asarray(data4D_flat.shape)
res_shape = np.copy(datashape)
res_shape[1:3] = np.round(datashape[1:3]/sampling)
data4D_res = np.zeros(res_shape.astype(int),dtype=data4D_flat.dtype)
resampled_x = np.zeros((datashape[1],res_shape[2]),data4D_flat.dtype)
resampled_f = np.zeros(res_shape[1:3],dtype=data4D_flat.dtype)
for zz in range(data4D_flat.shape[0]):
data4D_res[zz,:,:] = resizer2D_numbaopt(data4D_flat[zz,:,:],resampled_x,resampled_f,sampling)
res_4D = np.reshape(data4D_res,(data4D.shape[0],data4D.shape[1],resampled_f.shape[0],resampled_f.shape[1]))
return res_4D
def subpixel_pad2D(initial_array,final_size):
final_size = np.asarray(final_size)
padded = np.amin(initial_array)*(np.ones(final_size,dtype=initial_array.dtype))
padded[0:initial_array.shape[0],0:initial_array.shape[1]] = initial_array
fourier_cal_y = (np.linspace((-final_size[0]/2), ((final_size[0]/2) - 1), final_size[0]))/final_size[0]
fourier_cal_x = (np.linspace((-final_size[1]/2), ((final_size[1]/2) - 1), final_size[1]))/final_size[1]
[fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
move_pixels = np.flip(0.5*(final_size - np.asarray(initial_array.shape)))
move_phase = np.exp((-2) * np.pi * 1j * ((fourier_mesh_x*move_pixels[0]) + (fourier_mesh_y*move_pixels[1])))
padded_f = np.fft.fftshift(np.fft.fft2(padded))
padded_c = np.abs(np.fft.ifft2(np.multiply(padded_f,move_phase)))
return padded_c
@numba.jit
def subpixel_pad4D(data4D_flat,final_size,cut_radius):
final_size = (np.asarray(final_size)).astype(int)
yy,xx = np.mgrid[0:final_size[0],0:final_size[1]]
rad = ((yy - final_size[0]/2)**2) + ((xx - final_size[1]/2)**2)
cutoff = (rad < ((1.1*cut_radius)**2)).astype(data4D_flat.dtype)
cbed = np.zeros(final_size,dtype=data4D_flat.dtype)
fourier_cal_y = (np.linspace((-final_size[0]/2), ((final_size[0]/2) - 1), final_size[0]))/final_size[0]
fourier_cal_x = (np.linspace((-final_size[1]/2), ((final_size[1]/2) - 1), final_size[1]))/final_size[1]
[fourier_mesh_x, fourier_mesh_y] = np.meshgrid(fourier_cal_x, fourier_cal_y)
move_pixels = np.flip(0.5*(final_size - np.asarray(data4D_flat.shape[1:3])))
move_phase =
|
np.exp((-2) * np.pi * 1j * ((fourier_mesh_x*move_pixels[0]) + (fourier_mesh_y*move_pixels[1])))
|
numpy.exp
|
import numpy as np
np.set_printoptions(precision=5)
import matplotlib.patches as patches
import time
import os
from envs.gym_kuka_mujoco import kuka_asset_dir
import mujoco_py
from mujoco_py.generated import const
from envs.gym_kuka_mujoco.controllers import iMOGVIC
from envs.gym_kuka_mujoco.utils.transform_utils import *
from envs.robosuite.robosuite.utils import transform_utils as trans
from code.pytorch.LAMPO.core.task_interface import TaskInterface
from code.pytorch.LAMPO.core.rrt_star import RRTStar
from romi.movement_primitives import ClassicSpace, MovementPrimitive, LearnTrajectory
from romi.groups import Group
from romi.trajectory import NamedTrajectory, LoadTrajectory
import transforms3d as transforms3d
from envs.robosuite.robosuite.controllers import *
from gym import spaces
import pybullet as p
from tensorboardX import SummaryWriter as FileWriter
import imageio
def render_frame(viewer, pos, euler):
viewer.add_marker(pos=pos,
label='',
type=const.GEOM_SPHERE,
size=[.01, .01, .01])
# mat = quat2mat(quat)
mat = transforms3d.euler.euler2mat(euler[0], euler[1], euler[2], 'sxyz')
cylinder_half_height = 0.02
pos_cylinder = pos + mat.dot([0.0, 0.0, cylinder_half_height])
viewer.add_marker(pos=pos_cylinder,
label='',
type=const.GEOM_CYLINDER,
size=[.005, .005, cylinder_half_height],
mat=mat)
viewer.add_marker(pos=pos_cylinder,
label='',
type=const.GEOM_CYLINDER,
size=[cylinder_half_height, .005, .005],
mat=mat)
viewer.add_marker(pos=pos_cylinder,
label='',
type=const.GEOM_CYLINDER,
size=[.005, cylinder_half_height, .005],
mat=mat)
class AssistiveOnePointEnv(TaskInterface):
"""
Interface with MujocoEnv ::: iMOGIC
"""
def __init__(self, env, set_params):
        super(AssistiveOnePointEnv, self).__init__()
self._env = env
self.mujoco_model = Mujoco_model(set_params["controller_options"], **set_params["mujoco_options"])
self._env_name = set_params["env"]
self.total_timesteps = 0
self.episode_timesteps = 0
self.episode_number = 0
self.episode_reward = 0
self.reward_scale = 0.01
self.render = set_params["env_options"]["render"]
self.max_episode_steps = set_params["alg_options"]["max_episode_steps"]
self._state_dim = set_params["alg_options"]['context_dim']
self._context_dim = set_params["alg_options"]['context_dim']
self._latent_parameter_dim = set_params["alg_options"]['parameter_dim']
self._latent_parameter_high = set_params["controller_options"]['stiffness_high']
self._latent_parameter_low = set_params["controller_options"]['stiffness_low']
self.num_way_points = set_params["controller_options"]['num_waypoints']
self.action_dim = set_params["alg_options"]['action_dim']
self.tool_delta_pos = np.array(set_params["env_options"]["tool_delta_pos"][self._env_name])
self.tool_delta_ori = np.array(set_params["env_options"]["tool_delta_ori"][self._env_name])
self.context = None
self.target_pos = None
self.target_ori = None
self.target_euler = None
self.delta_pose_high = self.mujoco_model.delta_pose_high
self.delta_pose_low = self.mujoco_model.delta_pose_low
self.delta_pose_scale = self.mujoco_model.delta_pose_scale
self.delta_pose_params = np.zeros(self.action_dim)
self.target_pos_list = np.zeros((self.mujoco_model.num_way_points, 3))
self.target_euler_list = np.zeros((self.mujoco_model.num_way_points, 3))
def reset(self):
"""
get initial context
"""
print("Environment Reset !!!")
print("+" * 50)
if self.render:
self._env.render()
observation = self._env.reset()
# print('robot_joint_angles:', observation['robot_joint_angles'])
# reset joints and way points
self.mujoco_ee_pose = self.mujoco_model.reset(observation['robot_joint_angles'])
# self.initial_tool_pos, self.initial_tool_orient = self._env.get_tool_pose()
# self.initial_tool_euler = transforms3d.euler.quat2euler(self.initial_tool_orient, 'sxyz')
self.get_ee_state()
self.target_pos_list[0] = self.mujoco_target_pos
# self.target_pos_list[1] = self.mujoco_target_pos_1
self.target_euler_list[0] = self.mujoco_target_euler
# self.target_euler_list[1] = self.mujoco_target_euler_1
self.set_waypoints(way_points_list=1,
target_pos=self.target_pos_list,
target_euler=self.target_euler_list)
print("mujoco_target_pos :", self.mujoco_target_pos,
"mujoco_target_euler :", np.array(self.mujoco_target_euler))
self.read_context()
return observation
def get_ee_state(self):
self.tool_pos, self.tool_orient = self._env.get_tool_pose()
self.tool_euler = transforms3d.euler.quat2euler(self.tool_orient, 'sxyz')
# print("tool_pos :", self.tool_pos, "tool_euler :", np.array(self.tool_euler),
# np.array(p.getEulerFromQuaternion(np.array(self.tool_orient), physicsClientId=self._env.id)))
self.ee_pos, self.ee_ori = self._env.robot.get_ee_pose()
# print("original ori :", self.ee_ori)
self.ee_euler = transforms3d.euler.quat2euler(self.ee_ori, 'sxyz')
# print("ee_pos :", self.ee_pos, "ee_euler :", np.array(self.ee_euler),
# np.array(p.getEulerFromQuaternion(np.array(self.ee_ori), physicsClientId=self._env.id)))
self.mujoco_model_ee = self.mujoco_model.get_ee_pose()
# print("mujoco_model_ee :", np.array(self.mujoco_model_ee))
self.target_pos, self.target_ori, \
self.target_pos_ref, self.target_orient_ref, \
self.target_pos_tool, self.target_orient_tool = self._env.get_context()
self.target_euler_ref = transforms3d.euler.quat2euler(self.target_orient_ref, 'sxyz')
self.target_euler = transforms3d.euler.quat2euler(self.target_ori, 'sxyz')
# print("target_pos :", self.target_pos, "target_euler :", np.array(self.target_euler))
# print("reference_target_pos :", self.target_pos_ref,
# "reference_target_euler :", self.target_euler_ref)
# self.delta_pos = self.target_pos_ref + self.tool_delta_pos - self.tool_pos
# self.target_pos_ref_new, self.target_orient_ref_new = p.multiplyTransforms(
# self.target_pos_ref, self.target_orient_ref,
# self.ee_pos - self.tool_pos, [0, 0, 0, 1], physicsClientId=self._env.id)
# self.tool_delta_pos, desired_ori = p.multiplyTransforms(
# self.ee_pos, self.ee_ori,
# self.ee_pos - self.tool_pos, [0, 0, 0, 1], physicsClientId=self._env.id)
# + self.tool_delta_pos
# print("tool_delta_pos :", self.tool_delta_pos, np.array(p.getEulerFromQuaternion(np.array(desired_ori), physicsClientId=self._env.id)))
# self.initial_tool_delta_ori = np.array([-self.target_euler_ref[0], -self.target_euler_ref[1], self.target_euler_ref[2] - 1.57])
# self.initial_tool_delta_ori = np.array([self.target_euler_ref[1], self.target_euler_ref[0], 0.0])
self.pybullet_ori_euler = np.array(
p.getEulerFromQuaternion(np.array(self.target_orient_tool), physicsClientId=self._env.id))
# print("pybullet_euler :", self.pybullet_ori_euler)
# self.delta_pos = self.target_pos_tool - self.ee_pos + self.tool_delta_pos
self.delta_pos = self.target_pos_tool - self.ee_pos + self.delta_pose_params[:3]
# self.delta_pos = self.target_pos_ref + (self.tool_delta_pos - self.ee_pos) - self.ee_pos
self.mujoco_target_pos = self.mujoco_ee_pose[:3] + self.delta_pos
# self.mujoco_target_euler = \
# self._desired_ori(self.mujoco_ee_pose[3:],
# self.pybullet_ori_euler + self.tool_delta_ori)
self.mujoco_target_euler = \
self._desired_ori(self.mujoco_ee_pose[3:],
self.pybullet_ori_euler + self.delta_pose_params[3:])
# self.delta_pos_1 = self.target_pos_ref - self.ee_pos + self.tool_delta_pos
#
# # self.delta_pos = self.target_pos_ref + (self.tool_delta_pos - self.ee_pos) - self.ee_pos
# self.mujoco_target_pos_1 = self.mujoco_ee_pose[:3] + self.delta_pos_1
# self.mujoco_target_euler_1 = self._desired_ori(self.mujoco_ee_pose[3:], np.array([0.0, 0.0, 0.0]))
def read_context(self):
"""
return context and target
"""
self.target_pos, self.target_ori, _, _, _, _ = self._env.get_context()
self.target_euler = transforms3d.euler.quat2euler(self.target_ori, 'sxyz')
# self.context = np.concatenate((self.target_pos, self.target_ori), axis=0)
self.context = np.concatenate((self.delta_pos, self.target_ori), axis=0)
return self.context
def send_movement(self, params):
"""
send parameters
"""
self.send_params(params)
reward, context, info = self.run_single_trajectory(n=1)
self.get_ee_state()
# print("error_pos :", self.tool_pos - self.initial_spoon_pos)
# print("Final dist state :", self.mujoco_model.get_state_dist())
return reward, context, info
def set_waypoints(self, way_points_list=None, target_pos=None, target_euler=None):
"""
set way points
"""
# print("Mujoco_target_pos :", target_pos, 'target_euler :', target_euler)
if way_points_list is None:
self.target_pos_list = np.tile(target_pos, (self.mujoco_model.num_way_points, 1))
self.target_euler_list = np.tile(target_euler, (self.mujoco_model.num_way_points, 1))
self.mujoco_model.set_waypoints(self.target_pos_list, self.target_euler_list)
def send_params(self, params):
"""
        forward the impedance parameters to the Mujoco controller
"""
self.mujoco_model.set_impedance_params(params[:self.num_way_points * self.action_dim])
# print("params :", params[-self.action_dim:])
# self.delta_pose_params = params[-self.action_dim:] * self.delta_pose_scale[-self.action_dim:] \
# + self.delta_pose_low[-self.action_dim:]
# print("delta_pose_params :", self.delta_pose_params)
def run_single_trajectory(self, n=None):
average_reward = 0.0
info = None
context = None
self.done = False
self.episode_number += 1
self.episode_timesteps = 0
self.episode_reward = 0.0
for i in range(n):
obs = self.reset()
context = self.read_context()
joint_last = obs['robot_joint_angles'].copy()
while True:
robot_action = np.zeros(6)
joint = self.mujoco_model.step(robot_action)
# control human or not
human_action = np.zeros(self._env.action_human_len)
action = {'robot': joint.copy() - joint_last, 'human': human_action}
# env.action_space_human.sample()
# joint_list.append(joint[0].copy())
# print("action :", action)
np.putmask(action['robot'], action['robot'] > 3.14, 0.0)
np.putmask(action['robot'], action['robot'] < -3.14, 0.0)
obs, reward, self.done, info = self._env.step(action)
# obs, reward, self.done, info = self._env.step(action, joint)
# print("done robot :", self.done['robot'])
# print("done human :", self.done['human'])
joint_last = obs['robot_joint_angles'].copy()
# print("joint last :", joint_last)
# done_bool = 0 if self.episode_timesteps - 1 == self.max_episode_steps else float(self.done['robot'])
if self.render:
self._env.render()
self.episode_reward += reward['robot']
# self.episode_reward += reward['human']
self.episode_timesteps += 1
self.total_timesteps += 1
if self.mujoco_model.get_state_dist() < 0.006:
break
# print("mujoco_dist :", self.mujoco_model.get_state_dist())
if self.done['__all__'] or info['robot']['task_success']:
break
print("episode_reward :", self.episode_reward)
# if self.done or self.episode_timesteps == self.max_episode_steps - 1:
# average_reward += self.episode_reward
# else:
# average_reward += -300.0
average_reward += self.episode_reward
if info['robot']['task_success']:
average_reward += 100
else:
average_reward += -100.0
# print('joint :', joint.copy())
# print('final :', obs['robot_joint_angles'].copy())
print("Success :", info['robot']['task_success'], "Episode timesteps :", self.episode_timesteps,
"Reward :", np.around(average_reward / n, 4) * self.reward_scale)
self.episode_number += 1
self.episode_timesteps = 0
self.episode_reward = 0.0
return np.around(average_reward / n, 4) * self.reward_scale, context, info['robot']
def get_demonstrations(self, num_traj=50):
"""
generate demonstration samples
"""
params_list = np.random.uniform(0, 1, size=(num_traj, self._latent_parameter_dim)) \
* (np.array(self._latent_parameter_high) - np.array(self._latent_parameter_low)) \
+ np.array(self._latent_parameter_low)
# # params_list = np.random.uniform(0, 1, size=(num_traj, self._env.latent_parameter_dim))
print("params_list :", params_list)
reward_list = []
context_list = []
for i in range(num_traj):
# context, _, _, _ = self._env.get_context()
params = params_list[i, :]
reward, context, info = self.send_movement(params)
context_list.append(context.tolist())
# print("info :", info)
reward_list.append(np.copy(reward))
return np.hstack((np.array(context_list), params_list)), np.array(reward_list)
# return np.hstack((np.array(context_list), params_list)), reward_list
# return [np.concatenate((np.array(context_list), params_list), axis=0)]
def get_context_dim(self):
return self._context_dim
def get_impedance_dim(self):
return self._latent_parameter_dim
def _desired_ori(self, current_euler, rot_euler):
# convert axis-angle value to rotation matrix
# quat_error = trans.axisangle2quat(rot_euler)
# rotation_mat_error = trans.quat2mat(quat_error)
# curr_mat = trans.euler2mat(current_euler)
rotation_mat_error = transforms3d.euler.euler2mat(rot_euler[0], rot_euler[1], rot_euler[2], 'sxyz')
# rotation_mat_error = trans.quat2mat(quat_error)
# curr_mat = trans.euler2mat(current_euler)
curr_mat = transforms3d.euler.euler2mat(current_euler[0], current_euler[1], current_euler[2], 'sxyz')
goal_orientation = np.dot(rotation_mat_error, curr_mat)
# return trans.mat2euler(goal_orientation, 'sxyz')
return transforms3d.euler.mat2euler(goal_orientation, 'sxyz')
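# Quick check of _desired_ori's composition (the rotation error is applied on
# the left of the current orientation), using transforms3d directly:
#   R_err = transforms3d.euler.euler2mat(0, 0, np.pi / 2, 'sxyz')
#   R_cur = transforms3d.euler.euler2mat(0, 0, 0, 'sxyz')   # identity
#   transforms3d.euler.mat2euler(np.dot(R_err, R_cur), 'sxyz') -> (0, 0, ~1.5708)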
class AssistiveEnv(TaskInterface):
"""
Interface with MujocoEnv
"""
def __init__(self, args, env, set_params):
        super(AssistiveEnv, self).__init__()
self.args = args
self._env = env
self.mujoco_model = Mujoco_model(set_params["controller_options"], **set_params["mujoco_options"])
self._env_name = args.env
if self.args.video_record:
fps = int(set_params["mujoco_options"]["frame_skip"])
self.video_writer = imageio.get_writer("{}.mp4".format(self.args.video_path), fps=fps)
self._env.setup_camera(camera_width=1920//2, camera_height=1080//2)
self.total_timesteps = 0
self.episode_timesteps = 0
self.episode_number = 0
self.episode_reward = 0
self.reward_scale = 0.01
self.render = set_params["env_options"]["render"]
self.max_episode_steps = set_params["alg_options"]["max_episode_steps"]
self._state_dim = set_params["alg_options"]['context_dim']
self._context_dim = set_params["alg_options"]['context_dim']
self._latent_parameter_dim = set_params["alg_options"]['parameter_dim']
self.action_dim = set_params["alg_options"]['action_dim']
self.num_way_points = set_params["controller_options"]["num_waypoints"]
self._latent_parameter_high = set_params["controller_options"]['stiffness_high']
self._latent_parameter_low = set_params["controller_options"]['stiffness_low']
self.tool_delta_pos = np.array(set_params["env_options"]["tool_delta_pos"][self._env_name])
self.tool_delta_ori = np.array(set_params["env_options"]["tool_delta_ori"][self._env_name])
self.delta_pose_high = self.mujoco_model.delta_pose_high
self.delta_pose_low = self.mujoco_model.delta_pose_low
self.delta_pose_scale = self.mujoco_model.delta_pose_scale
self.delta_pose_params = np.zeros(self.action_dim)
self.context = None
self.target_pos = None
self.target_ori = None
self.target_euler = None
self.target_pos_list = np.zeros((self.mujoco_model.num_way_points, 3))
self.target_euler_list = np.zeros((self.mujoco_model.num_way_points, 3))
def reset(self):
"""
get initial context
"""
print("Environment Reset !!!")
print("+" * 50)
if self.render:
self._env.render()
observation = self._env.reset()
# reset joints and way points
self.mujoco_ee_pose = self.mujoco_model.reset(observation['robot_joint_angles'])
self.get_ee_state()
self.target_pos_list[0] = self.mujoco_target_pos
self.target_pos_list[1] = self.mujoco_target_pos_1
self.target_euler_list[0] = self.mujoco_target_euler
self.target_euler_list[1] = self.mujoco_target_euler_1
self.set_waypoints(way_points_list=1,
target_pos=self.target_pos_list,
target_euler=self.target_euler_list)
self.read_context()
return observation
def get_ee_state(self):
self.tool_pos, self.tool_orient = self._env.get_tool_pose()
self.tool_euler = transforms3d.euler.quat2euler(self.tool_orient, 'sxyz')
print("tool_pos :", self.tool_pos, "tool_euler :", np.array(self.tool_euler),
np.array(p.getEulerFromQuaternion(np.array(self.tool_orient), physicsClientId=self._env.id)))
self.ee_pos, self.ee_ori = self._env.robot.get_ee_pose()
self.ee_euler = transforms3d.euler.quat2euler(self.ee_ori, 'sxyz')
print("ee_pos :", self.ee_pos, "ee_euler :", np.array(self.ee_euler),
np.array(p.getEulerFromQuaternion(np.array(self.ee_ori), physicsClientId=self._env.id)))
self.mujoco_model_ee = self.mujoco_model.get_ee_pose()
print("mujoco_model_ee :", np.array(self.mujoco_model_ee))
self.target_pos, self.target_ori, \
self.target_pos_ref, self.target_orient_ref, \
self.target_pos_tool, self.target_orient_tool = self._env.get_context()
self.target_euler_ref = transforms3d.euler.quat2euler(self.target_orient_ref, 'sxyz')
self.target_euler = transforms3d.euler.quat2euler(self.target_ori, 'sxyz')
print("target_pos :", self.target_pos, "target_euler :", np.array(self.target_euler))
print("reference_target_pos :", self.target_pos_ref,
"reference_target_euler :", self.target_euler_ref)
# self.delta_pos = self.target_pos_ref + self.tool_delta_pos - self.tool_pos
# self.target_pos_ref_new, self.target_orient_ref_new = p.multiplyTransforms(
# self.target_pos_ref, self.target_orient_ref,
# self.ee_pos - self.tool_pos, [0, 0, 0, 1], physicsClientId=self._env.id)
# self.tool_delta_pos, desired_ori = p.multiplyTransforms(
# self.ee_pos, self.ee_ori,
# self.ee_pos - self.tool_pos, [0, 0, 0, 1], physicsClientId=self._env.id)
# + self.tool_delta_pos
# print("tool_delta_pos :", self.tool_delta_pos, np.array(p.getEulerFromQuaternion(np.array(desired_ori), physicsClientId=self._env.id)))
# self.initial_tool_delta_ori = np.array([-self.target_euler_ref[0], -self.target_euler_ref[1], self.target_euler_ref[2] - 1.57])
# self.initial_tool_delta_ori = np.array([self.target_euler_ref[1], self.target_euler_ref[0], 0.0])
self.pybullet_ori_euler = np.array(p.getEulerFromQuaternion(np.array(self.target_ori),
physicsClientId=self._env.id))
# self.delta_pos = self.target_pos_tool - self.ee_pos + self.tool_delta_pos
# self.mujoco_target_pos = self.mujoco_ee_pose[:3] + self.delta_pos
# self.mujoco_target_euler = self._desired_ori(self.mujoco_ee_pose[3:],
# self.pybullet_ori_euler + self.tool_delta_ori)
# self.delta_pos_1 = self.target_pos_ref - self.ee_pos + self.tool_delta_pos
# self.mujoco_target_pos_1 = self.mujoco_ee_pose[:3] + self.delta_pos_1
# self.mujoco_target_euler_1 = self._desired_ori(self.mujoco_ee_pose[3:],
# np.array([0.0, 0.0, 0.0]))
self.tool_delta_pos += self.delta_pose_params[:3]
self.tool_delta_ori += self.delta_pose_params[3:]
self.delta_pos = self.target_pos_tool - self.tool_pos + self.tool_delta_pos
self.mujoco_target_pos = self.mujoco_ee_pose[:3] + self.delta_pos
self.mujoco_target_euler = self._desired_ori(self.mujoco_ee_pose[3:],
self.pybullet_ori_euler + self.tool_delta_ori)
self.delta_pos_1 = self.target_pos_ref - self.tool_pos + self.tool_delta_pos
self.mujoco_target_pos_1 = self.mujoco_ee_pose[:3] + self.delta_pos_1
self.mujoco_target_euler_1 = self._desired_ori(self.mujoco_ee_pose[3:],
np.array([0.0, 0.0, 0.0]))
def read_context(self):
"""
return context and target
"""
self.target_pos, self.target_ori, _, _, _, _ = self._env.get_context()
self.target_euler = transforms3d.euler.quat2euler(self.target_ori, 'sxyz')
# self.context = np.concatenate((self.target_pos, self.target_ori), axis=0)
self.context = np.concatenate((self.delta_pos, self.target_ori), axis=0)
return self.context
def send_movement(self, params):
"""
send parameters
"""
self.send_params(params)
reward, context, info = self.run_single_trajectory(n=1)
self.get_ee_state()
return reward, context, info
def set_waypoints(self, way_points_list=None, target_pos=None, target_euler=None):
"""
set way points
"""
# print("Mujoco_target_pos :", target_pos, 'target_euler :', target_euler)
if way_points_list is None:
self.target_pos_list = np.tile(target_pos, (self.mujoco_model.num_way_points, 1))
self.target_euler_list = np.tile(target_euler, (self.mujoco_model.num_way_points, 1))
self.mujoco_model.set_waypoints(self.target_pos_list, self.target_euler_list)
def send_params(self, params):
"""
        forward the impedance parameters and the trailing pose-offset parameters
"""
self.mujoco_model.set_impedance_params(params[:self.num_way_points * self.action_dim + self.num_way_points - 1])
self.delta_pose_params = params[-self.action_dim:]
# self.delta_pose_params = params[-self.action_dim:] * self.delta_pose_scale[-self.action_dim:] \
# + self.delta_pose_low[-self.action_dim:]
# print("delta_pose_params :", self.delta_pose_params)
def run_single_trajectory(self, n=None):
average_reward = 0.0
info = None
context = None
self.done = False
self.episode_number += 1
self.episode_timesteps = 0
self.episode_reward = 0.0
for i in range(n):
obs = self.reset()
context = self.read_context()
joint_last = obs['robot_joint_angles'].copy()
while True:
robot_action = np.zeros(6)
joint = self.mujoco_model.step(robot_action)
# control human or not
human_action = np.zeros(self._env.action_human_len)
action = {'robot': joint.copy() - joint_last, 'human': human_action}
# env.action_space_human.sample()
# joint_list.append(joint[0].copy())
# print("action :", action)
np.putmask(action['robot'], action['robot'] > 3.14, 0.0)
np.putmask(action['robot'], action['robot'] < -3.14, 0.0)
obs, reward, self.done, info = self._env.step(action)
# obs, reward, self.done, info = self._env.step(action, joint)
# print("done robot :", self.done['robot'])
# print("done human :", self.done['human'])
joint_last = obs['robot_joint_angles'].copy()
# print("joint last :", joint_last)
# done_bool = 0 if self.episode_timesteps - 1 == self.max_episode_steps else float(self.done['robot'])
if self.args.video_record:
img, _ = self._env.get_camera_image_depth()
self.video_writer.append_data(img)
if self.render:
self._env.render()
self.episode_reward += reward['robot']
# self.episode_reward += reward['human']
self.episode_timesteps += 1
self.total_timesteps += 1
if self.mujoco_model.get_state_dist() < 0.006:
break
# print("mujoco_dist :", self.mujoco_model.get_state_dist())
if self.done['__all__'] or info['robot']['task_success']:
break
print("episode_reward :", self.episode_reward)
# if self.done or self.episode_timesteps == self.max_episode_steps - 1:
# average_reward += self.episode_reward
# else:
# average_reward += -300.0
average_reward += self.episode_reward
if info['robot']['task_success']:
average_reward += 100
else:
average_reward += -100.0
# print('joint :', joint.copy())
# print('final :', obs['robot_joint_angles'].copy())
print("Success :", info['robot']['task_success'], "Episode timesteps :", self.episode_timesteps,
"Reward :", np.around(average_reward / n, 4) * self.reward_scale)
self.episode_number += 1
self.episode_timesteps = 0
self.episode_reward = 0.0
return np.around(average_reward / n, 4) * self.reward_scale, context, info['robot']
def get_demonstrations(self, num_traj=50):
"""
generate demonstration samples
"""
params_list = np.random.uniform(0, 1, size=(num_traj, self._latent_parameter_dim)) \
* (np.array(self._latent_parameter_high) - np.array(self._latent_parameter_low)) \
+ np.array(self._latent_parameter_low)
# # params_list = np.random.uniform(0, 1, size=(num_traj, self._env.latent_parameter_dim))
print("params_list :", params_list)
reward_list = []
context_list = []
success_list = []
for i in range(num_traj):
# context, _, _, _ = self._env.get_context()
params = params_list[i, :]
reward, context, info = self.send_movement(params)
context_list.append(context.tolist())
reward_list.append(np.copy(reward))
success_list.append(info['task_success'])
return np.hstack((np.array(context_list), params_list)), reward_list, success_list
# return np.hstack((np.array(context_list), params_list)), reward_list
# return [np.concatenate((np.array(context_list), params_list), axis=0)]
def get_context_dim(self):
return self._context_dim
def get_impedance_dim(self):
return self._latent_parameter_dim
def _desired_ori(self, current_euler, rot_euler):
# convert axis-angle value to rotation matrix
# quat_error = trans.axisangle2quat(rot_euler)
# rotation_mat_error = trans.quat2mat(quat_error)
# curr_mat = trans.euler2mat(current_euler)
rotation_mat_error = transforms3d.euler.euler2mat(rot_euler[0], rot_euler[1], rot_euler[2], 'sxyz')
# rotation_mat_error = trans.quat2mat(quat_error)
# curr_mat = trans.euler2mat(current_euler)
curr_mat = transforms3d.euler.euler2mat(current_euler[0], current_euler[1], current_euler[2], 'sxyz')
goal_orientation = np.dot(rotation_mat_error, curr_mat)
# return trans.mat2euler(goal_orientation, 'sxyz')
return transforms3d.euler.mat2euler(goal_orientation, 'sxyz')
class Mujoco_model():
def __init__(self, controller_options, render=True, frame_skip=10, ratio=2.0,
stiff_scale=10, params_fixed=True):
model_path = os.path.join(controller_options['model_root'], controller_options['model_path'])
model = mujoco_py.load_model_from_path(model_path)
self.sim = mujoco_py.MjSim(model)
self.controller = iMOGVIC(self.sim, **controller_options)
self.action_dim = self.controller.action_dim
self.render = render
if self.render:
self.viewer = mujoco_py.MjViewer(self.sim)
self.num_way_points = controller_options['num_waypoints']
self.stiffness_initial = controller_options['stiffness_initial']
self.weight_initial = controller_options['weight_initial']
self.frame_skip = frame_skip
self.ratio = ratio
self.stiffness_scale = stiff_scale
self.params_fixed = params_fixed
self.delta_pose_high = controller_options['stiffness_high']
self.delta_pose_low = controller_options['stiffness_low']
self.delta_pose_scale = np.array(self.delta_pose_high) - np.array(self.delta_pose_low)
# way points
self.pos_set_list = np.zeros((self.num_way_points, 3))
self.euler_set_list = np.zeros((self.num_way_points, 3))
def reset(self, initial_angles):
"""
reset robot joints
"""
# self.controller.update_initial_joints(initial_angles)
self.sim_state = self.sim.get_state()
self.sim_state.qpos[:7] = initial_angles
qvel = np.zeros(7)
self.sim_state.qvel[:7] = qvel
self.sim.set_state(self.sim_state)
self.sim.forward()
self.controller.update_state()
if self.render:
self.viewer.render()
render_frame(self.viewer, self.controller.ee_pose[:3], self.controller.ee_pose[3:])
return self.controller.ee_pose
def step(self, action):
dt = self.sim.model.opt.timestep
for _ in range(self.frame_skip):
# update controller via imogic
torque, V, pose_err, vel_err, stiffness_eqv, damping_eqv = self.controller.update_vic_torque()
self.sim.data.ctrl[:7] = np.clip(torque[:7], -100, 100)
self.sim.step()
self.sim.forward()
# render_frame(viewer, pos_set_list[0, :], quat_set_list[0, :])
if self.render:
self.viewer.render()
render_frame(self.viewer, self.pos_set_list[0, :], self.euler_set_list[0, :])
return self.controller.get_robot_joints()[0]
def set_waypoints(self, target_pos_list, target_euler_list):
"""
target_pose:
"""
self.pos_set_list = target_pos_list
self.euler_set_list = target_euler_list
way_points_list = np.concatenate((self.pos_set_list, self.euler_set_list), axis=1)
self.controller.set_way_points(way_points_list)
def set_impedance_params(self, params):
"""
stiffness, damping and weight
"""
if self.params_fixed:
print("params_fixed :")
stiffness_params = self.stiffness_initial
weight = self.weight_initial
params = np.concatenate((stiffness_params, weight), axis=0)
stiffness_list, damping_list, weight_list = self.send_params(params)
else:
stiffness_list, damping_list, weight_list = self.send_params(params)
self.controller.set_params_direct(stiffness_list, damping_list, weight_list)
def send_params(self, params):
"""
        map the flat parameter vector to stiffness, damping and waypoint weights
"""
params = np.clip(params, 0, 1)
stiffness_list = params[:self.num_way_points * self.action_dim].reshape(
self.num_way_points, -1) * self.stiffness_scale
print("stiffness_list :", stiffness_list)
damping_list = self.ratio * np.sqrt(stiffness_list)
weight_list = np.ones(self.num_way_points)
weight_list[1:] = params[self.num_way_points * self.action_dim:]
return stiffness_list, damping_list, weight_list
def get_joint_pos(self):
return self.controller.get_robot_joints()[0]
def get_ee_pose(self):
return self.controller.ee_pose
def get_state_dist(self):
return np.linalg.norm(self.controller.state, ord=2)
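# Illustration of the parameter layout consumed by set_impedance_params /
# send_params above (values made up): with num_way_points=2 and action_dim=6,
#   params = np.concatenate([np.full(12, 0.5), [0.8]])
#   k = params[:12].reshape(2, 6) * 10   # stiffness, stiff_scale=10 -> 5.0 each
#   d = 2.0 * np.sqrt(k)                 # damping, ratio=2 -> ~4.47
#   w = np.array([1.0, 0.8])             # waypoint weights (first fixed at 1)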
class AssistiveDRL(object):
def __init__(self, env, set_params, logdir):
        super(AssistiveDRL, self).__init__()
self.logdir = logdir
self.writer = FileWriter(logdir)
self.render = set_params["render"]
self._env = env
self.mujoco_model = Mujoco_RL_model(set_params["controller_options"],
**set_params["mujoco_options"])
self.keys = set_params["controller_options"]["obs_keys"]
self.state_dim = self._env.obs_robot_len + 7
print("state_dim :", self.state_dim)
self.observation_space = spaces.Box(low=np.array([-1000.0] * self.state_dim, dtype=np.float32),
high=np.array([1000.0] * self.state_dim, dtype=np.float32),
dtype=np.float32)
self.action_dim = set_params["controller_options"]["action_dim"]
self.action_space = spaces.Box(low=np.array([-1.0] * self.action_dim, dtype=np.float32),
high=np.array([1.0] * self.action_dim, dtype=np.float32),
dtype=np.float32)
self.env_name = set_params["env"]
self.max_episode_steps = set_params["env_options"]['max_episode_steps'][self.env_name]
self.metadata = None
self.reward_range = (-float(100), float(100))
self.total_steps = 0
self.episode_reward = 0.0
def reset(self):
"""
get initial context
"""
if self.render:
self._env.render()
observation = self._env.reset()
# print('Observation :', observation['robot'].shape)
# print('robot_joint_angles :', observation['robot_joint_angles'])
self.last_joint = observation['robot_joint_angles']
# set way points : target pose
start_pos, start_ori = self._env.robot.get_ee_pose()
start_euler = transforms3d.euler.quat2euler(start_ori, 'sxyz')
# print("Pybullet start_pos :", start_pos, "start_euler :", start_euler)
# reset joints
mujoco_ee_pose = self.mujoco_model.reset(observation['robot_joint_angles'])
# print("Mujoco_ee_pos :", mujoco_ee_pose)
self.episode_steps = 0
self.episode_reward = 0.0
return self._flatten_obs(observation)
def step(self, action):
self.get_ee_state()
target_ee_ori_mat = self._desired_ori(current_euler=self.ee_pose[3:], rot_euler=action[3:])
joint = self.mujoco_model.step(action, set_ori=target_ee_ori_mat)
human_action = np.zeros(self._env.action_human_len)
action = {'robot': joint.copy() - self.last_joint, 'human': human_action} # env.action_space_human.sample()
# print("action :::", action)
np.putmask(action['robot'], action['robot'] > 3.14, 0.0)
np.putmask(action['robot'], action['robot'] < -3.14, 0.0)
observation, reward, done, info = self._env.step(action)
if self.render:
self._env.render()
self.last_joint = observation['robot_joint_angles'].copy()
reward, done, info = self._flatten_reward(reward, done, info)
self.episode_steps += 1
self.total_steps += 1
self.episode_reward += reward
# self.episode_steps > self.max_episode_steps
if done or info['task_success']:
done = True
if info['task_success']:
reward += 10
self.episode_reward += reward
else:
reward += -10
self.episode_reward += reward
if done:
self.writer.add_scalar('train_episode_reward', self.episode_reward, self.total_steps)
self.writer.add_scalar('success_rate', info['task_success'], self.total_steps)
self.episode_reward = 0
# Clear the episode_info dictionary
self.episode_info = dict()
return self._flatten_obs(observation), reward, done, info
def get_ee_state(self):
self.ee_pose = self.mujoco_model.get_ee_pose()
self.ee_pos = self.ee_pose[:3]
self.ee_ori_mat = transforms3d.euler.euler2mat(self.ee_pose[3], self.ee_pose[4], self.ee_pose[5])
def view_render(self):
self._env.render()
def _flatten_obs(self, obs_dict):
"""
        Filter the observation keys of interest and concatenate their values.
"""
ob_lst = []
ob_lst.append(obs_dict["robot"])
ob_lst.append(np.cos(obs_dict["robot_joint_angles"]))
ob_lst.append(np.sin(obs_dict["robot_joint_angles"]))
# print(np.sin(obs_dict["robot_joint_angles"]))
# for key in self.keys:
# if key in obs_dict:
# ob_lst.append(np.array(obs_dict[key]).flatten())
return np.concatenate(ob_lst)
def _flatten_reward(self, reward, done, info):
# {'robot': reward, 'human': reward}, {'robot': done, 'human': done, '__all__': done}, {'robot': info, 'human': info}
return reward['robot'], done['__all__'], info['robot']
def _desired_ori(self, current_euler, rot_euler):
# convert axis-angle value to rotation matrix
# quat_error = trans.axisangle2quat(rot_euler)
# rotation_mat_error = trans.quat2mat(quat_error)
# curr_mat = trans.euler2mat(current_euler)
rotation_mat_error = transforms3d.euler.euler2mat(rot_euler[0], rot_euler[1], rot_euler[2], 'sxyz')
# rotation_mat_error = trans.quat2mat(quat_error)
# curr_mat = trans.euler2mat(current_euler)
curr_mat = transforms3d.euler.euler2mat(current_euler[0], current_euler[1], current_euler[2], 'sxyz')
goal_orientation = np.dot(rotation_mat_error, curr_mat)
# return trans.mat2euler(goal_orientation, 'sxyz')
# return transforms3d.euler.mat2euler(goal_orientation, 'sxyz')
return goal_orientation
class Mujoco_RL_model():
def __init__(self,
controller_options,
render=True,
frame_skip=10,
ratio=2.0,
stiff_scale=10,
params_fixed=True):
model_path = os.path.join(controller_options['model_root'], controller_options['model_path'])
model = mujoco_py.load_model_from_path(model_path)
self.sim = mujoco_py.MjSim(model)
self.controller_name = controller_options['controller_name']
controller_path = os.path.join('/home/zhimin/code/5_thu/rl-robotic-assembly-control/envs/robosuite/robosuite/',
'controllers/config/{}.json'.format(self.controller_name.lower()))
self.controller_config = load_controller_config(custom_fpath=controller_path)
self.controller_config['sim'] = self.sim
self.controller_config["eef_name"] = "ee_site"
self.controller_config['robot_joints'] = controller_options["controlled_joints"]
self._get_index(self.controller_config['robot_joints'])
self.controller_config["joint_indexes"] = {
"joints": self._ref_joint_indexes,
"qpos": self._ref_joint_pos_indexes,
"qvel": self._ref_joint_vel_indexes
}
self.controller_config["impedance_mode"] = controller_options["impedance_mode"]
self.controller_config["kp_limits"] = controller_options["kp_limits"]
self.controller_config["damping_limits"] = [0, 10]
self.controller_config["actuator_range"] = self._torque_limits()
self.controller_config["policy_freq"] = 20
self.controller = controller_factory(self.controller_name, self.controller_config)
self.action_dim = controller_options['action_dim']
self.frame_skip = frame_skip
self.render = render
if self.render:
self.viewer = mujoco_py.MjViewer(self.sim)
# self.num_way_points = controller_options['num_waypoints']
# self.stiffness_initial = controller_options['stiffness_initial']
# self.weight_initial = controller_options['weight_initial']
# self.ratio = ratio
# self.stiffness_scale = stiff_scale
# self.params_fixed = params_fixed
def reset(self, initial_angles):
"""
reset robot joints
"""
# self.controller.update_initial_joints(initial_angles)
self.sim_state = self.sim.get_state()
self.sim_state.qpos[:7] = initial_angles
qvel = np.zeros(7)
self.sim_state.qvel[:7] = qvel
self.sim.set_state(self.sim_state)
self.sim.forward()
self.controller.update(force=True)
self.controller.reset_goal()
if self.render:
self.viewer.render()
self.episode_step = 0
return self.get_ee_pose()
def step(self, action, set_pos=None, set_ori=None):
dt = self.sim.model.opt.timestep
self._set_goal(action, set_pos=set_pos, set_ori=set_ori)
for _ in range(self.frame_skip):
# print("set_pos", self.controller.goal_pos, "set_ori", self.controller.goal_ori)
torques = self._control(action, set_pos=set_pos, set_ori=set_ori)
# print("torques :", torques)
self.sim.data.ctrl[:7] = torques[:7]
self.sim.step()
self.sim.forward()
# render_frame(viewer, pos_set_list[0, :], quat_set_list[0, :])
if self.render:
self.viewer.render()
# print("dist :", np.linalg.norm(self.controller.state, ord=2))
return self.get_joint_state()
def _set_goal(self, action, set_pos=None, set_ori=None):
# clip actions into valid range
assert len(action) == self.action_dim, \
"environment got invalid action dimension -- expected {}, got {}".format(
self.action_dim, len(action))
arm_action = action
# Update the controller goal if this is a new policy step
self.controller.set_goal(arm_action, set_pos=set_pos, set_ori=set_ori)
def _control(self, action, set_pos=None, set_ori=None):
# # clip actions into valid range
# assert len(action) == self.action_dim, \
# "environment got invalid action dimension -- expected {}, got {}".format(
# self.action_dim, len(action))
#
# arm_action = action
#
# # Update the controller goal if this is a new policy step
# self.controller.set_goal(arm_action, set_pos=set_pos, set_ori=set_ori)
# Now run the controller for a step
torques = self.controller.run_controller()
# print("torques :", torques)
# Clip the torques
low, high = self._torque_limits()
self.torques = np.clip(torques, low, high)
# # Apply joint torque control
# self.sim.data.ctrl[self._ref_joint_actuator_indexes] = self.torques
return self.torques
def get_joint_state(self):
# , self.controller.joint_vel
self.controller.update()
return self.controller.joint_pos
def get_ee_pose(self):
self.ee_pos = self.controller.ee_pos
self.ee_ori_euler = transforms3d.euler.mat2euler(self.controller.ee_ori_mat, 'sxyz')
return np.hstack([self.ee_pos, self.ee_ori_euler])
def _get_index(self, robot_joints):
# indices for joint indexes
self._ref_joint_indexes = [
self.sim.model.joint_name2id(joint) for joint in robot_joints
]
self._ref_joint_pos_indexes = [
self.sim.model.get_joint_qpos_addr(x) for x in robot_joints
]
self._ref_joint_vel_indexes = [
self.sim.model.get_joint_qvel_addr(x) for x in robot_joints
]
self._ref_joint_actuator_indexes = [
self.sim.model.actuator_name2id(actuator)
for actuator in robot_joints
]
def _torque_limits(self):
"""
Torque lower/upper limits per dimension.
Returns:
2-tuple:
- (np.array) minimum (low) torque values
- (np.array) maximum (high) torque values
"""
# Torque limit values pulled from relevant robot.xml file
low = self.sim.model.actuator_ctrlrange[self._ref_joint_actuator_indexes, 0]
high = self.sim.model.actuator_ctrlrange[self._ref_joint_actuator_indexes, 1]
return low, high
def viewer_render(self):
self.viewer.render()
class MujocoEnv(TaskInterface):
"""
Interface with MujocoEnv
"""
def __init__(self, env, params):
super(MujocoEnv).__init__()
self._env = env
self.total_timesteps = 0
self.episode_timesteps = 0
self.episode_number = 0
self.episode_reward = 0
self.reward_scale = params['alg_options']['reward_scale']
self.stiffness_scale = params['alg_options']['stiffness_scale']
self.ratio = params['alg_options']['ratio']
self.max_episode_steps = self._env._max_episode_steps
self._state_dim = self._env.context_dim
self._context = None
self.num_way_points = self._env.num_waypoints
self.action_dim = self._env.action_dim
self.target_pos = None
self.target_quat = None
self.target_euler = None
def get_context_dim(self):
return self._env.context_dim
def get_impedance_dim(self):
return self._env.parameter_dim
def read_context(self):
# context, target_pos, target_quat, target_euler = self._env.get_context()
# return context, target_pos, target_quat, target_euler
if self._context is not None:
return self._context
else:
exit()
def send_movement(self, params, render=False):
"""
send parameters
"""
stiffness_list, damping_list, weight_list = self.send_params(params)
# stiffness_list = stiffness_list.reshape(self.num_way_points, self.action_dim)
# damping_list = damping_list.reshape(self.num_way_points, self.action_dim)
# print("stiffness_list :", stiffness_list)
# print("damping_list :", damping_list)
# print("weight_list :", weight_list)
self._env.set_params(stiffness_list, damping_list, weight_list)
reward, context, info = self.run_single_trajectory(n=1, render=render)
return reward, context, info
def set_waypoints(self, way_points_list=None):
if way_points_list is None:
pos_set_list = np.zeros((self.num_way_points, 3))
quat_set_list = np.zeros((self.num_way_points, 3))
for i in range(self.num_way_points):
pos_set_list[i, :] = self.target_pos
quat_set_list[i, :] = self.target_euler
way_points_list = np.concatenate((pos_set_list, quat_set_list), axis=1)
self._env.set_waypoints(way_points_list)
else:
print("Please give the waypoints !!!")
def send_params(self, params):
"""
        Map the flat parameter vector to per-waypoint stiffness, damping and weight lists.
"""
params = np.clip(params, 0, 1)
# print("params :", params)
stiffness_list = params[:self.num_way_points * self.action_dim].reshape(self.num_way_points,
-1) * self.stiffness_scale
damping_list = self.ratio * np.sqrt(stiffness_list)
weight_list = params[self.num_way_points * self.action_dim:]
return stiffness_list, damping_list, weight_list
def run_single_trajectory(self, n=None, render=False):
average_reward = 0.0
info = None
for i in range(n):
self.reset()
if render:
self._env.render()
time.sleep(1.0)
# set way points
self.set_waypoints(way_points_list=None)
while self.episode_timesteps < self.max_episode_steps:
action = np.zeros(6)
new_obs, reward, self.done, info = self._env.step_imogic(action)
done_bool = 0 if self.episode_timesteps - 1 == self._env._max_episode_steps else float(self.done)
if render:
self._env.render()
self.episode_reward += reward
self.episode_timesteps += 1
self.total_timesteps += 1
if self.done:
break
if done_bool:
average_reward += self.episode_reward
else:
average_reward += -100.0
print("Done :", done_bool, "Episode timesteps :", self.episode_timesteps, "Reward :",
np.around(average_reward / n, 4) * self.reward_scale)
self.episode_number += 1
self.episode_timesteps = 0
self.episode_reward = 0.0
done_bool = 0
return np.around(average_reward / n, 4) * self.reward_scale, self._context, info
def reset(self):
"""
get context after env reset
"""
self._env.reset()
self._context, self.target_pos, self.target_quat, self.target_euler = \
self._env.get_context()
def get_demonstrations(self, num_traj=50, render=False):
"""
generate demonstration samples
"""
params_list = np.random.uniform(0, 1, size=(num_traj, self._env.latent_parameter_dim)) \
* (np.array(self._env.latent_parameter_high) - np.array(self._env.latent_parameter_low)) \
+ np.array(self._env.latent_parameter_low)
# # params_list = np.random.uniform(0, 1, size=(num_traj, self._env.latent_parameter_dim))
# print("params_list :", params_list.shape)
reward_list = []
context_list = []
for i in range(num_traj):
params = params_list[i, :]
reward, context, info = self.send_movement(params, render=render)
# context, _, _, _ = self._env.get_context()
context_list.append(context.tolist())
# print("info :", info)
reward_list.append(np.copy(reward))
return np.hstack((np.array(context_list), params_list))
# return np.hstack((np.array(context_list), params_list)), reward_list
# return [np.concatenate((np.array(context_list), params_list), axis=0)]
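# --- illustrative sketch (mirrors the send_params layout above with toy numbers; names here are hypothetical) ---
def _demo_send_params_layout():
    import numpy as np
    num_way_points, action_dim, stiffness_scale, ratio = 2, 6, 10, 2.0
    params = np.random.uniform(0, 1, num_way_points * action_dim + num_way_points)
    stiffness = params[:num_way_points * action_dim].reshape(num_way_points, -1) * stiffness_scale
    damping = ratio * np.sqrt(stiffness)   # damping coupled to stiffness via d ~ sqrt(k)
    weights = params[num_way_points * action_dim:]
    print(stiffness.shape, damping.shape, weights.shape)  # (2, 6) (2, 6) (2,)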
class PybulletEnv(TaskInterface):
def __init__(self, env, state_dim, action_dim, n_features):
super(PybulletEnv).__init__()
self._group = Group("pybullet", ["joint%d" % i for i in range(action_dim)])
self._space = ClassicSpace(self._group, n_features)
self._env = env
self.task = self._env.task
self._state_dim = state_dim
self._action_dim = action_dim
self._n_features = n_features
self._headless = True
self._env.latent_parameter_dim = self._n_features * self._action_dim
# obs_config = ObservationConfig()
# obs_config.set_all_low_dim(True)
# obs_config.set_all_high_dim(False)
# self._obs_config = obs_config
#
# action_mode = ActionMode(ArmActionMode.ABS_JOINT_POSITION)
# self._action_mode = action_mode
self._obs = None
self.render = True
def get_context_dim(self):
return self._state_dim
def get_impedance_dim(self):
return self._env.latent_parameter_dim
def read_context(self):
# self._env.reset()
tool_pos, tool_orient = self._env.get_tool_pose()
target_pos, target_orient, _, _, _, _ = self._env.get_context()
delta_pos = target_pos - tool_pos
return np.concatenate((delta_pos, target_orient), axis=0)
def get_demonstrations(self, num_traj=50):
# file = "parameters/%s_%d.npy" % (self.env_name, self._space.n_features)
# try:
# return np.load(file)
# except:
# raise Exception("File %s not found. Please consider running 'dataset_generator.py'" % file)
# generate random parameters and collect data
# params_list = np.random.uniform(0, 1, size=(num_traj, self._env.latent_parameter_dim)) \
# * (np.array(self._env.latent_parameter_high) - np.array(self._env.latent_parameter_low)) \
# + np.array(self._env.latent_parameter_low)
params_list = np.random.uniform(0, 1, size=(num_traj, self._n_features * self._action_dim))
print("params_list :", params_list.shape)
reward_list = []
context_list = []
duration = 5
for i in range(num_traj):
print('+' * 25, i)
self.reset()
context = self.read_context()
context_list.append(context.tolist())
params = params_list[i, :]
reward, info = self.send_movement(params, duration)
reward_list.append(np.copy(reward))
return np.hstack((np.array(context_list), params_list))
def send_movement(self, weights, duration):
mp = MovementPrimitive(self._space, MovementPrimitive.get_params_from_block(self._space, weights))
duration = 1 if duration < 0 else duration
if self._headless:
trajectory = mp.get_full_trajectory(duration=min(duration, 1), frequency=200)
else:
trajectory = mp.get_full_trajectory(duration=5 * duration, frequency=200)
# return trajectory
tot_reward = 0.
success = 0
epi_reward = 0.
info = dict()
# self._env.render()
# self._env.reset()
for a1 in trajectory.values: # a2 in zip(trajectory.values[:-1, :], trajectory.values[1:, :]):
joint = a1 # (a2-a1) * 20.
print("robot joint :", joint)
action = {'robot': joint - self.joint, 'human': np.zeros(self._env.action_human_len)} # self._env.action_space_human.sample()
obs, reward, terminate, info = self._env.step(action)
self.joint = obs['robot_joint_angles'].copy()
if self.render:
self._env.render()
if reward == 1. or terminate == 1.:
if reward == 1.:
success = 1.
break
# tot_reward, success = self._stop(action, success)
info['tot_reward'] = tot_reward
info['epi_reward'] = epi_reward
return success, info
def reset(self):
self._env.render()
self._obs = self._env.reset()
self.joint = self._obs['robot_joint_angles'].copy()
# def _stop(self, joint_gripper, previous_reward):
# if previous_reward == 0.:
# success = 0.
# for _ in range(50):
# obs, reward, terminate = self.task.step(joint_gripper)
# if reward == 1.0:
# success = 1.
# break
# return self.task._task.get_dense_reward(), success
# return self.task._task.get_dense_reward(), 1.
class RLBenchBox(TaskInterface):
def __init__(self, task_class, state_dim, n_features, headless=True):
super().__init__(n_features)
self._group = Group("rlbench", ["d%d" % i for i in range(7)] + ["gripper"])
self._space = ClassicSpace(self._group, n_features)
obs_config = ObservationConfig()
obs_config.set_all_low_dim(True)
obs_config.set_all_high_dim(False)
self._obs_config = obs_config
self._state_dim = state_dim
self._headless = headless
action_mode = ActionMode(ArmActionMode.ABS_JOINT_POSITION)
self._task_class = task_class
self._action_mode = action_mode
self.env = Environment(action_mode, "", obs_config, headless=headless)
self.env.launch()
self.task = self.env.get_task(task_class)
self._obs = None
def get_context_dim(self):
return self._state_dim
def read_context(self):
return self._obs[1].task_low_dim_state
def get_demonstrations(self):
file = "parameters/%s_%d.npy" % (self.task.get_name(), self._space.n_features)
try:
return np.load(file)
except:
raise Exception("File %s not found. Please consider running 'dataset_generator.py'" % file)
def _stop(self, joint_gripper, previous_reward):
if previous_reward == 0.:
success = 0.
for _ in range(50):
obs, reward, terminate = self.task.step(joint_gripper)
if reward == 1.0:
success = 1.
break
return self.task._task.get_dense_reward(), success
return self.task._task.get_dense_reward(), 1.
def send_movement(self, weights, duration):
mp = MovementPrimitive(self._space, MovementPrimitive.get_params_from_block(self._space, weights))
duration = 1 if duration < 0 else duration
if self._headless:
trajectory = mp.get_full_trajectory(duration=min(duration, 1), frequency=200)
else:
trajectory = mp.get_full_trajectory(duration=5 * duration, frequency=200)
tot_reward = 0.
success = 0
for a1 in trajectory.values: # , a2 in zip(trajectory.values[:-1, :], trajectory.values[1:, :]):
joint = a1 # (a2-a1)*20.
joint_gripper = joint
obs, reward, terminate = self.task.step(joint_gripper)
if reward == 1. or terminate == 1.:
if reward == 1.:
success = 1.
break
tot_reward, success = self._stop(joint_gripper, success)
return success, tot_reward
def reset(self):
self._obs = self.task.reset()
class Forward2DKinematics:
def __init__(self, d1, d2):
self._d1 = d1
self._d2 = d2
def _link(self, d):
return np.array([d, 0.])
def _rot(self, theta):
return np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
def get_forward(self, theta1, theta2):
x1 = self._rot(theta1) @ self._link(self._d1)
r1 = self._rot(theta1) @ self._rot(0.)
r2 = self._rot(theta2) @ r1
x2 = r2 @ self._link(self._d2) + x1
return x2
def get_full_forward(self, theta1, theta2):
x1 = self._rot(theta1) @ self._link(self._d1)
r1 = self._rot(theta1) @ self._rot(0.)
r2 = self._rot(theta2) @ r1
x2 = r2 @ self._link(self._d2) + x1
return x1, x2
def get_loss(self, theta1, theta2, goal):
ref = self.get_forward(theta1, theta2)
return np.mean((ref - goal)**2)
def jac(self, theta1, theta2, goal, delta=1E-5):
ref = self.get_loss(theta1, theta2, goal)
j1 = (self.get_loss(theta1 + delta, theta2, goal) - ref)/delta
j2 = (self.get_loss(theta1, theta2+delta, goal) - ref)/delta
return np.array([j1, j2])
def get_trajectory(self, theta1, theta2, goal, v=0.1):
conf = [np.array([theta1, theta2])]
for _ in range(200):
conf.append(conf[-1]-v*self.jac(conf[-1][0], conf[-1][1], goal))
return conf, [self.get_forward(c[0], c[1]) for c in conf]
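# --- illustrative check (uses the Forward2DKinematics class defined above) ---
def _demo_forward2d_ik():
    import numpy as np
    kin = Forward2DKinematics(1., 1.)
    goal = np.array([1.0, 1.0])
    conf, traj = kin.get_trajectory(0.1, 0.1, goal)
    # after 200 finite-difference gradient steps the end effector should sit near the goal
    print("end effector:", traj[-1], "loss:", kin.get_loss(conf[-1][0], conf[-1][1], goal))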
class Reacher2D(TaskInterface):
def __init__(self, n_features, points=0, headless=True):
super().__init__(n_features)
self._group = Group("reacher2d", ["j%d" % i for i in range(2)])
self._space = ClassicSpace(self._group, n_features)
self._state_dim = 2
self._headless = headless
self._n_points = points
self._goals = [self._point(3/2, np.pi/8),
self._point(1., np.pi/2 + np.pi/8),
self._point(2/3, np.pi + np.pi/4),
self._point(1/2, 3/2*np.pi + np.pi/6)]
self._kinematics = Forward2DKinematics(1., 1.)
self._context = None
def _point(self, d, theta):
return d*np.array([np.cos(theta), np.sin(theta)])
def _generate_context(self, goal=None):
if self._n_points == 0:
d = np.random.uniform(0, 1)
a = np.random.uniform(-np.pi, np.pi)
return self._point(d, a)
else:
if goal is None:
k = np.random.choice(range(self._n_points))
else:
k = goal
g = self._goals[k]
d = np.random.uniform(0, 1/5)
a = np.random.uniform(-np.pi, np.pi)
return g + self._point(d, a)
def give_example(self, goal=None):
goal = self._generate_context(goal)
conf, traj = self._kinematics.get_trajectory(0., 0., goal)
return goal, conf, traj
def _generate_demo(self):
goal = self._generate_context()
conf, traj = self._kinematics.get_trajectory(0., 0., goal)
trajectory = NamedTrajectory(*self._group.refs)
for c in conf:
trajectory.notify(duration=1/100.,
j0=c[0], j1=c[1])
return goal, np.array([3.]), LearnTrajectory(self._space, trajectory).get_block_params()
def get_context_dim(self):
return self._state_dim
def read_context(self):
return self._context
def get_demonstrations(self):
return np.array([np.concatenate(self._generate_demo(), axis=0) for _ in range(100)])
def send_movement(self, weights, duration):
mp = MovementPrimitive(self._space, MovementPrimitive.get_params_from_block(self._space, weights))
duration = 1 if duration < 0 else duration
trajectory = mp.get_full_trajectory(duration=duration, frequency=200)
vals = trajectory.get_dict_values()
reward = -self._kinematics.get_loss(vals["j0"][-1], vals["j1"][-1], self._context)
return reward, reward
def reset(self):
self._context = self._generate_context()
class ObstacleRectangle:
def __init__(self, x, y, dx, dy):
self.x1 = x
self.x2 = x + dx
self.dx = dx
self.y1 = y
self.y2 = y + dy
self.dy = dy
self._patch = patches.Rectangle((x, y), dx, dy,
linewidth=1, edgecolor='r', facecolor='r')
def check_collision_point(self, point):
if self.x1 <= point[0] <= self.x2:
if self.y1 <= point[1] <= self.y2:
return True
return False
def check_collision_points(self, points):
for point in points:
if self.check_collision_point(point):
return True
return False
def draw(self, ax):
self._patch = patches.Rectangle((self.x1, self.y1), self.dx, self.dy,
linewidth=1, edgecolor='r', facecolor='r')
ax.add_patch(self._patch)
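# --- illustrative check (axis-aligned containment tests for ObstacleRectangle above) ---
def _demo_obstacle_rectangle():
    rect = ObstacleRectangle(0.5, 0.5, 0.25, 0.25)   # spans [0.5, 0.75] x [0.5, 0.75]
    assert rect.check_collision_point((0.6, 0.6))
    assert not rect.check_collision_points([(0.0, 0.0), (1.0, 1.0)])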
def _positive_range(angle):
ret = angle
while ret < 0.:
ret += 2 * np.pi
while ret > 2 * np.pi:
ret -= 2 * np.pi
return ret
def _2p_range(angle):
ret = angle
while ret < -np.pi:
ret += 2 * np.pi
while ret > np.pi:
ret -= 2 * np.pi
return ret
def get_angle_between(angle_1, angle_2):
_angle_1 = _positive_range(angle_1)
_angle_2 = _positive_range(angle_2)
if np.abs(_angle_1 - _angle_2) < np.pi:
return np.abs(_angle_1 - _angle_2)
else:
return 2*np.pi - np.abs(_angle_1 - _angle_2)
def get_mid_angle(angle_1, angle_2, length):
_angle_1 = _positive_range(angle_1)
_angle_2 = _positive_range(angle_2)
if np.abs(_angle_1 - _angle_2) < np.pi:
ret = _angle_1 + np.clip(_angle_2 - _angle_1, -length, length)
else:
if _angle_2 > _angle_1:
delta = get_angle_between(_angle_2, _angle_1)
delta = min(delta, length)
ret = _angle_1 - delta
else:
delta = get_angle_between(_angle_2, _angle_1)
delta = min(delta, length)
ret = _angle_1 + delta
ret = _2p_range(ret)
return ret
def sampling():
return np.random.uniform(-np.pi * np.ones(2), np.pi * np.ones(2))
# print(get_angle_between(-np.pi+0.1, np.pi-0.1))
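# --- illustrative check (exercises the wrap-around-aware angle helpers above) ---
def _demo_angle_utils():
    import numpy as np
    # the shortest separation across the -pi/pi seam is 0.2 rad, not ~6.08 rad
    assert abs(get_angle_between(-np.pi + 0.1, np.pi - 0.1) - 0.2) < 1e-9
    # step from the first angle toward the second by at most 0.05 rad, crossing the seam
    print(get_mid_angle(-np.pi + 0.1, np.pi - 0.1, 0.05))  # ~ -pi + 0.05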
class ObstacleReacher2d(TaskInterface):
def __init__(self, n_features, headless=True):
super().__init__(n_features)
self._group = Group("reacher2d", ["j%d" % i for i in range(2)])
self._space = ClassicSpace(self._group, n_features)
self._state_dim = 2
self._headless = headless
self._obstacle = ObstacleRectangle(0.5, 0.5, 0.25, 0.25)
self._kinematics = Forward2DKinematics(1., 1.)
self._rrt_star = RRTStar(
|
np.array([0., 0.])
|
numpy.array
|
"""
utils.py
Utility functions for DeepND
Bilkent University, Department of Computer Engineering
Ankara, 2020
"""
import numpy as np
import pandas as pd
from datetime import timedelta
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda as cutorch
from torch_geometric.nn import GCNConv
from torch_geometric.data import Data
import csv
import math
import time
from scipy.sparse import identity
import subprocess
def memoryUpdate(usage = 0, cached = 0):
# Memory Update!
current_usage = 0
current_cached = 0
for d in range(torch.cuda.device_count()):
current_usage += torch.cuda.max_memory_allocated(device='cuda:'+str(d))
current_cached += torch.cuda.max_memory_cached(device='cuda:'+str(d))
usage = max(usage,current_usage)
cached = max(cached, current_cached)
print("GPU Memory Usage:", usage / 10**9, "GB Used, ", cached / 10**9, "GB Cached")
return usage, cached
def weight_reset(m):
if isinstance(m, GCNConv) or isinstance(m, nn.Linear) or isinstance(m, nn.BatchNorm1d):
m.reset_parameters()
# Xavier initialization for layers
if isinstance(m, nn.Linear) or isinstance(m, GCNConv):
torch.nn.init.xavier_uniform_(m.weight)
torch.nn.init.zeros_(m.bias)
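# --- illustrative usage (the standard nn.Module.apply pattern; layer sizes are arbitrary) ---
def _demo_weight_reset():
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(4, 8), nn.BatchNorm1d(8), nn.Linear(8, 2))
    model.apply(weight_reset)  # recursively re-initializes every supported layer in place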
def freeze_layer(layer):
for param in layer.parameters():
param.requires_grad = False
def unfreeze_layer(layer):
for param in layer.parameters():
param.requires_grad = True
def intersect_lists(source_list, target_list, lookup):
    #Intersects gene lists from multiple sources
source_indices = []
target_indices = []
result = []
not_found_indices = []
not_found_item = []
not_found_count = 0
for source_index, source_item in enumerate(source_list):
found = 0
for target_index, target_item in enumerate(target_list):
if source_item.lower() == target_item.lower():
source_indices.append(source_index)
target_indices.append(target_index)
result.append(target_item.lower())
found = 1
break
if found == 0:
for target_index, target_item in enumerate(target_list):
#if source_item.lower() in lookup and target_item.lower() in lookup and does_intersect(lookup[source_item.lower()], lookup[target_item.lower()]): #Dictionary search
if source_item.lower() in lookup and target_item.lower() in lookup and lookup[source_item.lower()] and lookup[target_item.lower()] and lookup[source_item.lower()][-1] == lookup[target_item.lower()][-1] : #Dictionary search
source_indices.append(source_index)
target_indices.append(target_index)
result.append(target_item.lower())
found = 1
print("Found in Dictionary!", source_item , target_item)
break
if found == 0:
not_found_indices.append(source_index)
not_found_item.append(source_item)
not_found_count += 1
#print("The gene {0} is not found. Not Found Count:{1}".format(source_item, not_found_count))
return result, source_indices, not_found_indices, target_indices
def does_intersect(source_list, target_list):
for source_item in source_list:
for target_item in target_list:
if source_item.lower() == target_item.lower():
return True
return False
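# --- illustrative check (toy alias dictionary; here the last list entry acts as a shared gene id) ---
def _demo_intersect_lists():
    lookup = {"tp53": ["p53", "7157"], "p53": ["tp53", "7157"]}
    result, src_idx, missing, tgt_idx = intersect_lists(["TP53", "BRCA1"], ["p53"], lookup)
    print(result, src_idx, missing)  # ['p53'] [0] [1] -- TP53 matches via the dictionary, BRCA1 does not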
def constructGeneDictionary(path):
# Constructs a dictionary for gene aliases and ids
genes = dict()
lineCount = 1
with open(path) as tsv:
for line in csv.reader(tsv, dialect = csv.excel_tab, delimiter = "\t"): #You can also use delimiter="\t" rather than giving a dialect.
if line[0] == "Approved symbol":
continue
for item in line:
if item == "":
continue
gene_item = item.split(", ")
#if lineCount == 10282:
#print(gene_item)
for comma_item in gene_item:
gene_list = []
for item2 in line:
if item2 == "":
continue
gene_item2 = item2.split(", ")
for comma_item2 in gene_item2:
if comma_item2 == comma_item:
continue
gene_list.append(comma_item2.lower())
genes[comma_item.lower()] = gene_list
lineCount += 1
return genes
def create_validation_set(g_bs_tada_intersect_indices, n_bs_tada_intersect_indices, gold_evidence, k, state):
# k for k-fold cross validation
# If another validation set is used, gene counts must be updated. This part could be done automatically
# as well by checking gene evidences and standard values from files
e1_gene_count = 0
e2_gene_count = 0
e3e4_gene_count = 0
e1_gene_indices = []
e2_gene_indices = []
e3e4_gene_indices = []
neg_gene_indices = []
pos_gold_standards = []
neg_gold_standards = []
for index,i in enumerate(gold_evidence):
if i == "E1":
e1_gene_count += 1
e1_gene_indices.append(g_bs_tada_intersect_indices[index])
elif i == "E2":
e2_gene_count += 1
e2_gene_indices.append(g_bs_tada_intersect_indices[index])
else:
e3e4_gene_count += 1
e3e4_gene_indices.append(g_bs_tada_intersect_indices[index])
for item in n_bs_tada_intersect_indices:
neg_gene_indices.append(item)
e1_fold_size = math.ceil(e1_gene_count / k)
e2_fold_size = math.ceil(e2_gene_count / k)
e3e4_fold_size = math.ceil(e3e4_gene_count / k)
neg_gene_count = len(n_bs_tada_intersect_indices)
neg_fold_size = math.ceil(neg_gene_count / k)
print("E1 Gene Count:", e1_gene_count)
print("E2 Gene Count:", e2_gene_count)
print("E3E4 Gene Count:", e3e4_gene_count)
counts = [e1_gene_count, e2_gene_count, e3e4_gene_count, neg_gene_count]
# Shuffle all genes
if state:
np.random.set_state(state)
e1_perm = np.random.permutation(e1_gene_count)
e2_perm = np.random.permutation(e2_gene_count)
e3e4_perm = np.random.permutation(e3e4_gene_count)
neg_perm = np.random.permutation(neg_gene_count)
return [e1_gene_indices, e2_gene_indices, e3e4_gene_indices, neg_gene_indices],[e1_perm, e2_perm, e3e4_perm, neg_perm], counts
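# --- illustrative check (toy gene indices/evidence for create_validation_set above) ---
def _demo_create_validation_set():
    import numpy as np
    pos_indices = [10, 11, 12, 13]
    neg_indices = [20, 21]
    evidence = ["E1", "E1", "E2", "E3"]
    indices, perms, counts = create_validation_set(pos_indices, neg_indices, evidence,
                                                   k=2, state=np.random.get_state())
    print(counts)  # [2, 1, 1, 2] -> E1, E2, E3E4 and negative gene counts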
def write_prediction(predictions, e1_indices, e2_indices, e3e4_indices, negative_indices, feature_names, root, task_name, trial, k, experiment, experiment_name):
fpred = open( root + experiment_name + "Exp" + str(experiment) + "/predict_" + task_name.lower() +".csv","w+")
if feature_names == "indices":
        fpred.write('Index,Probability,Positive Gold Standard,Negative Gold Standard,Evidence Level\n')
for index,row in enumerate(predictions):
evidence_level = ""
if index in e1_indices:
evidence_level = "E1"
elif index in e2_indices:
evidence_level = "E2"
elif index in e3e4_indices:
evidence_level = "E3E4"
elif index in negative_indices:
evidence_level = "Negative"
fpred.write('%d,%s,%d,%d,%s\n' % (index, str(row.item()), 1 if (index in e1_indices or index in e2_indices or index in e3e4_indices) else 0, 1 if index in negative_indices else 0, evidence_level) )
else:
feature_names_file = pd.read_csv(feature_names.strip())
headers = feature_names_file.columns.tolist()
results_header = ""
format_string = ""
for header in headers:
results_header += header + ","
format_string += "%s,"
results_header += "Probability, Positive Gold Standard, Negative Gold Standard, Evidence Level\n"
format_string += "%s,%d,%d,%s"
fpred.write(results_header)
index = 0
for prediction_row, feature_names_row in zip(predictions, feature_names_file.values):
line_tuple = ()
evidence_level = ""
if index in e1_indices:
evidence_level = "E1"
elif index in e2_indices:
evidence_level = "E2"
elif index in e3e4_indices:
evidence_level = "E3E4"
elif index in negative_indices:
evidence_level = "Negative"
for i in range(len(feature_names_row)):
line_tuple += (feature_names_row[i],)
            fpred.write(format_string % (line_tuple + (str(prediction_row.item()), 1 if (index in e1_indices or index in e2_indices or index in e3e4_indices) else 0, 1 if index in negative_indices else 0, evidence_level)) )
fpred.write("\n")
index += 1
fpred.close()
def write_experiment_stats (root, aucs, aupr, mmcs, experiment_name, trial, k, init_time, network_count, mode, task_names, experiment):
f = open( root + experiment_name + "Exp" + str(experiment) + "/runreport.txt","w")
f.write("Number of networks: %d\n" % network_count)
print("Number of networks:" , network_count)
if mode:
f.write("Generated test results, i.e. no training process.")
f.write("\nDone in %s hh:mm:ss.\n" % timedelta( seconds = (time.time()-init_time) ) )
for task_index in range(len(aucs)):
f.write("Task Name: %s\n" % task_names[task_index])
print("Task Name:", task_names[task_index])
f.write("-"*20+"\n")
f.write("\nMean (\u03BC) AUC of All Runs:%f\n" % np.mean(aucs[task_index]) )
print(" Mean(\u03BC) AUC of All Runs:", np.mean(aucs[task_index]) )
f.write(" \u03C3 of AUCs of All Runs:%f\n" % np.std(aucs[task_index]) )
print("\u03C3 of AUCs of All Runs:", np.std(aucs[task_index]) )
f.write(" Median of AUCs of All Runs:%f\n" % np.median(aucs[task_index]) )
print(" Median of AUCs of All Runs:", np.median(aucs[task_index]) )
print("-"*25)
f.write("\n Mean (\u03BC) AUPRCs of All Runs:%f\n" % np.mean(aupr[task_index]) )
print(" Mean(\u03BC) AUPRCs of All Runs:", np.mean(aupr[task_index]) )
f.write(" \u03C3 of AUPRCs of All Runs:%f\n" % np.std(aupr[task_index]) )
print(" \u03C3 of AUPRCs of All Runs:", np.std(aupr[task_index]) )
f.write(" Median of AUPRCs of All Runs:%f\n" % np.median(aupr[task_index]) )
print("Median of AUPRCSs of All Runs:", np.median(aupr[task_index]) )
print("-"*25)
f.write("\n Mean (\u03BC) MMCs of All Runs:%f\n" % np.mean(mmcs[task_index]) )
print(" Mean(\u03BC) MMCs of All Runs:", np.mean(mmcs[task_index]) )
f.write(" \u03C3 of MMCs of All Runs:%f\n" % np.std(mmcs[task_index]) )
print(" \u03C3 of MMCs of All Runs:", np.std(mmcs[task_index]) )
f.write(" Median of MMCs of All Runs:%f\n" % np.median(mmcs[task_index]) )
print("Median of MMCs of All Runs:", np.median(mmcs[task_index]) )
f.write("*"*80+"\n")
for j in range(len(aucs[task_index])):
f.write("%s AUC:%f\n" % (task_names[task_index], aucs[task_index][j]))
f.write("-"*20+"\n")
for j in range(len(aupr[task_index])):
f.write("%s AUPR:%f\n" % (task_names[task_index] , aupr[task_index][j]))
f.write("-"*20+"\n")
def create_network_list(networks):
network_files = []
splitted_tokens = networks.split(",")
test_network_path = "Data/"
regions = ["PFC","MDCBC","V1C","SHA"]
periods = ["1-3","2-4","3-5","4-6","5-7","6-8","7-9","8-10","9-11","10-12","11-13","12-14","13-15"]
for token in splitted_tokens:
if token.strip() == "brainspan_all":
for region in regions:
for period in periods:
network_files.append(test_network_path + region + period + "wTensor.pt")
elif token.strip() == "brainspan_no_overlap":
for region in regions:
for period in ["1-3", "4-6", "7-9", "10-12", "13-15"]:
network_files.append(test_network_path + region + period + "wTensor.pt")
else:
if token.strip().split("-")[0].isnumeric() and token.strip().split("-")[1].isnumeric():
regions = ["PFC","MDCBC","V1C","SHA"]
for region in regions:
network_files.append(test_network_path + region + token.strip() + "wTensor.pt")
elif "PFC" in token.strip() or "MDCBC" in token.strip() or "V1C" in token.strip() or "SHA" in token.strip():
network_files.append(test_network_path + token.strip() + "wTensor.pt")
else:
network_files.append(token.strip())
networks = []
for network in network_files:
networks.append(torch.load(network).type(torch.LongTensor))
return networks
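# --- note (a summary of the token grammar accepted by create_network_list above) ---
# "brainspan_all"        -> all 4 regions x 13 overlapping periods (52 networks)
# "brainspan_no_overlap" -> all 4 regions x 5 disjoint periods (20 networks)
# "4-6"                  -> that period for all 4 regions
# "PFC4-6"               -> a single region/period network
# anything else          -> treated as a literal file path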
def create_feature_set_list(feature_set):
feature_files = []
splitted_tokens = feature_set.split(",")
for token in splitted_tokens:
if token.strip() == "ASD":
feature_files.append("Data/ASD_TADA_Features.npy")
elif token.strip() == "ID":
feature_files.append("Data/ID_TADA_Features.npy")
elif token.strip() == "SCZ":
feature_files.append("Data/SCZ_TADA_Features.npy")
elif token.strip() == "EPI":
feature_files.append("Data/EPI_TADA_Features.npy")
elif token.strip() == "ASDID": # asd & id
feature_files.append("Data/Multi_TADA_Features.npy")
elif token.strip() == "ALL":
feature_files.append("Data/ALL_TADA_Features.npy")
elif token.strip() == "EPISCZ": # epilepsy & schzoprenia
feature_files.append("Data/Multi_EPISCZ_TADA_Features.npy")
elif token.strip() == "ASDSCZ": # asd & schzoprenia
feature_files.append("Data/Multi_ASDSCZ_TADA_Features.npy")
elif token.strip() == "ASDEPI": # asd & epilepsy
feature_files.append("Data/Multi_ASDEPI_TADA_Features.npy")
elif token.strip() == "IDSCZ": # id & schzoprenia
feature_files.append("Data/Multi_IDSCZ_TADA_Features.npy")
elif token.strip() == "IDEPI": # id & epilepsy
feature_files.append("Data/Multi_IDEPI_TADA_Features.npy")
elif token.strip() == "Krishnan":
feature_files.append("/mnt/oguzhan/oguzhan_workspace/Krishnan/KrishnanFeatures.pt")
else:
feature_files.append(token.strip())
return feature_files
def create_gt_list(root, positive_gt, negative_gt, verbose, k, state, instance_count):
gt_files = []
splitted_tokens = positive_gt.split(",")
neg_splitted_tokens = negative_gt.split(",")
all_gt_gene_indices = []
all_gt_gene_permutations = []
all_gt_gene_counts = []
all_gt_labels = []
for index,token in enumerate(splitted_tokens):
if token.strip() == "ASD":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "ASD", verbose, k, state)
elif token.strip() == "ID":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "ID", verbose, k, state)
elif token.strip() == "SCZ":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "SCZ", verbose, k, state)
elif token.strip() == "EPI":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "EPI", verbose, k, state)
elif token.strip() == "ASD_SFARI_E1E2":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "ASD_SFARI_E1E2", verbose, k, state)
elif token.strip() == "ASD_SFARI_E1E2E3":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "ASD_SFARI_E1E2E3", verbose, k, state)
elif token.strip() == "SFARI_Brueggeman":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "SFARI_Brueggeman", verbose, k, state)
elif token.strip() == "SPARK_Pilot":
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, "Data/SPARK_Pilot_Pos_Gold_Standards.csv", verbose, k, state,take_path = True,neg_file = "Data/ASD_SPARC_Neg_Gold_Standards.csv")
else:
gene_indices,gene_permutations, gene_counts, y = __match_ground_truths__(root, token.strip(), verbose, k, state,take_path = True,neg_file = neg_splitted_tokens[index])
'''
negative_gt_file = pd.read_csv(negative_gt.split(",")[index].strip()).values
positive_gt_file = pd.read_csv(token.strip()).values
if positive_gt_file.shape[1] == 2: # With evidence weights
positive_indices = np.array(positive_gt_file[:,0], dtype = np.long)
positive_evidences = positive_gt_file[:,1]
elif positive_gt_file.shape[1] == 1: #Without evidence weights
positive_indices = np.array(positive_gt_file[:,0], dtype = np.long)
positive_evidences = np.full(positive_indices.shape, "E1")
else:
print("Ground truth file ", token.strip()," is expected to have 1 or 2 columns. Please check github.com/ciceklab/DeepND for details.")
exit(0)
if negative_gt_file.shape[1] == 2: #With evidence weights
negative_indices = np.array(negative_gt_file[:,0], dtype = np.long)
negative_evidences = negative_gt_file[:,1]
elif negative_gt_file.shape[1] == 1: #Without evidence weights
negative_indices = np.array(negative_gt_file[:,0], dtype = np.long)
negative_evidences = np.full(negative_indices.shape, "E1") #NOT SUPPORTED CURRENTLY !!
else:
print("Ground truth file ", token.strip(), " is expected to have 1 or 2 columns. Please check github.com/ciceklab/DeepND for details.")
exit(0)
gene_indices, gene_permutations, gene_counts = create_validation_set(positive_indices, negative_indices, positive_evidences, k, state)
y = torch.zeros((instance_count,), dtype = torch.long)
y[positive_indices] = 1
y[negative_indices] = 0
'''
all_gt_gene_indices.append(gene_indices)
all_gt_gene_permutations.append(gene_permutations)
all_gt_gene_counts.append(gene_counts)
all_gt_labels.append(y)
return all_gt_gene_indices, all_gt_gene_permutations, all_gt_gene_counts, all_gt_labels
# Private method for gene matching. Written for ASD and ID.
def __match_ground_truths__(root,disorder_name, verbose, k, state, take_path = False, neg_file = ""):
geneDict = constructGeneDictionary(root + "Data/hugogenes_entrez.txt")
geneNames_all = pd.read_csv(root + "Data/row-genes.txt", header = None)
geneNames_all = geneNames_all[0].tolist()
if take_path == False:
pos_gold_standards = pd.read_csv( root + "Data/" + disorder_name + "_Pos_Gold_Standards.csv",na_filter=False,verbose=verbose)
else:
pos_gold_standards = pd.read_csv( root + disorder_name,na_filter=False,verbose=verbose)
pos_gold_std = pos_gold_standards.values
print(pos_gold_std[0:10,:])
pos_gold_std_genes = [str(item) for item in pos_gold_std[:,0]]
pos_gold_std_evidence = [str(item) for item in pos_gold_std[:,2]]
pgold_tada_intersect, pgold_indices, pgold_delete_indices, g_bs_tada_intersect_indices = intersect_lists(pos_gold_std_genes , [str(item) for item in geneNames_all], geneDict)
gold_evidence = [pos_gold_std_evidence[item] for item in pgold_indices]
if take_path == False:
neg_gold_standards = pd.read_csv(root + "Data/" + disorder_name + "_Neg_Gold_Standards.csv")
else:
neg_gold_standards = pd.read_csv(root + neg_file.strip())
neg_gold_std = neg_gold_standards.values
neg_gold_std_genes = [str(item) for item in neg_gold_std[:,0]]
    ngold_tada_intersect, ngold_indices, ngold_delete_indices, n_bs_tada_intersect_indices = intersect_lists(neg_gold_std_genes, [str(item) for item in geneNames_all], geneDict)
    pos_neg_intersect, pos_indices, not_found_indices, neg_indices = intersect_lists(pgold_tada_intersect, ngold_tada_intersect, geneDict)
y = torch.zeros(len(geneNames_all), dtype = torch.long)
y[n_bs_tada_intersect_indices] = 0
y[g_bs_tada_intersect_indices] = 1
if verbose:
print(len(pgold_tada_intersect), " positive gold standard genes are found for ", disorder_name)
print(len([pos_gold_std_genes[item] for item in pgold_delete_indices]), " positive gold standard genes cannot be found for ", disorder_name)
print(len(ngold_tada_intersect), " negative gold standard genes are found for ", disorder_name)
print(len([neg_gold_std_genes[item] for item in ngold_delete_indices]), " negative gold standard genes cannot be found for ", disorder_name)
print("Positive and negative gold standard gene intersection list:", pos_neg_intersect)
print("Positive and negative gold standard gene intersection list length:", len(pos_neg_intersect))
gene_indices, gene_permutations, gene_counts = create_validation_set(g_bs_tada_intersect_indices, n_bs_tada_intersect_indices, gold_evidence, k, state)
return gene_indices, gene_permutations, gene_counts, y
def load_all_features(feature_list):
features = []
for feature_set in feature_list:
feature = None
file_tokens = feature_set.split(".")
print(file_tokens)
if len(file_tokens) == 1 and file_tokens[0].strip().split("_")[0].strip() == "identity":
i = torch.zeros((2,int(file_tokens[0].strip().split("_")[1].strip())),dtype= torch.long)
v = torch.zeros(int(file_tokens[0].strip().split("_")[1].strip()),dtype= torch.float)
for index in range(int(file_tokens[0].strip().split("_")[1].strip())):
i[0,index] = index
i[1,index] = index
v[index] = 1
#feature = identity(int(file_tokens[0].strip().split("_")[1].strip()), dtype='long', format='dia')
feature = torch.sparse.LongTensor(i,v,torch.Size([int(file_tokens[0].strip().split("_")[1].strip()),int(file_tokens[0].strip().split("_")[1].strip())]))
#feature = torch.eye(int(file_tokens[0].strip().split("_")[1].strip()), dtype = torch.long)
elif file_tokens[1].strip() == "csv" or file_tokens[1].strip() == "txt":
        feature = pd.read_csv(feature_set).values
feature = torch.from_numpy(feature).float()
feature = (feature - torch.mean(feature,0)) / (torch.std(feature,0))
elif file_tokens[1].strip() == "npy":
feature =
|
np.load(feature_set)
|
numpy.load
|
import json
import logging
import os
import xml.etree.ElementTree as ET
import gym
import gym_microrts
import jpype
import numpy as np
from jpype.imports import registerDomain
from jpype.types import JArray
from PIL import Image
from .grid_mode_vec_env import MicroRTSGridModeVecEnv
class MicroRTSBotVecEnv(MicroRTSGridModeVecEnv):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 150
}
def __init__(self,
ai1s=[],
ai2s=[],
partial_obs=False,
max_steps=2000,
map_path="maps/10x10/basesTwoWorkers10x10.xml",
reward_weight=np.array([0.0, 1.0, 0.0, 0.0, 0.0, 5.0])):
self.logger = logging.getLogger("")
self.ai1s = ai1s
self.ai2s = ai2s
assert len(ai1s) == len(ai2s), "for each environment, a microrts ai should be provided"
self.num_envs = len(ai1s)
self.partial_obs = partial_obs
self.max_steps = max_steps
self.map_path = map_path
self.reward_weight = reward_weight
# read map
self.microrts_path = os.path.join(gym_microrts.__path__[0], 'microrts')
root = ET.parse(os.path.join(self.microrts_path, self.map_path)).getroot()
self.height, self.width = int(root.get("height")), int(root.get("width"))
# launch the JVM
if not jpype._jpype.isStarted():
registerDomain("ts", alias="tests")
registerDomain("ai")
jars = [
"microrts.jar", "Coac.jar", "Droplet.jar", "GRojoA3N.jar",
"Izanagi.jar", "MixedBot.jar", "RojoBot.jar", "TiamatBot.jar", "UMSBot.jar" # "MindSeal.jar"
]
for jar in jars:
jpype.addClassPath(os.path.join(self.microrts_path, jar))
jpype.startJVM(convertStrings=False)
# start microrts client
from rts.units import UnitTypeTable
self.real_utt = UnitTypeTable()
from ai.rewardfunction import (AttackRewardFunction,
CloserToEnemyBaseRewardFunction,
ProduceBuildingRewardFunction,
ProduceCombatUnitRewardFunction,
ProduceWorkerRewardFunction,
ResourceGatherRewardFunction,
RewardFunctionInterface,
WinLossRewardFunction)
self.rfs = JArray(RewardFunctionInterface)([
WinLossRewardFunction(),
ResourceGatherRewardFunction(),
ProduceWorkerRewardFunction(),
ProduceBuildingRewardFunction(),
AttackRewardFunction(),
ProduceCombatUnitRewardFunction(),
# CloserToEnemyBaseRewardFunction(),
])
self.start_client()
self.num_planes = [5, 5, 3, len(self.utt['unitTypes'])+1, 6]
if partial_obs:
self.num_planes = [5, 5, 3, len(self.utt['unitTypes'])+1, 6, 2]
self.observation_space = gym.spaces.Discrete(2)
self.action_space = gym.spaces.Discrete(2)
def start_client(self) -> None:
"""Start Client to communicate with microRTS environment.
        Client is accessible as the `vec_client` property on the instance.
"""
from ai.core import AI
from ts import JNIGridnetVecClient as Client
self.vec_client = Client(
self.max_steps,
self.rfs,
os.path.expanduser(self.microrts_path),
self.map_path,
JArray(AI)([ai1(self.real_utt) for ai1 in self.ai1s]),
JArray(AI)([ai2(self.real_utt) for ai2 in self.ai2s]),
self.real_utt,
self.partial_obs,
)
self.render_client = self.vec_client.botClients[0]
# get the unit type table
self.utt = json.loads(str(self.render_client.sendUTT()))
def seed(self, seed: int) -> None:
"""Sets seed for action space"""
self.action_space.seed(seed)
self.logger.warning("")
def reset(self):
responses = self.vec_client.reset([0]*self.num_envs)
        raw_obs = np.ones((self.num_envs, 2))
reward = np.array(responses.reward)
done = np.array(responses.done)
info = {}
return raw_obs
def step_async(self, actions):
self.actions = actions
def step_wait(self):
e = [0 for _ in range(self.num_envs)]
self.logger.info(self.actions)
self.logger.info(e)
responses = self.vec_client.gameStep(self.actions, e)
raw_obs, reward, done = np.ones((self.num_envs,2)), np.array(responses.reward),
|
np.array(responses.done)
|
numpy.array
|
import numpy as np
from paddle import fluid
from perception.scene.x2paddle_yolov4 import x2paddle_net
DEFAULT_MULTI_CLASS_NMS_CONF = {
'score_threshold': 0.5,
'nms_top_k': 1000,
'keep_top_k': 200,
'nms_threshold': 0.5,
'background_label': -1
}
class YOLOv4(object):
"""
YOLOv4 network, see https://github.com/AlexeyAB/darknet
This class is for inference only.
"""
def __init__(self,
img_shape=[3, 416, 416],
num_classes=80,
anchors=[[12, 16], [19, 36], [40, 28], [36, 75],
[76, 55], [72, 146], [142, 110],
[192, 243], [459, 401]],
anchor_masks=[[6, 7, 8], [3, 4, 5], [0, 1, 2]],
nms_cfg=DEFAULT_MULTI_CLASS_NMS_CONF,
get_roi_feat=False,
roi_feat_after_gap=False,
roi_feat_resolution=5,
fm_spacial_scale=1/32.):
self.img_shape = img_shape
self.num_classes = num_classes
self.anchor_masks = anchor_masks
self._parse_anchors(anchors)
self.nms_cfg = nms_cfg
self.get_roi_feat = get_roi_feat
self.roi_feat_after_gap = roi_feat_after_gap
self.roi_feat_resolution = roi_feat_resolution
self.fm_spacial_scale = fm_spacial_scale
def _parse_anchors(self, anchors):
self.anchors = []
self.mask_anchors = []
assert len(anchors) > 0, "ANCHORS not set."
assert len(self.anchor_masks) > 0, "ANCHOR_MASKS not set."
for anchor in anchors:
assert len(anchor) == 2, "anchor {} len should be 2".format(anchor)
self.anchors.extend(anchor)
anchor_num = len(anchors)
for masks in self.anchor_masks:
self.mask_anchors.append([])
for mask in masks:
assert mask < anchor_num, "anchor mask index overflow"
self.mask_anchors[-1].extend(anchors[mask])
def _correct_boxes(self, box, input_size, im_size):
input_size = fluid.layers.cast(input_size, dtype='float32')
im_size = fluid.layers.cast(im_size, dtype='float32')
new_size = fluid.layers.elementwise_mul(
im_size,
fluid.layers.reduce_min(
fluid.layers.elementwise_div(input_size, im_size),
dim=1, keep_dim=True))
offset = 0.5 * fluid.layers.elementwise_sub(input_size, new_size)
offset = fluid.layers.elementwise_div(offset, input_size)
scale = fluid.layers.elementwise_div(input_size, new_size)
in_h = fluid.layers.unsqueeze(
fluid.layers.slice(input_size, axes=[1], starts=[0], ends=[1]),
axes=[1])
in_w = fluid.layers.unsqueeze(
fluid.layers.slice(input_size, axes=[1], starts=[1], ends=[2]),
axes=[1])
xmin = fluid.layers.slice(box, axes=[2], starts=[0], ends=[1])
ymin = fluid.layers.slice(box, axes=[2], starts=[1], ends=[2])
xmax = fluid.layers.slice(box, axes=[2], starts=[2], ends=[3])
ymax = fluid.layers.slice(box, axes=[2], starts=[3], ends=[4])
cx = fluid.layers.elementwise_div(
0.5 * fluid.layers.elementwise_add(xmin, xmax), in_w)
cy = fluid.layers.elementwise_div(
0.5 * fluid.layers.elementwise_add(ymin, ymax), in_h)
h = fluid.layers.elementwise_div(
fluid.layers.elementwise_sub(ymax, ymin), in_h)
w = fluid.layers.elementwise_div(
fluid.layers.elementwise_sub(xmax, xmin), in_w)
y_offset = fluid.layers.unsqueeze(
fluid.layers.slice(offset, axes=[1], starts=[0], ends=[1]),
axes=[1])
x_offset = fluid.layers.unsqueeze(
fluid.layers.slice(offset, axes=[1], starts=[1], ends=[2]),
axes=[1])
h_scale = fluid.layers.unsqueeze(
fluid.layers.slice(scale, axes=[1], starts=[0], ends=[1]),
axes=[1])
w_scale = fluid.layers.unsqueeze(
fluid.layers.slice(scale, axes=[1], starts=[1], ends=[2]),
axes=[1])
cx = fluid.layers.elementwise_mul(
fluid.layers.elementwise_sub(cx, x_offset), w_scale)
cy = fluid.layers.elementwise_mul(
fluid.layers.elementwise_sub(cy, y_offset), h_scale)
h = fluid.layers.elementwise_mul(h, h_scale)
w = fluid.layers.elementwise_mul(w, w_scale)
im_h = fluid.layers.unsqueeze(
fluid.layers.slice(im_size, axes=[1], starts=[0], ends=[1]),
axes=[1])
im_w = fluid.layers.unsqueeze(
fluid.layers.slice(im_size, axes=[1], starts=[1], ends=[2]),
axes=[1])
new_xmin = fluid.layers.elementwise_mul(
im_w, fluid.layers.elementwise_sub(cx, 0.5 * w))
new_xmax = fluid.layers.elementwise_mul(
im_w, fluid.layers.elementwise_add(cx, 0.5 * w))
new_ymin = fluid.layers.elementwise_mul(
im_h, fluid.layers.elementwise_sub(cy, 0.5 * h))
new_ymax = fluid.layers.elementwise_mul(
im_h, fluid.layers.elementwise_add(cy, 0.5 * h))
new_box = fluid.layers.concat(
[new_xmin, new_ymin, new_xmax, new_ymax], axis=-1)
return new_box
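    # --- note (a summary of the letterbox inverse mapping above) ---
    # boxes are predicted in padded (letterboxed) input coordinates; the code maps
    # box centers/sizes to [0, 1], removes the padding offset, rescales by the
    # input/new-size ratio, and multiplies by the original image size to get pixels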
def _correct_rois(self, rois, input_size, im_size):
input_size = fluid.layers.cast(input_size, dtype='float32')
im_size = fluid.layers.cast(im_size, dtype='float32')
new_size = fluid.layers.elementwise_mul(
im_size,
fluid.layers.reduce_min(
fluid.layers.elementwise_div(input_size, im_size),
dim=1, keep_dim=True))
offset = 0.5 * fluid.layers.elementwise_sub(input_size, new_size)
y_offset = fluid.layers.unsqueeze(
fluid.layers.slice(offset, axes=[1], starts=[0], ends=[1]),
axes=[1])
x_offset = fluid.layers.unsqueeze(
fluid.layers.slice(offset, axes=[1], starts=[1], ends=[2]),
axes=[1])
scale = fluid.layers.elementwise_div(new_size, im_size)
y_scale = fluid.layers.unsqueeze(
fluid.layers.slice(scale, axes=[1], starts=[0], ends=[1]),
axes=[1])
x_scale = fluid.layers.unsqueeze(
fluid.layers.slice(scale, axes=[1], starts=[1], ends=[2]),
axes=[1])
        # NOTE: because a training batch may contain r2 and r1 data with different
        # scales and offsets, we use a py_func layer here for convenience
corrected_rois = create_tmp_var(
'corrected_rois', 'float32', rois.shape)
fluid.layers.py_func(
func=correct_rois,
x=[rois, x_scale, y_scale, x_offset, y_offset],
out=corrected_rois)
return corrected_rois
def _no_instance_found(self, pred):
def _np_func(pred_):
pred_ = np.array(pred_)
if np.all(pred_ == -1):
return np.array([[True]])
else:
return np.array([[False]])
cond = create_tmp_var('no_instance', 'bool', [1, 1])
fluid.layers.py_func(func=_np_func, x=[pred], out=cond)
return cond
def infer(self, main_program=None):
if main_program is None:
test_program = fluid.default_main_program().clone(for_test=True)
else:
test_program = main_program.clone(for_test=True)
with fluid.program_guard(test_program):
_, fetch_list = self.build()
return test_program, fetch_list
def build(self):
inputs, outputs, feature_map = x2paddle_net(self.img_shape)
feature_map = fluid.layers.transpose(feature_map, [0, 3, 1, 2])
im_size = fluid.layers.data(
name='im_size', shape=[2], dtype='int32') # (h, w) of each im
        # NOTE: instead of a forced resize, we expand the image to keep the
        # aspect ratio; input_size is needed for calibration
input_size = fluid.layers.data(
name='in_size', shape=[2], dtype='int32')
inputs.extend([im_size, input_size])
boxes, scores = [], []
downsample = 32
for i, output in enumerate(reversed(outputs)):
box, score = fluid.layers.yolo_box(
x=output,
img_size=input_size,
anchors=self.mask_anchors[i],
class_num=self.num_classes,
conf_thresh=self.nms_cfg['score_threshold'],
downsample_ratio=downsample,
clip_bbox=False,
name='yolo_box' + str(i))
box = self._correct_boxes(box, input_size, im_size)
boxes.append(box)
scores.append(fluid.layers.transpose(score, perm=[0, 2, 1]))
downsample //= 2
yolo_boxes = fluid.layers.concat(boxes, axis=1)
yolo_scores = fluid.layers.concat(scores, axis=2)
# FIXME: using nms2 unable to train the attention controller model!
# pred = fluid.contrib.multiclass_nms2(
# bboxes=yolo_boxes, scores=yolo_scores, **self.nms_cfg)
pred = fluid.layers.multiclass_nms(
bboxes=yolo_boxes, scores=yolo_scores, **self.nms_cfg)
if not self.get_roi_feat:
return inputs, [pred, feature_map]
# Process rois feats
def _true_func():
return pred
def _false_func():
rois = fluid.layers.slice(
pred, axes=[1], starts=[2], ends=[6])
rois = self._correct_rois(rois, input_size, im_size)
# FIXME: @paddle-dev, `roi_align' layer does not keep the lod
# information!!! i.e. rois_feats.lod() == []
rois_feats = fluid.layers.roi_align(
input=feature_map,
rois=rois,
pooled_height=self.roi_feat_resolution,
pooled_width=self.roi_feat_resolution,
spatial_scale=self.fm_spacial_scale)
if self.roi_feat_after_gap:
# Global average pooling
rois_feats = fluid.layers.reduce_sum(
rois_feats, dim=[2, 3]) / (self.roi_feat_resolution ** 2)
return rois_feats
rois_feats = fluid.layers.cond(
self._no_instance_found(pred), _true_func, _false_func)
return inputs, [pred, rois_feats, feature_map]
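# --- illustrative check (assumes paddle is importable; exercises _parse_anchors without building a graph) ---
def _demo_parse_anchors():
    yolo = YOLOv4.__new__(YOLOv4)  # bypass __init__ so no network is constructed
    yolo.anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    yolo._parse_anchors([[12, 16], [19, 36], [40, 28], [36, 75],
                         [76, 55], [72, 146], [142, 110],
                         [192, 243], [459, 401]])
    print(yolo.mask_anchors[0])  # [142, 110, 192, 243, 459, 401]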
def correct_rois(rois, x_scale, y_scale, x_offset, y_offset):
lod = rois.lod()[0]
rois = np.array(rois)
rois_lst = []
for i in range(len(lod) - 1):
rois_lst.append(rois[lod[i]:lod[i+1]])
x_scale = np.reshape(
|
np.array(x_scale)
|
numpy.array
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import numpy.random
import numpy as np
import os, sys
import onnx
from onnx import helper
import math
from onnx import numpy_helper
import seedot.compiler.ONNX.common as common
#TODO: Refactor this later. Also part of paramBuilder file.
class Param:
def __init__(self, name, shape, range):
self.name = name
self.shape = shape
self.range = range
self.sparse = False
#TODO: Shift to common.py
def get_range(np_array):
return (np.min(np_array),
|
np.max(np_array)
|
numpy.max
|
#!/usr/bin/env python
'''
Creates initial configuration for a system containing branched linear chains.
'''
import sys
import math
import numpy as np
from configuration import Configuration
from config_io import *
#-------------------------------------------------------------------------------
config = Configuration()
#Add simulation box and boundary condition. The box size can be updated later if
#necessary.
config.add_simbox(20.0, 20.0, 20.0, 0)
#Molecule details
num_mols = 1
config.add_molecule_type('BTLBRS', num_mols)
#Number of backbone atoms
na_bbone = int(sys.argv[1])
#Number of atoms on each side chain
na_sc = int(sys.argv[2])
#Number of atoms between consecutive branch points
na_sp = 0
#Number of side chains growing from a branch point
f = 1
#Number of branch points
n_bp = 1 + (na_bbone-1)//(na_sp+1)
#Number of side chains
n_sc = n_bp*f
#Total number of side chain atoms
nat_sc = n_sc*na_sc
#Total number of atoms in this molecule
natm = na_bbone + nat_sc
#Total number of bonds in this molecule
nbnd = (na_bbone - 1) + nat_sc
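#Worked example (illustrative): na_bbone = 10, na_sc = 3, na_sp = 0, f = 1 gives
#n_bp = 1 + 9//1 = 10 branch points, n_sc = 10 side chains, nat_sc = 30,
#natm = 10 + 30 = 40 atoms and nbnd = 9 + 30 = 39 bonds.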
#Atom types
#All atoms are point particles with unit mass
atm_t_bb = config.add_atom_type('BB', na_bbone, 0, 1.0)
atm_t_sc = config.add_atom_type('SC', nat_sc, 0, 1.0)
#Vdw interaction
eps = 1.0; sigma = 2.0; rcut = 2.0**(1.0/6)*sigma
config.add_ia_vdw('BB', 'BB', 'lj', np.array([eps, sigma, rcut]))
config.add_ia_vdw('BB', 'SC', 'lj', np.array([eps, sigma, rcut]))
config.add_ia_vdw('SC', 'SC', 'lj', np.array([eps, sigma, rcut]))
#config.add_ia_vdw('BB', 'SC', 'tab', np.array([50.0, 0.0, 1, 0]))
#config.add_ia_vdw('SC', 'SC', 'tab', np.array([0.0, 0.0, 0]))
#Bond types
bnd_t_bb = config.add_bond_type('kg', np.array([7.5, 3.0, eps, sigma]))
bnd_t_bs = config.add_bond_type('kg',
|
np.array([7.5, 3.0, eps, sigma])
|
numpy.array
|
"""
This module contains classes and methods for building tabulated chemistry libraries
"""
# Spitfire - a Python-C++ library for building tabulated chemistry models and solving differential equations
# Copyright 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
#
# You should have received a copy of the 3-clause BSD License
# along with this program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
#
# Questions? Contact <NAME> (<EMAIL>)
import numpy as np
from scipy.interpolate import interp1d
from scipy.interpolate import InterpolatedUnivariateSpline
from multiprocessing import Pool, Manager
from time import perf_counter
from spitfire.chemistry.flamelet import Flamelet, FlameletSpec
from spitfire.chemistry.library import Dimension, Library
import spitfire.chemistry.analysis as sca
import copy
from functools import partial
import itertools
"""these names are specific to tabulated chemistry libraries for combustion"""
_mixture_fraction_name = 'mixture_fraction'
_dissipation_rate_name = 'dissipation_rate'
_enthalpy_defect_name = 'enthalpy_defect'
_enthalpy_offset_name = 'enthalpy_offset'
_scaled_scalar_variance_name = 'scaled_scalar_variance_mean'
_stoich_suffix = '_stoich'
_mean_suffix = '_mean'
def _write_library_header(lib_type, mech, fuel, oxy, verbose):
if verbose:
print('-' * 82)
print(f'building {lib_type} library')
print('-' * 82)
print(f'- mechanism: {mech.mech_xml_path}')
print(f'- {mech.n_species} species, {mech.n_reactions} reactions')
print(f'- stoichiometric mixture fraction: {mech.stoich_mixture_fraction(fuel, oxy):.3f}')
print('-' * 82)
return perf_counter()
def _write_library_footer(cput0, verbose):
if verbose:
print('----------------------------------------------------------------------------------')
print(f'library built in {perf_counter() - cput0:6.2f} s')
print('----------------------------------------------------------------------------------', flush=True)
def build_unreacted_library(flamelet_specs, verbose=True):
"""Build a flamelet library for a nonreacting flow, with linear enthalpy and mass fraction profiles.
Parameters
----------
flamelet_specs : FlameletSpec or dictionary of arguments for a FlameletSpec
flamelet specifications
Returns
-------
library : spitfire.chemistry.library.Library instance
a chemistry library with only the "mixture_fraction" dimension
"""
fs = FlameletSpec(**flamelet_specs) if isinstance(flamelet_specs, dict) else copy.copy(flamelet_specs)
fs.initial_condition = 'unreacted'
flamelet = Flamelet(fs)
return flamelet.make_library_from_interior_state(flamelet.initial_interior_state)
def build_adiabatic_eq_library(flamelet_specs, verbose=True):
"""Build a flamelet library with the equilibrium (infinitely fast) chemistry assumption,
equivalently with Gibbs free energy minimization.
Parameters
----------
flamelet_specs : FlameletSpec or dictionary of arguments for a FlameletSpec
flamelet specifications
Returns
-------
library : spitfire.chemistry.library.Library instance
a chemistry library with only the "mixture_fraction" dimension
"""
fs = FlameletSpec(**flamelet_specs) if isinstance(flamelet_specs, dict) else copy.copy(flamelet_specs)
fs.initial_condition = 'equilibrium'
flamelet = Flamelet(fs)
return flamelet.make_library_from_interior_state(flamelet.initial_interior_state)
def build_adiabatic_bs_library(flamelet_specs, verbose=True):
"""Build a flamelet library with the Burke-Schumann (idealized, one-step combustion) assumptions
Parameters
----------
flamelet_specs : FlameletSpec or dictionary of arguments for a FlameletSpec
flamelet specifications
Returns
-------
library : spitfire.chemistry.library.Library instance
a chemistry library with only the "mixture_fraction" dimension
"""
fs = FlameletSpec(**flamelet_specs) if isinstance(flamelet_specs, dict) else copy.copy(flamelet_specs)
fs.initial_condition = 'Burke-Schumann'
flamelet = Flamelet(fs)
return flamelet.make_library_from_interior_state(flamelet.initial_interior_state)
def _build_nonadiabatic_defect_unstrained_library(initialization, flamelet_specs, n_defect_st=16, verbose=True):
flamelet_specs = FlameletSpec(**flamelet_specs) if isinstance(flamelet_specs, dict) else copy.copy(flamelet_specs)
m = flamelet_specs.mech_spec
fuel = flamelet_specs.fuel_stream
oxy = flamelet_specs.oxy_stream
z_st = m.stoich_mixture_fraction(fuel, oxy)
flamelet_specs.initial_condition = initialization
flamelet = Flamelet(flamelet_specs)
# compute the extreme enthalpy defect
state_ad = flamelet.initial_interior_state
adiabatic_lib = flamelet.make_library_from_interior_state(state_ad)
enthalpy_ad = sca.compute_specific_enthalpy(m, adiabatic_lib)['enthalpy']
z_interior = flamelet.mixfrac_grid[1:-1]
state_cooled_eq = state_ad.copy()
state_cooled_eq[::m.n_species] = z_interior * fuel.T + (1 - z_interior) * oxy.T
cooled_lib = flamelet.make_library_from_interior_state(state_cooled_eq)
enthalpy_cooled_eq = sca.compute_specific_enthalpy(m, cooled_lib)['enthalpy']
z = flamelet.mixfrac_grid
h_ad_st = interp1d(z, enthalpy_ad)(z_st)
h_ce_st = interp1d(z, enthalpy_cooled_eq)(z_st)
defect_ext = h_ad_st - h_ce_st
# build the library with equilibrium solutions whose enthalpies are offset by the triangular defect form
defect_range = np.linspace(-defect_ext, 0, n_defect_st)[::-1]
z_dim = Dimension(_mixture_fraction_name, flamelet.mixfrac_grid)
g_dim = Dimension(_enthalpy_defect_name + _stoich_suffix, defect_range)
output_library = Library(z_dim, g_dim)
output_library.extra_attributes['mech_spec'] = m
for p in adiabatic_lib.props:
output_library[p] = output_library.get_empty_dataset()
output_library['enthalpy_defect'] = output_library.get_empty_dataset()
output_library['enthalpy_cons'] = output_library.get_empty_dataset()
output_library['enthalpy'] = output_library.get_empty_dataset()
output_library[_mixture_fraction_name] = output_library.get_empty_dataset()
fz = z.copy()
fz[z <= z_st] = z[z <= z_st] / z_st
fz[z > z_st] = (1 - z[z > z_st]) / (1 - z_st)
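# fz is the presumed triangular profile: zero at z = 0 and z = 1 and peaking at
# one at z = z_st, so the imposed enthalpy defect is largest at stoichiometric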
ns = m.n_species
g_library = flamelet.make_library_from_interior_state(flamelet.initial_interior_state)
for ig in range(n_defect_st):
defected_enthalpy = enthalpy_ad + defect_range[ig] * fz
for iz in range(1, z.size - 1):
y = np.zeros(ns)
for ispec in range(ns):
y[ispec] = g_library['mass fraction ' + m.species_names[ispec]][iz]
m.gas.HPY = defected_enthalpy[iz], flamelet.pressure, y
if initialization == 'equilibrium':
m.gas.equilibrate('HP')
g_library['temperature'][iz] = m.gas.T
for ispec in range(ns):
g_library['mass fraction ' + m.species_names[ispec]][iz] = m.gas.Y[ispec]
for p in g_library.props:
if p != 'defected_enthalpy':
output_library[p][:, ig] = g_library[p].ravel()
output_library['enthalpy_defect'][:, ig] = defected_enthalpy - enthalpy_ad
output_library['enthalpy_cons'][:, ig] = enthalpy_ad
output_library['enthalpy'][:, ig] = defected_enthalpy
output_library[_mixture_fraction_name][:, ig] = flamelet.mixfrac_grid.ravel()
return output_library
def build_nonadiabatic_defect_eq_library(flamelet_specs, n_defect_st=16, verbose=True):
"""Build a flamelet library with the equilibrium (infinitely fast) chemistry assumption
and heat loss effects captured through a presumed triangular form of the enthalpy defect.
Parameters
----------
flamelet_specs : FlameletSpec or dictionary of arguments for a FlameletSpec
flamelet specifications
n_defect_st : Int
the number of stoichiometric enthalpy defect values to include in the table (default: 16)
Returns
-------
library : spitfire.chemistry.library.Library instance
a chemistry library with the "mixture_fraction" and "enthalpy_defect_stoich" dimensions
"""
return _build_nonadiabatic_defect_unstrained_library('equilibrium', flamelet_specs, n_defect_st, verbose)
def build_nonadiabatic_defect_bs_library(flamelet_specs, n_defect_st=16, verbose=True):
"""Build a flamelet library with the Burke-Schumann chemistry assumption
and heat loss effects captured through a presumed triangular form of the enthalpy defect.
Parameters
----------
flamelet_specs : FlameletSpec or dictionary of arguments for a FlameletSpec
flamelet specifications
n_defect_st : Int
the number of stoichiometric enthalpy defect values to include in the table (default: 16)
Returns
-------
library : spitfire.chemistry.library.Library instance
a chemistry library with the "mixture_fraction" and "enthalpy_defect_stoich" dimensions
"""
return _build_nonadiabatic_defect_unstrained_library('Burke-Schumann', flamelet_specs, n_defect_st, verbose)
def build_adiabatic_slfm_library(flamelet_specs,
diss_rate_values=np.logspace(-3, 2, 16),
diss_rate_ref='stoichiometric',
verbose=True,
solver_verbose=False,
_return_intermediates=False,
include_extinguished=False,
diss_rate_log_scaled=True):
"""Build a flamelet library with an adiabatic strained laminar flamelet model
Parameters
----------
flamelet_specs : dictionary or FlameletSpec instance
data for the mechanism, streams, mixture fraction grid, etc.
diss_rate_values : np.array
reference dissipation rate values in the table (note that if the flamelet extinguishes at any point,
the extinguished flamelet and larger dissipation rates are not included in the library unless the
include_extinguished argument is set to True)
diss_rate_ref : str
the reference point of the specified dissipation rate values, either 'stoichiometric' or 'maximum'
verbose : bool
whether or not to show progress of the library construction
include_extinguished : bool
whether or not to include extinguished states in the output table, if encountered in the provided range of
dissipation rates, off by default
diss_rate_log_scaled : bool
whether or not the range of dissipation rates is logarithmically scaled
Returns
-------
library : spitfire.chemistry.library.Library instance
the structured chemistry library
"""
if isinstance(flamelet_specs, dict):
flamelet_specs = FlameletSpec(**flamelet_specs)
m = flamelet_specs.mech_spec
fuel = flamelet_specs.fuel_stream
oxy = flamelet_specs.oxy_stream
flamelet_specs.initial_condition = 'equilibrium'
if diss_rate_ref == 'maximum':
flamelet_specs.max_dissipation_rate = 0.
else:
flamelet_specs.stoich_dissipation_rate = 0.
cput00 = _write_library_header('adiabatic SLFM', m, fuel, oxy, verbose)
f = Flamelet(flamelet_specs)
table_dict = dict()
nchi = diss_rate_values.size
suffix = _stoich_suffix if diss_rate_ref == 'stoichiometric' else '_max'
x_values = list()
for idx, chival in enumerate(diss_rate_values):
if diss_rate_ref == 'maximum':
flamelet_specs.max_dissipation_rate = chival
else:
flamelet_specs.stoich_dissipation_rate = chival
flamelet = Flamelet(flamelet_specs)
if verbose:
print(f'{idx + 1:4}/{nchi:4} (chi{suffix} = {chival:8.1e} 1/s) ', end='', flush=True)
cput0 = perf_counter()
x_library = flamelet.compute_steady_state(tolerance=1.e-6, verbose=solver_verbose, use_psitc=True)
dcput = perf_counter() - cput0
if np.max(flamelet.current_temperature - flamelet.linear_temperature) < 10. and not include_extinguished:
if verbose:
print(' extinction detected, stopping. The extinguished state will not be included in the table.')
break
else:
if verbose:
print(f' converged in {dcput:6.2f} s, T_max = {np.max(flamelet.current_temperature):6.1f}')
z_st = flamelet.mechanism.stoich_mixture_fraction(flamelet.fuel_stream, flamelet.oxy_stream)
chi_st = flamelet._compute_dissipation_rate(np.array([z_st]),
flamelet._max_dissipation_rate,
flamelet._dissipation_rate_form)[0]
x_values.append(chi_st)
table_dict[chi_st] = dict()
for k in x_library.props:
table_dict[chi_st][k] = x_library[k].ravel()
flamelet_specs.initial_condition = flamelet.current_interior_state
if _return_intermediates:
table_dict[chi_st]['adiabatic_state'] = np.copy(flamelet.current_interior_state)
if _return_intermediates:
_write_library_footer(cput00, verbose)
return table_dict, f.mixfrac_grid, np.array(x_values)
else:
z_dim = Dimension(_mixture_fraction_name, f.mixfrac_grid)
x_dim = Dimension(_dissipation_rate_name + _stoich_suffix, np.array(x_values), diss_rate_log_scaled)
output_library = Library(z_dim, x_dim)
output_library.extra_attributes['mech_spec'] = m
for quantity in table_dict[chi_st]:
output_library[quantity] = output_library.get_empty_dataset()
for ix, x in enumerate(x_values):
output_library[quantity][:, ix] = table_dict[x][quantity]
_write_library_footer(cput00, verbose)
return output_library
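# Usage sketch (the dissipation-rate values are illustrative assumptions):
#   lib = build_adiabatic_slfm_library(fs, diss_rate_values=np.logspace(-2, 3, 12))
#   T = lib['temperature']  # 2D: (mixture_fraction, dissipation_rate_stoich)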
def _expand_enthalpy_defect_dimension_transient(chi_st, managed_dict, flamelet_specs, table_dict,
h_stoich_spacing, verbose, input_integration_args, solver_verbose):
flamelet_specs.initial_condition = table_dict[chi_st]['adiabatic_state']
flamelet_specs.stoich_dissipation_rate = chi_st
flamelet_specs.heat_transfer = 'nonadiabatic'
flamelet_specs.scale_heat_loss_by_temp_range = True
flamelet_specs.scale_convection_by_dissipation = True
flamelet_specs.use_linear_ref_temp_profile = True
flamelet_specs.convection_coefficient = 1.e7
flamelet_specs.radiative_emissivity = 0.
integration_args = dict(
{'first_time_step': 1.e-9,
'max_time_step': 1.e-1,
'write_log': solver_verbose,
'log_rate': 100})
if input_integration_args is not None:
integration_args.update(input_integration_args)
if 'transient_tolerance' not in integration_args:
integration_args['transient_tolerance'] = 1.e-8
cput0000 = perf_counter()
running = True
while running and integration_args['transient_tolerance'] > 1.e-15:
try:
fnonad = Flamelet(flamelet_specs)
transient_lib = fnonad.integrate_for_heat_loss(**integration_args)
running = False
except Exception as e:
if solver_verbose:
print(
f'Transient heat loss calculation failed with tolerance of {integration_args["transient_tolerance"]:.1e}, retrying with 100x lower...')
integration_args.update(dict({'transient_tolerance': integration_args['transient_tolerance'] * 1.e-2}))
indices = [0]
z = fnonad.mixfrac_grid
z_st = fnonad.mechanism.stoich_mixture_fraction(fnonad.fuel_stream, fnonad.oxy_stream)
h_tz = sca.compute_specific_enthalpy(flamelet_specs.mech_spec, transient_lib)['enthalpy']
h_ad = h_tz[0, :]
nt, nz = h_tz.shape
last_hst = interp1d(z, h_ad)(z_st)
for i in range(nt):
this_hst = interp1d(z, h_tz[i, :])(z_st)
if last_hst - this_hst > h_stoich_spacing:
indices.append(i)
last_hst = this_hst
if nt - 1 not in indices:
indices.append(-1)
for i in indices:
defect = h_tz[i, :] - h_ad
gst = float(interp1d(z, defect)(z_st))
this_data = dict()
this_data['enthalpy_defect'] = np.copy(defect)
this_data['enthalpy_cons'] = np.copy(h_ad)
this_data['enthalpy'] = np.copy(h_tz[i, :])
this_data[_mixture_fraction_name] = fnonad.mixfrac_grid
for q in transient_lib.props:
this_data[q] = transient_lib[q][i, :]
managed_dict[(chi_st, gst)] = this_data
dcput = perf_counter() - cput0000
if verbose:
print('chi_st = {:8.1e} 1/s converged in {:6.2f} s'.format(chi_st, dcput), flush=True)
def _expand_enthalpy_defect_dimension_steady(chi_st, managed_dict, flamelet_specs, table_dict,
h_stoich_spacing, verbose, input_integration_args, solver_verbose):
flamelet_specs.initial_condition = table_dict[chi_st]['adiabatic_state']
flamelet_specs.stoich_dissipation_rate = chi_st
flamelet_specs.heat_transfer = 'nonadiabatic'
flamelet_specs.scale_heat_loss_by_temp_range = False
flamelet_specs.scale_convection_by_dissipation = False
flamelet_specs.use_linear_ref_temp_profile = True
flamelet_specs.radiative_emissivity = 0.
flamelet_specs.convection_coefficient = 0.
flamelet = Flamelet(flamelet_specs)
first = True
refine_before_extinction = False
extinguished = False
extinguished_first = False
maxT = -1
state_old = np.copy(flamelet.current_interior_state)
hval = 0.
dh = 1.e-1
diff_target = 1e-1
diff_norm = 1e-1
hval_max = 1.e10
solutions = []
hvalues = []
hvalues.append(hval)
solutions.append(dict())
for p in table_dict[chi_st]:
if p != 'adiabatic_state':
solutions[-1][p] = table_dict[chi_st][p]
current_state = table_dict[chi_st]['adiabatic_state']
cput0000 = perf_counter()
while first or (not extinguished and hval < hval_max):
hval += dh
if first:
first = False
flamelet_specs.convection_coefficient = hval
flamelet_specs.initial_condition = current_state
flamelet = Flamelet(flamelet_specs)
g_library = flamelet.compute_steady_state(verbose=solver_verbose)
current_state = flamelet.current_interior_state
maxT = np.max(current_state)
diff_norm = np.max(np.abs(current_state - state_old) / (np.abs(current_state) + 1.e-4))
extinguished = maxT < (np.max([flamelet.oxy_stream.T, flamelet.fuel_stream.T]) + 10.)
if (extinguished and (not extinguished_first)) and refine_before_extinction:
extinguished_first = True
extinguished = False
hval -= dh
dh *= 0.1
diff_target *= 0.1
current_state = state_old.copy()
continue
state_old = np.copy(current_state)
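# adaptive step control: scale dh by sqrt(diff_target / diff_norm), clipped to
# the factor range [0.1, 2] per step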
dh *= np.min([np.max([np.sqrt(diff_target / diff_norm), 0.1]), 2.])
hvalues.append(hval)
solutions.append(dict())
for p in g_library.props:
solutions[-1][p] = g_library[p].ravel()
z_dim = Dimension(_mixture_fraction_name, flamelet.mixfrac_grid)
h_dim = Dimension(_enthalpy_defect_name + _stoich_suffix, np.array(hvalues))
steady_lib = Library(z_dim, h_dim)
steady_lib.extra_attributes['mech_spec'] = flamelet_specs.mech_spec
for p in table_dict[chi_st]:
if p != 'adiabatic_state':
steady_lib[p] = steady_lib.get_empty_dataset()
for ig, sol in enumerate(solutions):
for p in sol:
steady_lib[p][:, ig] = sol[p].ravel()
indices = [0]
z = flamelet.mixfrac_grid
z_st = flamelet.mechanism.stoich_mixture_fraction(flamelet.fuel_stream, flamelet.oxy_stream)
h_tz = sca.compute_specific_enthalpy(flamelet_specs.mech_spec, steady_lib)['enthalpy']
h_ad = h_tz[:, 0]
nz, nt = h_tz.shape
last_hst = interp1d(z, h_ad)(z_st)
for i in range(nt - 1):
this_hst = interp1d(z, h_tz[:, i])(z_st)
if last_hst - this_hst > h_stoich_spacing:
indices.append(i)
last_hst = this_hst
for i in indices:
defect = h_tz[:, i] - h_ad
gst = float(interp1d(z, defect)(z_st))
this_data = dict()
this_data['enthalpy_defect'] =
|
np.copy(defect)
|
numpy.copy
|
import gradio as gr
import pretty_errors
import numpy as np
from helper import save_image, check_human
from PIL import Image
import magic
import os
import requests
if not os.path.exists("uploads"):
os.makedirs("uploads")
matches = []
score_filter = 1
headers = {"Content-Type": "application/json"}
message = gr.outputs.HTML("")
image_source = "webcam"
def search_by_file(image, endpoint="http://0.0.0.0:12345/search", top_k=1):
"""search_by_file.
:param endpoint:
:param top_k:
:param image: image sent from gradio
"""
image_file = save_image(image)
filetype = magic.from_file(image_file, mime=True)
filename = os.path.abspath(image_file)
if not check_human(filename):
image = Image.open("no_human.jpg")
image_np_array = np.array(image)
return image_np_array
print(f"Searching for {filename}")
data = (
'{"parameters": {"top_k": '
+ str(top_k)
+ '}, "mode": "search", "data": [{"uri": "'
+ filename
+ '", "mime_type": "'
+ filetype
+ '"}]}'
)
response = requests.post(endpoint, headers=headers, data=data)
content = response.json()
match = content["data"]["docs"][0]["matches"][0]
match_uri = match["tags"]["uri_absolute"]
match_score = match["scores"]["cosine"]["value"]
os.remove(filename)
if match_score < score_filter:
image = Image.open(match_uri)
image_np_array =
|
np.array(image)
|
numpy.array
|
import numpy as np
import pyglet
from points import x_points, y_points
def circle_points(center, radius, resolution = 36):
angle_inc = 2 * np.pi / resolution
angles = (angle_inc * i for i in range(resolution))
points = tuple((center[0] +
|
np.cos(angle)
|
numpy.cos
|
import numpy as np
import itertools
import sys
import argparse
import os
import random
import tqdm
import time
###############################################################################
def get_distance_matrix(dist_matrix_file):
tstart = time.time()
if not os.path.exists(dist_matrix_file):
sys.stderr.write("File '%s' do not exist\n"%(dist_matrix_file))
#end if
dist_matrix = np.loadtxt(fname=dist_matrix_file, delimiter=",", dtype=float)
#sys.stdout.write("get-distance-matrix: [total: %.2fs]\n"%(time.time()-tstart))
#sys.stdout.flush()
return dist_matrix
#end get_distance_matrix()
def get_color_matrix(color_matrix_file):
tstart= time.time()
if not os.path.exists(color_matrix_file):
sys.stderr.write("File '%s' do not exist\n"%(dist_matrix_file))
#end if
color_matrix = np.loadtxt(fname=color_matrix_file, delimiter=",", dtype=int)
#sys.stdout.write("get-color-matrix: [total: %.2fs]\n"%(time.time()-tstart))
#sys.stdout.flush()
return color_matrix
#end get_color_matrix()
################################################################################
def local_search_v1_iter(A, F0, F1, S0_in, S1_in, cost_in):
cost = cost_in
S0 = S0_in
S1 = S1_in
iters = 0
for i in range(len(S0)):
u = S0[i]
S0_d = S0.copy()  # copy so trial swaps don't mutate S0 before acceptance
for j in range(len(F0)):
v = F0[j]
if v in S0:
continue
iters += 1
S0_d[i] = v
S_d = np.sort(np.concatenate([S0_d, S1]))
temp_cost = np.sum(A[:, S_d].min(axis=1))
if temp_cost < cost:
cost = temp_cost
S0[i] = v
#end if
#end for
#end for
for i in range(len(S1)):
u = S1[i]
S1_d = S1.copy()  # copy so trial swaps don't mutate S1 before acceptance
for j in range(len(F1)):
v = F1[j]
if v in S1_d:
continue
iters += 1
S1_d[i] = v
S_d = np.sort(np.concatenate([S0, S1_d]))
temp_cost = np.sum(A[:, S_d].min(axis=1))
if temp_cost < cost:
cost = temp_cost
S1[i] = v
#end if
#end for
#end for
return cost, S0, S1, iters
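# The two passes above perform best-improvement single-swap local search, one
# color class at a time; the assignment cost is sum_i min_{j in S} A[i, j],
# recomputed for every candidate swap.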
#end local_search_v1_iter()
def local_search_v1(A, C, R, seed):
np.random.seed(seed)
r0 = R[0]
r1 = R[1]
F0 = np.array(np.nonzero(C[0])[0])
F1 = np.array(np.nonzero(C[1])[0])
if (len(F0) < r0) or (len(F1) < r1):
return np.inf, [], [], 0
#end if
# initialise a random assignment
S0 = np.random.choice(F0, r0)
S1 = np.random.choice(F1, r1)
S =
|
np.concatenate([S0, S1])
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
#from phasor.utilities.print import print
import numpy as np
import declarative
from ..utilities.future_from_2 import super
from .. import base
from . import noise
def np_roll_2D_mat_back(arr_mat, N = 2):
for idx in range(len(arr_mat.shape) - N):
arr_mat = np.rollaxis(arr_mat, -1, 0)
return arr_mat
def np_roll_2D_mat_front(arr_mat, N = 2):
for idx in range(len(arr_mat.shape) - N):
arr_mat = np.rollaxis(arr_mat, 0, len(arr_mat.shape))
return arr_mat
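# Shape sketch: for arr_mat of shape (f1, f2, 2, 2), np_roll_2D_mat_back yields
# (2, 2, f1, f2) and np_roll_2D_mat_front reverses it, letting the trailing 2x2
# matrix axes lead or trail for the einsum broadcasting used below.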
class HomodyneACReadoutBase(base.SystemElementBase):
noise = None
_AC_sensitivity = None
t_sub = None
@declarative.dproperty
def phase_deg(self, val = 0):
return val
@declarative.dproperty
def portNI(self, val):
return val
@declarative.dproperty
def portNQ(self, val):
return val
def rotate_deg(self, phase_deg):
#TODO: FIX THESE SEMANTICS
name = "ROTATOR{0}".format(int(np.random.uniform(0, 1000000)))
obj = self.insert(
self.t_sub(
portNI = self.portNI,
portNQ = self.portNQ,
phase_deg = self.phase_deg + phase_deg,
noise = self.noise,
_AC_sensitivity = self._AC_sensitivity,
t_sub = self.t_sub,
),
name
)
return obj
@declarative.mproperty
def F_Hz(self):
return self.F_sep.F_Hz
@declarative.mproperty
def AC_sensitivity(self):
return self.AC_sensitivity_IQ[0]
def homodyne_SNR(self):
return self.noise.noise / abs(self.AC_sensitivity)
@declarative.mproperty
def AC_noise_limited_sensitivity(self):
return self.AC_ASD / abs(self.AC_sensitivity)
@declarative.mproperty
def AC_ASD(self):
return self.AC_PSD**.5
@declarative.mproperty
def AC_PSD(self):
return self.AC_CSD_IQ[0, 0]
@declarative.mproperty
def AC_PSD_by_source(self):
eachCSD = dict()
for nobj, subCSD in list(self.noise.CSD_by_source.items()):
II = subCSD['ps_In', 'ps_In']
IQ = subCSD['ps_In', 'Q']
QI = subCSD['Q', 'ps_In']
QQ = subCSD['Q', 'Q']
arr = np_roll_2D_mat_back(
np.array(
[[II, IQ], [QI, QQ]]
)
)
print(arr)
print(arr.dtype)
arr = np.einsum('...ij,...jk->...ik', self.rotation_matrix_back, arr)
#this builds the transpose into the sum
arr = np.einsum('...ij,...kj->...ik', arr, self.rotation_matrix_back)
eachCSD[nobj] = arr[..., 0, 0]
return eachCSD
@declarative.mproperty
def rotation_matrix_back(self):
phase_rad = self.phase_deg * np.pi / 180
S = np.sin(phase_rad)
pe_C = np.cos(phase_rad)
ROT = np_roll_2D_mat_back(
np.array(
[[pe_C, S], [-S, pe_C]]
)
)
return ROT
@declarative.mproperty
def AC_CSD_IQ_back(self):
II = self.noise.CSD['ps_In', 'ps_In']
IQ = self.noise.CSD['ps_In', 'Q']
QI = self.noise.CSD['Q', 'ps_In']
QQ = self.noise.CSD['Q', 'Q']
arr = np_roll_2D_mat_back(
np.array(
[[II, IQ], [QI, QQ]]
)
)
#Normal dot product doesn't work here
arr = np.einsum('...ij,...jk->...ik', self.rotation_matrix_back, arr)
#this builds the transpose into the sum
arr = np.einsum('...ij,...kj->...ik', arr, self.rotation_matrix_back)
return arr
@declarative.mproperty
def AC_CSD_IQ(self):
return np_roll_2D_mat_front(
self.AC_CSD_IQ_back
)
@declarative.mproperty
def AC_CSD_IQ_re_inv_back(self):
return np.linalg.inv(np.real(self.AC_CSD_IQ_back))
@declarative.mproperty
def AC_CSD_IQ_re_inv(self):
return np_roll_2D_mat_front(self.AC_CSD_IQ_re_inv_back)
@declarative.mproperty
def AC_noise_limited_sensitivity_optimal(self):
arr = self.AC_CSD_IQ_re_inv_back
IQ = self.AC_sensitivity_IQ_back
#Normal dot product doesn't work here
arr = np.einsum('...j,...jk->...k', IQ, arr)
#this builds the transpose into the sum
arr = np.einsum('...i,...i->...', arr, IQ.conjugate())
return 1/(arr)**.5
@declarative.mproperty
def AC_CSD_IQ_inv_back(self):
return np.linalg.inv(self.AC_CSD_IQ_back)
@declarative.mproperty
def AC_CSD_IQ_inv(self):
return np_roll_2D_mat_front(self.AC_CSD_IQ_inv_back)
@declarative.mproperty
def AC_CSD_ellipse(self):
NIQ = np.real(self.AC_CSD_IQ)
rtDisc = np.sqrt((NIQ[0, 0] - NIQ[1, 1])**2 + 4*(NIQ[0, 1]*NIQ[1, 0]))
min_eig = (NIQ[0, 0] + NIQ[1, 1] - rtDisc)/2
max_eig = (NIQ[0, 0] + NIQ[1, 1] + rtDisc)/2
disc = np.asarray(NIQ[0, 0] - min_eig)
disc[disc < 0] = 0
ratio = ((NIQ[1, 0] > 0)*2 - 1) * np.sqrt(disc / (max_eig - min_eig))
ang_rad = np.pi - np.arccos(ratio)
Imin = NIQ[0, 0] - abs(NIQ[1, 0])**2 / NIQ[1, 1]
Qmin = NIQ[1, 1] - abs(NIQ[1, 0])**2 / NIQ[0, 0]
return declarative.Bunch(
ps_In = self.AC_CSD_IQ[0, 0],
Q = self.AC_CSD_IQ[1, 1],
IQ = self.AC_CSD_IQ[0, 1],
min = min_eig,
max = max_eig,
Imin = Imin,
Qmin = Qmin,
rad = ang_rad,
deg = 180 * ang_rad / np.pi,
)
@declarative.mproperty
def AC_CSD_ellipse_norm(self):
ellipse = self.AC_CSD_ellipse
#TODO, get appropriate wavelength rather than assuming 1064nm
R = np.sqrt(ellipse.min / ellipse.max)
return declarative.Bunch(
min = R,
max = 1 / R,
rad = ellipse.rad,
deg = ellipse.deg,
)
@declarative.mproperty
def AC_CSD_ellipse_normSN(self):
ellipse = self.AC_CSD_ellipse
#TODO, get appropriate wavelength rather than assuming 1064nm
qmag = self.system.adjust_PSD * self.symbols.h_Js * self.symbols.c_m_s / 1064e-9 # * iwavelen_m
return declarative.Bunch(
min = ellipse.min / qmag,
max = ellipse.max / qmag,
rad = ellipse.rad,
deg = ellipse.deg,
)
@declarative.mproperty
def AC_signal_matrix(self):
SIQ = np.einsum('i...,j...->ji...', self.AC_sensitivity_IQ, self.AC_sensitivity_IQ.conjugate())
return SIQ
@declarative.mproperty
def AC_signal_matrix_norm(self):
SIQ = self.AC_signal_matrix
SIQ = SIQ / (SIQ[0, 0] + SIQ[1, 1])
return SIQ
@declarative.mproperty
def AC_signal_ellipse(self):
SIQ = np.real(self.AC_signal_matrix)
rtDisc = np.sqrt((SIQ[0, 0] - SIQ[1, 1])**2 + 4*(SIQ[0, 1]*SIQ[1, 0]))
min_eig = (SIQ[0, 0] + SIQ[1, 1] - rtDisc)/2
max_eig = (SIQ[0, 0] + SIQ[1, 1] + rtDisc)/2
ratio = ((SIQ[1, 0] > 0)*2 - 1) *
|
np.sqrt((SIQ[0, 0] - min_eig) / (max_eig - min_eig))
|
numpy.sqrt
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import numpy as np
from jittermodel import u, q2unitless
from jittermodel.simulation import (Simulation, SphereCapacitance, _alpha,
sum_sinh, _eta, _lambda, _thetaI,
_thetaII)
from jittermodel._sim import _thetaI_c
from jittermodel.base import Cantilever, Experiment, Transistor
from numpy.testing import assert_allclose
from nose.tools import eq_, assert_almost_equal, assert_raises
from bunch import Bunch
from jittermodel.tests import expected_failure
import unittest
u.d = u.dimensionless # For brevity
import mpmath as mp
def mp_sum_sinh(alpha):
"""Implements the infinite sum using mpmath, at very high precision.
Method 'r+s+e' was found to work accurately for all values of alpha,
unlike most other algorithms in Mathematica, Python, etc."""
summand = lambda n: mp.sinh(alpha) / mp.sinh(alpha * n)
return mp.nsum(summand, [1, mp.inf], method='r+s+e')
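# Sketch: mp_sum_sinh(alpha) evaluates sum_{n>=1} sinh(alpha)/sinh(alpha*n) at
# high precision and serves as the reference value for sum_sinh in the test below.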
class Test_sum_sinh(unittest.TestCase):
@staticmethod
def test_sum_sinh():
"""Test that the sum is working properly for a range of alpha values.
The mpmath module is used to verify that the sum meets error
specifications.
"""
alphas = [2 ** i for i in xrange(-12, 7)]
results = [sum_sinh(alpha) for alpha in alphas]
mp_results = [mp_sum_sinh(alpha) for alpha in alphas]
for mp_result, test_result in zip(mp_results, results):
assert_almost_equal(mp_result, test_result, 7)
class MockSimulationCapacitance(object):
"""A mock simulation object only containing the parameters necessary to
test SphereCapacitance"""
units = {"[mass]": u.pg, "[length]": u.um, "[time]": u.ms,
"[current]": u.aC / u.ms, "[temperature]": u.K, "[angle]": u.rad}
E_0 = q2unitless(u.epsilon_0, units)
q = q2unitless(u.elementary_charge, units)
k_B = q2unitless(u.boltzmann_constant, units)
Samp = Bunch(h=0.1, E_s1=3)
Cant = Bunch(R_tip=0.05)
Expt = Bunch(d=0.15)
def __init__(self):
self.sphere = SphereCapacitance(self)
# TODO: Where do these test cases come from?
class TestSphereCapacitance(unittest.TestCase):
def setUp(self):
self.sim = MockSimulationCapacitance()
def test_C(self):
assert_almost_equal(0.00623177, self.sim.sphere.C())
def test_Cd(self):
assert_almost_equal(-0.00322151, self.sim.sphere.Cd())
def test_Cd2(self):
assert_almost_equal(0.0311542, self.sim.sphere.Cd2())
class TestSimulation(unittest.TestCase):
@staticmethod
def test_init_Simulation():
cant = Cantilever(f_c=50*u.kHz, k_c=3.5*u.N/u.m, Q=20000*u.d,
R_tip=40*u.nm, L_tip=15*u.um, theta_tip=16*u.degrees,
geometry_c='perpendicular')
trans = Transistor(semiconductor='TPD', h=70 * u.nm, h_trans=1 * u.nm,
h_i=300 * u.nm, E_s1=3.5, E_s2=-0.0005, E_i1=4.65,
E_i2=0, mobility=3e-6 * u.cm ** 2 / u.V / u.s,
T=298 * u.K, V_g=10 * u.V, rho=None)
expt = Experiment(d=100 * u.nm, V_ts=5 * u.V, jitter_f_i=0.2 * u.Hz,
jitter_f_f=3 * u.Hz)
sim = Simulation(cant, trans, expt)
# Test some properties are correct
eq_(sim.Cant.f_c, 50)
eq_(sim.Expt.d, 0.1)
eq_(sim.Samp.h_i, 0.3)
assert_almost_equal(sim.Samp.diff, 0.0077038955272097955)
# These tests are all generated by implementing sympy code for the functions in
# validate-im-dielectric.ipynb. That should be a good comparison; sympy
# uses mpmath as a backend for its arbitrary-precision arithmetic, so this
# should be robust against ordinary floating point errors.
class TestImDielectricHelperFunctions(unittest.TestCase):
@staticmethod
def test__eta():
k =
|
np.array([1, 10, 100, 1000, 10000, 100000])
|
numpy.array
|
#!/usr/bin/env python
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from rapidtide.util import valtoindex
from rapidtide.filter import noncausalfilter
def spectralfilterprops(thefilter, debug=False):
lowerstop, lowerpass, upperpass, upperstop = thefilter["filter"].getfreqlimits()
lowerstopindex = valtoindex(thefilter["frequencies"], lowerstop)
lowerpassindex = valtoindex(thefilter["frequencies"], lowerpass)
upperpassindex = valtoindex(thefilter["frequencies"], upperpass)
upperstopindex = np.min(
[
valtoindex(thefilter["frequencies"], upperstop),
len(thefilter["frequencies"]) - 1,
]
)
if debug:
print("target freqs:", lowerstop, lowerpass, upperpass, upperstop)
print(
"actual freqs:",
thefilter["frequencies"][lowerstopindex],
thefilter["frequencies"][lowerpassindex],
thefilter["frequencies"][upperpassindex],
thefilter["frequencies"][upperstopindex],
)
response = {}
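# passband ripple below = (max - min) / mean of the transfer function between
# the passband edge indices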
passbandmean = np.mean(thefilter["transferfunc"][lowerpassindex:upperpassindex])
passbandmax = np.max(thefilter["transferfunc"][lowerpassindex:upperpassindex])
passbandmin = np.min(thefilter["transferfunc"][lowerpassindex:upperpassindex])
response["passbandripple"] = (passbandmax - passbandmin) / passbandmean
if lowerstopindex > 2:
response["lowerstopmean"] = (
|
np.mean(thefilter["transferfunc"][0:lowerstopindex])
|
numpy.mean
|
import warnings
from typing import Tuple
import numpy as np
import skimage.segmentation as segm
from celltk.utils._types import Track, Mask, Image, Arr
from celltk.core.operation import BaseEvaluator
from celltk.utils.utils import ImageHelper
from celltk.utils.operation_utils import track_to_mask, get_cell_index
from celltk.utils.info_utils import nan_helper_1d
class Evaluator(BaseEvaluator):
@ImageHelper(by_frame=False)
def save_kept_cells(self,
track: Track,
array: Arr
) -> Track:
""""""
# Figure out all the cells that were kept
kept_cells = np.unique(array[:, :, 'label']).astype(int)
# Change to mask to also blank negatives
ravel = track_to_mask(track).ravel()
# Remove the missing cells by excluding from mapping
mapping = {c: c for c in kept_cells}
ravel = np.asarray([mapping.get(c, 0) for c in ravel])
# Add back the parent labels
parent_ravel = track.ravel() # Includes negative values
mask = (parent_ravel < 0) * (ravel > 0)
np.copyto(ravel, parent_ravel, where=mask)
return ravel.reshape(track.shape).astype(np.int16)
@ImageHelper(by_frame=False, as_tuple=False)
def make_single_cell_stack(self,
image: Image,
array: Arr,
cell_id: int,
position_id: int = None,
window_size: Tuple[int] = (40, 40),
region: str = None,
channel: str = None
) -> Image:
"""
Make a montage that follows the centroid of a single cell across frames.
NOTE:
- Cells very close to edge might raise IndexError
"""
# Simpler if it's limited to even numbers only
assert all([not (w % 2) for w in window_size])
# Find the row that contains the cell data
region = array.regions[0] if not region else region
channel = array.channels[0] if not channel else channel
label_array = array[region, channel, 'label']
position_array = array[region, channel, 'position_id']
cell_index = get_cell_index(cell_id, label_array,
position_id, position_array)
# Get the centroid, window for the cell, and img size
y, x = array[region, channel, ('y', 'x'), cell_index, :]
y = nan_helper_1d(y)
x = nan_helper_1d(x)
frames, y_img, x_img = image.shape
x_win, y_win = window_size
# Make the window with the cell in the center of the window
x_adj = int(x_win / 2)
y_adj = int(y_win / 2)
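# clip the window to the image bounds so cells near an edge do not index
# outside the frame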
y_min = np.floor(np.clip(y - y_adj, a_min=0, a_max=None)).astype(int)
y_max = np.floor(np.clip(y + y_adj, a_min=None, a_max=y_img)).astype(int)
x_min = np.floor(np.clip(x - x_adj, a_min=0, a_max=None)).astype(int)
x_max = np.floor(
|
np.clip(x + x_adj, a_min=None, a_max=x_img)
|
numpy.clip
|
import numpy as np
from sklearn import preprocessing, neighbors, model_selection, svm
import pandas as pd
import pickle
#import serial
import re
import random
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, plot_precision_recall_curve
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
def load_data():
df_B = pd.read_table('sign2/b.txt', header=None, sep=',')
B = np.array(df_B)
df_C = pd.read_table('sign2/c.txt', header=None, sep=',')
C = np.array(df_C)
df_D = pd.read_table('sign2/d.txt', header=None, sep=',')
D =
|
np.array(df_D)
|
numpy.array
|
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
X =
|
np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
|
numpy.array
|
import numpy as np
import yaml
from pathlib import Path
class VirtualLinearModelExperiment:
"""create virtual test data for a function a+b*x+c*x**2
Note:
The model is actually nonlinear due to the quadratic component c that represents a model bias
Attributes:
all_a (np.array): array with offsets (for each entry in a, there is a corresponding measurement
performed at x_function and x_derivative)
b (float): linear coefficient of the model
c (float): quadratic coefficient of the model
x_function(np.array): positions of function sensors in the interval [0,1]
x_derivative(np.array): positions of derivative sensors in the interval [0,1]
"""
def __init__(self, virtual_experiment_metadata_yaml):
"""Create virtual experiment
Args:
virtual_experiment_metadata_yaml: meta data file to generate the data from
"""
with open(virtual_experiment_metadata_yaml, "r") as f:
d = yaml.load(f, Loader=yaml.FullLoader)
self.all_a = np.asarray(d['all_a'])
self.b = d['b']
self.c = d['c']
self.x_function = np.asarray(d['x_function'])
self.sigma_noise_function = np.asarray(d['sigma_noise_function'])
self.x_derivative = np.asarray(d['x_derivative'])
self.sigma_noise_derivative = np.asarray(d['sigma_noise_derivative'])
self.seed =
|
np.asarray(d['seed'])
|
numpy.asarray
|
# Copyright (c) 2020 <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
class VideoProcessor(object):
def __init__(self, video_path, landmark_path, output_folder, extract_audio):
# path of the video file
self.video_path = video_path
self.landmarks_path = landmark_path
self.extract_audio = extract_audio
self.frames_folder = os.path.join(output_folder, "frames")
self.audios_folder = os.path.join(output_folder, "audios")
if not os.path.exists(output_folder):
os.mkdir(output_folder)
if not os.path.exists(self.audios_folder):
os.mkdir(self.audios_folder)
if not os.path.exists(self.frames_folder):
os.mkdir(self.frames_folder)
def preprocess(self, seq_len=30, target_resolution=(224, 224)):
"""
extract frames and audio from the video,
store the cropped frames and audio file in the output folders
seq_len: how many frames will be extracted from the video.
Since all videos in this dataset have similar durations,
video_duration = seq_len / fps
target_resolution: (desired_height, desired_width) of the facial frame extracted
"""
video = VideoFileClip(self.video_path, audio=self.extract_audio, target_resolution=target_resolution)
if self.extract_audio:
video.audio.write_audiofile(os.path.join(self.audios_folder, "audio.wav"))
times = list(np.arange(0, video.duration, video.duration/seq_len))
if len(times) < seq_len:
times.append(video.duration)
times = times[:seq_len]
# extract 2D points from csv
data =
|
np.genfromtxt(self.landmarks_path, delimiter=',')
|
numpy.genfromtxt
|
import itertools
import numpy as np
import os
import copy
from cv2 import cv2 as cv
# # # # sift helper # # # #
def prepare_sift(input_image):
sift = cv.xfeatures2d.SIFT_create()
kp1, des1 = find_sift(input_image, sift)
return kp1, des1
def find_sift(img, sift):
kp1, des1 = sift.detectAndCompute(img, None)
return kp1, des1
# # # # sift helper # # # #
# # # # sketch # # # #
class _BaseSketch:
file_name = None
resize_factor = None
pixels_in_cm = None
patterns_corners_pts = None
def __init__(self, keep_corners='1111'):
self.sketch_path = os.path.join(os.path.dirname(__file__), self.file_name)
self.sketch_image = None
self.kp1, self.des1 = self._calc_keypoints(keep_corners)
if not self.kp1:
raise AttributeError
def _load_image(self):
sketch_image = cv.cvtColor(cv.imread(self.sketch_path), cv.COLOR_BGR2GRAY)
return cv.resize(sketch_image, dsize=(0, 0), fx=1 / self.resize_factor, fy=1 / self.resize_factor)
def _crop_image(self, keep_corners):
sketch_size = self.sketch_image.shape
mid_x, mid_y = [round(d / 2) for d in sketch_size]
if not int(keep_corners[0]): # top-left
self.sketch_image[:mid_x, :mid_y] = 0
if not int(keep_corners[1]): # top-right
self.sketch_image[mid_x:, :mid_y] = 0
if not int(keep_corners[2]): # bottom-left
self.sketch_image[:mid_x, mid_y:] = 0
if not int(keep_corners[3]): # bottom-right
self.sketch_image[mid_x:, mid_y:] = 0
def _calc_keypoints(self, keep_corners):
self.sketch_image = self._load_image()
self._crop_image(keep_corners)
kp1, des1 = prepare_sift(self.sketch_image)
return kp1, des1
def convert_pixels_to_cm(self, distance_in_pixels):
return (distance_in_pixels / self.pixels_in_cm) * self.resize_factor
def convert_cm_to_pixels(self, distance_in_cm):
return (distance_in_cm * self.pixels_in_cm) / self.resize_factor
class TwoKoalas(_BaseSketch):
file_name = 'two_koalas.png'
resize_factor = 3
pixels_in_cm = 28.35
patterns_corners_pts = np.float32([[90, 90], [1875, 90], [1875, 3600], [90, 3600]]).reshape(-1, 1, 2) / resize_factor
sketch = TwoKoalas()
# # # # sketch # # # #
# # # # homography # # # #
# config
MIN_RATIO_FOR_KNN_AMBIGUITY = 0.8
MIN_MATCH_COUNT_TO_RUN_RANSAC = 10
MIN_INLIERS_FOR_RANSAC = 5
MIN_LEVEL_FOR_HOMOGRAPHY_SUCCESS = 2.2
class Homography:
def __init__(self, sketch_obj):
self.sift = cv.xfeatures2d.SIFT_create()
self.matcher = self.initialize_matcher()
self.sketch_obj = sketch_obj
self.img_query = None
self.matches_mask = None
self.homography_matrix = None
self.kp2 = None
self.des2 = None
self.matches = None
self.good = None
self.success_level = None
self.error_message = None
self.sheet_corners = None
@staticmethod
def initialize_matcher():
# using BFMatcher
# FlannBasedMatcher had inconsistent results interpreter vs docker
return cv.BFMatcher()
def process_image(self, img_query):
self.img_query = img_query
self.kp2, self.des2 = find_sift(img_query, self.sift)
if len(self.kp2) < 2:
return False
self.matches = self.matcher.knnMatch(self.sketch_obj.des1.astype(np.float32), self.des2.astype(np.float32), k=2)
# store all the good matches as per Lowe's ratio test.
self.good = self.get_only_unambiguous_matches()
return True
def get_only_unambiguous_matches(self):
good = []
for m, n in self.matches:
if m.distance < MIN_RATIO_FOR_KNN_AMBIGUITY * n.distance:
good.append(m)
return good
def calc_homography(self):
# homography from image to sketch coordinates
matched_pts_in_image = [self.kp2[m.trainIdx].pt for m in self.good]
matched_pts_in_sketch = [self.sketch_obj.kp1[m.queryIdx].pt for m in self.good]
image_pts_valid_format = np.float32(matched_pts_in_image).reshape(-1, 1, 2)
sketch_pts_valid_format = np.float32(matched_pts_in_sketch).reshape(-1, 1, 2)
self.success_level, self.homography_matrix, self.matches_mask = Homography.calc_homography_from_points(
image_pts_valid_format, sketch_pts_valid_format, self.sketch_obj.sketch_image.shape)
if self.homography_matrix is not None:
self.find_sheet_boundaries()
self.error_message = ""
def find_sheet_boundaries(self):
h, w = self.sketch_obj.sketch_image.shape
# sketch_boundaries order = tl, bl, br, tr
sketch_boundaries = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
inverseM = np.linalg.inv(self.homography_matrix) # need from sketch to image
sheet_corners_before_reshape = cv.perspectiveTransform(sketch_boundaries, inverseM)
self.sheet_corners =
|
np.float32(sheet_corners_before_reshape)
|
numpy.float32
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 20 15:07:16 2021
@author: Haad-Rathore
"""
# Notes:
# Baseline time is selected to be 700 ms. It should not be too long since electrical activity can change very fast and very dramatically.
from scipy.io import loadmat, savemat # Import function to read data.
from matplotlib import pyplot as plt
from matplotlib import colors as cl
from scipy import signal, integrate, stats
import sys, os, shutil, pywt
sys.path.append(os.getcwd())
import pandas as pd
import numpy as np
from natsort import natsorted
from Support import *
# Files and folders
source_dir = input('Enter the source directory: \n')
output_dir_1 = os.path.join(source_dir,'Processed','Spectrogram')
output_dir_2 = os.path.join(source_dir,'Processed','Rasters')
output_dir_3 = os.path.join(source_dir,'Processed','Avg_Trials')
filename_save_data = os.path.join(source_dir,'Processed','Spectrogram_mat')
output_dir_cortical_depth = os.path.join(source_dir,'Processed','Cortical-Depth')
dir_chan_list = os.path.join(source_dir,'chan_list.xlsx')
dir_chan_map = os.path.join(source_dir,'chan_map_1x32_128ch_rigid.xlsx')
dir_Bin2Trials = os.path.join(source_dir,'Bin2Trials')
dir_expsummary = os.path.join(source_dir,'exp_summary.xlsx')
os.makedirs(output_dir_1)
os.makedirs(output_dir_2)
os.makedirs(output_dir_3)
os.makedirs(filename_save_data)
os.makedirs(output_dir_cortical_depth)
# Extracting data from summary file .xlsx
df_exp_summary = pd.read_excel(dir_expsummary)
arr_exp_summary = df_exp_summary.to_numpy()
Num_chan = arr_exp_summary[0,0] # Number of channels
Notch_freq = arr_exp_summary[0,1] # Notch frequencey selected (in Hz)
Fs = arr_exp_summary[0,2] # Sampling freq (in Hz)
stim_start_time = arr_exp_summary[4,1] # Stimulation start
stim_start_time_original = arr_exp_summary[2,2]# original stimulation start time
n_stim_start = int(Fs * stim_start_time)# Stimulation start time in samples
Ntrials = arr_exp_summary[2,4] # Number of trials
stim_end_time = arr_exp_summary[2,1] + stim_start_time # End time of stimulation
time_seq = arr_exp_summary[4,0] # Time of one sequence in seconds
Seq_perTrial = arr_exp_summary[2,3] # Number of sequences per trial
total_time = time_seq * Seq_perTrial # Total time of the trial
print('Each sequence is: ', time_seq, 'sec')
time_seq = int(np.ceil(time_seq * Fs/2) * 2) # Time of one sequence in samples (rounded up to even)
# Extracting channel mapping info
df_chanMap = pd.read_excel(dir_chan_map,usecols=[0,1,2,3],header = None,sheet_name = 2)
arr_chanMap = df_chanMap.to_numpy() # 4 shank device 1x32 channels on each shank
df_chanList = pd.read_excel(dir_chan_list,header = 0)
chan_list = df_chanList.to_numpy()
chan_list = np.reshape(chan_list,(Num_chan,))
# First channel only
filename = os.path.join(dir_Bin2Trials,'Chan0.csv')
df_chan = pd.read_csv(filename,dtype = np.single)
arr_chan = df_chan.to_numpy()
Time = arr_chan[:,-1]
ADC_data = arr_chan[:,-2]
total_time = len(Time) # in samples
# --------------------- SET THESE PARAMETERS ------------------------------
normalize_PSD_flag = 0 # Do you want to normalize the PSD calculations with the baseline PSD
time_window = 80e-3 # Selecting a 80 ms time window for PSD calculation (MUA)
n_time_window = int(time_window * Fs) # Time in samples
n_chan = 128; # Total channels in the device
n_density = 32 # Number of channels on a single shank
electrode_spacing = 25 # Spacing in microns b/w electrodes
skip_trials = np.array([],dtype = np.int16) # Skip these trials
time_limit_spectrogram = stim_end_time + 5 # Specify the time limit (in seconds) for the plot of the spectrogram
time_start_spectrogram = 0.0 # For visual plotting (because bin2trial.py had 0.5 s extra)
decimate_factor = 20 # Factor for decimating for Morlet Transform
# f = pywt.scale2frequency('cmor2-1.5',np.arange(15,250,1)) * Fs/decimate_factor # Please check this to get the correct freuqncy band
Ntrials_copy = Ntrials - skip_trials.size
# Cortical depth plots
MUA_depth_peak = np.zeros((arr_chanMap.shape[0],arr_chanMap.shape[1],2))
MUA_depth_peak[:] = np.nan
MUA_depth_mean = np.zeros(arr_chanMap.shape)
MUA_depth_mean[:] = np.nan
LFP_depth_peak = np.zeros((arr_chanMap.shape[0],arr_chanMap.shape[1],4))
LFP_depth_peak[:] = np.nan
LFP_depth_mean = np.zeros((arr_chanMap.shape[0],arr_chanMap.shape[1],4))
LFP_depth_mean[:] = np.nan
MUA_depth_mean_post = np.zeros(arr_chanMap.shape)
MUA_depth_mean_post[:] = np.nan
LFP_depth_mean_post = np.zeros((arr_chanMap.shape[0],arr_chanMap.shape[1],4))
LFP_depth_mean_post[:] = np.nan
# Iterate over channels -------------------------------------------------
iter_progress = 0
for iter_chan in chan_list:
# finding location of this channel from the channel map
# first element is depth index and second element is shank index
chan_loc = np.reshape(np.where(iter_chan == arr_chanMap),(2,))
filename_str = 'Chan' + str(iter_chan) + '.csv'
filename = os.path.join(dir_Bin2Trials,filename_str)
df_chan = pd.read_csv(filename,dtype = np.single)
arr_chan = df_chan.to_numpy()
eEEG = arr_chan[:,:Ntrials] # Electrical recording
eEEG = np.transpose(eEEG)
eEEG = eEEG[:,0:int(time_limit_spectrogram*Fs)]
# Filtering signals
eEEG_filtered = filterSignal_notch(eEEG,Fs,60, axis_value = 1) # 60 Hz Notch
eEEG_filtered = filterSignal_notch(eEEG_filtered,Fs,120, axis_value = 1) # 120 Hz Notch
eEEG_MUA = filterSignal_MUA(eEEG_filtered,Fs,axis_value = 1) # MUA 0.3-3 Khz
eEEG_filtered = filterSignal_lowpassLFP(eEEG_filtered,Fs, axis_value = 1) # LFP (4-160 Hz)
# Compute power spectral density from short-time FT (MUA ONLY)
# f, t, Sxx = compute_PSD(eEEG_filtered, Fs, n_time_window, axis_value = 1)
f_MUA, t_stft, Sxx_MUA = compute_PSD(eEEG_MUA, Fs, n_time_window, axis_value = 1)
t_baseline = np.where(np.logical_and(t_stft>=(stim_start_time-1), t_stft<=stim_start_time-0.2))
t_baseline = np.asarray(t_baseline)
t_baseline = np.reshape(t_baseline,(t_baseline.size,))
# Compute power spectral density Morlet Wavelet Transform (LFP ONLY)
# frequencies = pywt.scale2frequency('cmor0.000004-3.0',np.arange(17,120,1)) / dt
eEEG_filtered = signal.decimate(eEEG_filtered, decimate_factor, ftype = 'iir', axis = 1) # decimation to reduce complexity
# cwt_arr, freq = pywt.cwt(eEEG_filtered,np.arange(3.50,60,0.1),'cmor2.0-0.5',sampling_period = decimate_factor/Fs, method = 'conv', axis = 1)
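# pywt 'cmorB-C' wavelets are complex Morlets with bandwidth B and center
# frequency C; the band and wavelet name here follow the authors' choice.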
cwt_arr, freq = pywt.cwt(eEEG_filtered,np.arange(20,400,1),'cmor0.1-3.0',sampling_period = decimate_factor/Fs, method = 'fft', axis = 1)
cwt_arr = np.transpose(cwt_arr,axes = [1,0,2])
cwt_arr = np.abs(cwt_arr)
Time_mwt = np.linspace(0,time_limit_spectrogram,eEEG_filtered.shape[1])
t_baseline_cwt = np.where(np.logical_and(Time_mwt>=(stim_start_time-1), Time_mwt<=stim_start_time-0.2))
t_baseline_cwt = np.asarray(t_baseline_cwt)
t_baseline_cwt = np.reshape(t_baseline_cwt,(t_baseline_cwt.size,))
# freq_axis = np.where(np.logical_and(freq>=8, freq<=140))
# freq_axis = np.asarray(freq_axis)
# freq_axis = np.reshape(freq_axis,(freq_axis.size,))
# coeff = stats.zscore(coeff,axis = 2)
# coeff = np.mean(coeff, axis = 1)
#------------- LFP Alpha Band
f_Alpha = np.where((freq>=8) & (freq<15)) # finding the indices of 8-15 Hz band
f_Alpha = np.asarray(f_Alpha)
f_Alpha = np.reshape(f_Alpha,(f_Alpha.size,))
arr_ndPSD_Alpha = np.zeros((Ntrials,Time_mwt.size),dtype=np.single)
#------------- LFP Beta Band
f_Beta = np.where((freq>=16) & (freq<32)) # finding the indices of 16-32 Hz band
f_Beta = np.asarray(f_Beta)
f_Beta = np.reshape(f_Beta,(f_Beta.size,))
arr_ndPSD_Beta = np.zeros((Ntrials,Time_mwt.size),dtype=np.single)
#------------- LFP Gamma band
f_Gamma = np.where((freq>=32) & (freq<100)) # Find the indices of the Gamma band (40-100 Hz)
f_Gamma = np.asarray(f_Gamma)
f_Gamma = np.reshape(f_Gamma,(f_Gamma.size,))
arr_ndPSD_Gamma = np.zeros((Ntrials,Time_mwt.size),dtype=np.single)
#------------- LFP High frequency
f_high = np.where((freq>=100) & (freq<=140)) # finding the indices of 100-140 Hz LFP band
f_high = np.asarray(f_high)
f_high = np.reshape(f_high,(f_high.size,))
arr_ndPSD_high = np.zeros((Ntrials,Time_mwt.size),dtype=np.single)
arr_ndPSD = np.zeros((Ntrials,freq.size,Time_mwt.size),dtype=np.single) # for LFP
arr_ndPSD_MUA = np.zeros((Ntrials,f_MUA.size,t_stft.size),dtype=np.single) # for MUA
# Iterating over trials
if (skip_trials.size == 0):
for iter_trial in range(0,Ntrials):
# psd_bl = np.mean(Sxx[iter_trial,:,t_baseline[0:]],axis = 0) # Taking average over time
psd_bl_MUA = np.mean(Sxx_MUA[iter_trial,:,t_baseline[0:]],axis = 0)
# psd_bl = np.reshape(psd_bl,(psd_bl.size,1))
psd_bl_MUA = np.reshape(psd_bl_MUA,(psd_bl_MUA.size,1))
cwt_bsl = np.mean(cwt_arr[iter_trial,:,t_baseline_cwt[0:]], axis = 0)
cwt_bsl = np.reshape(cwt_bsl,(cwt_bsl.size,1))
if normalize_PSD_flag == 1:
# compute normalized change in PSD
# ndPSD = (Sxx[iter_trial,:,:] - psd_bl)/psd_bl # Normalzied change in PSD/Hz
ndPSD_MUA = (Sxx_MUA[iter_trial,:,:] - psd_bl_MUA)/psd_bl_MUA # Normalized change in PSD/Hz for MUA
# compute normalized change in Morlet Transform
nd_cwt = (cwt_arr[iter_trial,:,:] - cwt_bsl)/cwt_bsl # Normalized change in cw morlet transform
else:
# compute change in PSD
# ndPSD = (Sxx[iter_trial,:,:] - psd_bl)/psd_bl # change in PSD/Hz
ndPSD_MUA = (Sxx_MUA[iter_trial,:,:] - psd_bl_MUA) # change in PSD/Hz for MUA
# compute normalized change in Morlet Transform
nd_cwt = (cwt_arr[iter_trial,:,:] - cwt_bsl) # change in cw morlet transform
# Average values of the corresponding frequency bands being analyzed
ndPSD_Alpha = np.mean(nd_cwt[f_Alpha[0]:f_Alpha[-1]+1,:],axis = 0)
ndPSD_Beta = np.mean(nd_cwt[f_Beta[0]:f_Beta[-1]+1,:],axis = 0)
ndPSD_Gamma = np.mean(nd_cwt[f_Gamma[0]:f_Gamma[-1]+1,:],axis = 0)
ndPSD_high = np.mean(nd_cwt[f_high[0]:f_high[-1]+1,:],axis = 0)
# storing in big array
arr_ndPSD_Alpha[iter_trial,:] = ndPSD_Alpha
arr_ndPSD_Beta[iter_trial,:] = ndPSD_Beta
arr_ndPSD_Gamma[iter_trial,:] = ndPSD_Gamma
arr_ndPSD_high[iter_trial,:] = ndPSD_high
# for spectrogram_ndPSD and spectrogram_ndPSD_MUA and continuous wavelet transform
arr_ndPSD_MUA[iter_trial,:,:] = ndPSD_MUA
arr_ndPSD[iter_trial,:,:] = nd_cwt
else:
for iter_trial in range(0,Ntrials):
if not (iter_trial == skip_trials).any():
# psd_bl = np.mean(Sxx[iter_trial,:,t_baseline[0:]],axis = 0) # Taking average over time
psd_bl_MUA = np.mean(Sxx_MUA[iter_trial,:,t_baseline[0:]],axis = 0)
# psd_bl = np.reshape(psd_bl,(psd_bl.size,1))
psd_bl_MUA = np.reshape(psd_bl_MUA,(psd_bl_MUA.size,1))
cwt_bsl = np.mean(cwt_arr[iter_trial,:,t_baseline_cwt[0:]], axis = 0)
cwt_bsl = np.reshape(cwt_bsl,(cwt_bsl.size,1))
if normalize_PSD_flag == 1:
# compute normalized change in PSD
ndPSD_MUA = (Sxx_MUA[iter_trial,:,:] - psd_bl_MUA)/psd_bl_MUA # Normalized change in PSD/Hz for MUA
# compute normalized change in Morlet Transform
nd_cwt = (cwt_arr[iter_trial,:,:] - cwt_bsl)/cwt_bsl # Normalized change in cw morlet transform
else:
# compute change in PSD
ndPSD_MUA = (Sxx_MUA[iter_trial,:,:] - psd_bl_MUA) # change in PSD/Hz for MUA
# compute normalized change in Morlet Transform
nd_cwt = (cwt_arr[iter_trial,:,:] - cwt_bsl) # change in cw morlet transform
# Average values of the corresponding frequency bands being analyzed
ndPSD_Alpha = np.mean(nd_cwt[f_Alpha[0]:f_Alpha[-1]+1,:],axis = 0)
ndPSD_Beta = np.mean(nd_cwt[f_Beta[0]:f_Beta[-1]+1,:],axis = 0)
ndPSD_Gamma = np.mean(nd_cwt[f_Gamma[0]:f_Gamma[-1]+1,:],axis = 0)
ndPSD_high = np.mean(nd_cwt[f_high[0]:f_high[-1]+1,:],axis = 0)
# storing in big array
arr_ndPSD_Alpha[iter_trial,:] = ndPSD_Alpha
arr_ndPSD_Beta[iter_trial,:] = ndPSD_Beta
arr_ndPSD_Gamma[iter_trial,:] = ndPSD_Gamma
arr_ndPSD_high[iter_trial,:] = ndPSD_high
# for spectrogram_ndPSD and spectrogram_ndPSD_MUA and continuous wavelet transform
arr_ndPSD_MUA[iter_trial,:,:] = ndPSD_MUA
arr_ndPSD[iter_trial,:,:] = nd_cwt
# delete empty trials
arr_ndPSD_Alpha = np.delete(arr_ndPSD_Alpha,skip_trials,0)
arr_ndPSD_Beta = np.delete(arr_ndPSD_Beta,skip_trials,0)
arr_ndPSD_Gamma = np.delete(arr_ndPSD_Gamma,skip_trials,0)
arr_ndPSD_high = np.delete(arr_ndPSD_high,skip_trials,0)
arr_ndPSD_MUA = np.delete(arr_ndPSD_MUA,skip_trials,0)
arr_ndPSD = np.delete(arr_ndPSD,skip_trials,0)
# Averaging across trials
avg_ndPSD_Alpha = np.mean(arr_ndPSD_Alpha,axis=0)
avg_ndPSD_Beta = np.mean(arr_ndPSD_Beta,axis=0)
avg_ndPSD_Gamma = np.mean(arr_ndPSD_Gamma,axis=0)
avg_ndPSD_high = np.mean(arr_ndPSD_high,axis=0)
# Averaging across trials (true spectrogram)
f_axis = np.where((freq>=8) & (freq<=140))
f_axis = np.asarray(f_axis)
f_axis = np.reshape(f_axis,(f_axis.size,))
spectrogram_ndPSD = np.mean(arr_ndPSD[:,f_axis[0]:f_axis[-1]+1,:],axis = 0)
f_axis_MUA = np.where((f_MUA>=300) & (f_MUA<=3000))
f_axis_MUA = np.asarray(f_axis_MUA)
f_axis_MUA = np.reshape(f_axis_MUA,(f_axis_MUA.size,))
spectrogram_ndPSD_MUA = np.mean(arr_ndPSD_MUA[:,f_axis_MUA[0]:f_axis_MUA[-1]+1,:], axis=(0,1))
# spectrogram_ndPSD = np.transpose(spectrogram_ndPSD)
# Activation Time windows
t_activation = np.where(np.logical_and(Time_mwt>=stim_start_time, Time_mwt<=stim_end_time))
t_activation = np.asarray(t_activation)
t_activation = np.reshape(t_activation,(t_activation.size,))
t_activation_MUA = np.where(np.logical_and(t_stft>=stim_start_time, t_stft<=stim_end_time))
t_activation_MUA = np.asarray(t_activation_MUA)
t_activation_MUA = np.reshape(t_activation_MUA,(t_activation_MUA.size,))
MUA_depth_mean[chan_loc[0],chan_loc[1]] = np.mean(spectrogram_ndPSD_MUA[t_activation_MUA[0:]])
# MUA_depth_peak[chan_loc[0],chan_loc[1]] = np.amax(spectrogram_ndPSD_MUA[t_activation_MUA[0:]])
MUA_depth_peak[chan_loc[0],chan_loc[1],:] = detect_peak_basic(spectrogram_ndPSD_MUA[t_activation_MUA[0:]],2)
LFP_depth_mean[chan_loc[0],chan_loc[1],0] = np.mean(avg_ndPSD_Alpha[t_activation[0:]])
LFP_depth_peak[chan_loc[0],chan_loc[1],0] = np.amax(avg_ndPSD_Alpha[t_activation[0:]])
LFP_depth_mean[chan_loc[0],chan_loc[1],1] = np.mean(avg_ndPSD_Beta[t_activation[0:]])
LFP_depth_peak[chan_loc[0],chan_loc[1],1] = np.amax(avg_ndPSD_Beta[t_activation[0:]])
LFP_depth_mean[chan_loc[0],chan_loc[1],2] = np.mean(avg_ndPSD_Gamma[t_activation[0:]])
LFP_depth_peak[chan_loc[0],chan_loc[1],2] = np.amax(avg_ndPSD_Gamma[t_activation[0:]])
LFP_depth_mean[chan_loc[0],chan_loc[1],3] = np.mean(avg_ndPSD_high[t_activation[0:]])
LFP_depth_peak[chan_loc[0],chan_loc[1],3] = np.amax(avg_ndPSD_high[t_activation[0:]])
# Post activation Time windows
t_activation_post = np.where(np.logical_and(Time_mwt>=stim_end_time-time_window, Time_mwt<=stim_end_time+time_window))
t_activation_post =
|
np.asarray(t_activation_post)
|
numpy.asarray
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
from scipy.stats import norm
from cgpm.crosscat.engine import Engine
from cgpm.crosscat.state import State
from cgpm.mixtures.dim import Dim
from cgpm.mixtures.view import View
from cgpm.utils import general as gu
def gen_data_table(n_rows, view_weights, cluster_weights, cctypes, distargs,
separation, view_partition=None, rng=None):
"""Generates data, partitions, and Dim.
Parameters
----------
n_rows : int
Number of rows (data points) to generate.
view_weights : list<float>
An n_views length list of floats that sum to one. The weights indicate
the proportion of columns in each view.
cluster_weights : list<list<float>>
An n_views length list of n_cluster length lists that sum to one.
The weights indicate the proportion of rows in each cluster.
cctypes : list<str>
n_columns length list of string specifying the distribution types for
each column.
distargs : list
List of distargs for each column (see documentation for each data type
for info on distargs).
separation : list
An n_cols length list of values between [0,1], where separation[i] is
the separation of clusters in column i. Values closer to 1 imply higher
separation.
Returns
-------
T : np.ndarray
An (n_cols, n_rows) matrix, where each row T[i,:] is the data for
column i (transpose of a design matrix).
Zv : list
An n_cols length list of integers, where Zv[i] is the view assignment
of column i.
Zc : list<list>
An n_view length list of lists, where Zc[v][r] is the cluster assignment
of row r in view v.
Example
-------
>>> n_rows = 500
>>> view_weights = [.2, .8]
>>> cluster_weights = [[.3, .2, .5], [.4, .6]]
>>> cctypes = ['lognormal','normal','poisson','categorical',
... 'vonmises', 'bernoulli']
>>> distargs = [None, None, None, {'k':8}, None, None]
>>> separation = [.8, .7, .9, .6, .7, .85]
>>> T, Zv, Zc = tu.gen_data_table(n_rows, view_weights,
...     cluster_weights, cctypes, distargs, separation)
"""
if rng is None:
rng = gu.gen_rng()
n_cols = len(cctypes)
if view_partition:
Zv = list(view_partition)
else:
Zv = gen_partition(n_cols, view_weights, rng)
Zc = [gen_partition(n_rows, cw, rng) for cw in cluster_weights]
assert len(Zv) == n_cols
assert len(Zc) == len(set(Zv))
assert len(Zc[0]) == n_rows
T = np.zeros((n_cols, n_rows))
for col in xrange(n_cols):
cctype = cctypes[col]
args = distargs[col]
view = Zv[col]
Tc = _gen_data[cctype](
Zc[view], rng, separation=separation[col], distargs=args)
T[col] = Tc
return T, Zv, Zc
def gen_dims_from_structure(T, Zv, Zc, cctypes, distargs):
n_cols = len(Zv)
dims = []
for col in xrange(n_cols):
v = Zv[col]
cctype = cctypes[col]
dim_c = Dim(cctype, col, distargs=distargs[col])
dim_c.transition_hyper_grids(T[col])
dim_c.bulk_incorporate(T[col], Zc[v])
dims.append(dim_c)
return dims
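# Example (a sketch, assuming the _gen_data dispatch dict and gen_partition
# defined elsewhere in this module): generate a small two-column table and
# rebuild Dim objects from the sampled structure.
#   rng = gu.gen_rng(0)
#   T, Zv, Zc = gen_data_table(
#       50, [1.], [[.5, .5]], ['normal', 'normal'], [None, None],
#       [.8, .8], rng=rng)
#   dims = gen_dims_from_structure(
#       T, Zv, Zc, ['normal', 'normal'], [None, None])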
def _gen_beta_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
K = np.max(Z)+1
alphas = np.linspace(.5 - .5*separation*.85, .5 + .5*separation*.85, K)
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
alpha = alphas[cluster]
beta = (1.-alpha) * 20.* (norm.pdf(alpha, .5, .25))
alpha *= 20. * norm.pdf(alpha, .5, .25)
Tc[r] = rng.beta(alpha, beta)
return Tc
def _gen_normal_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
mu = cluster * (5.*separation)
sigma = 1.0
Tc[r] = rng.normal(loc=mu, scale=sigma)
return Tc
def _gen_normal_trunc_data(Z, rng, separation=.9, distargs=None):
l, h = distargs['l'], distargs['h']
max_draws = 100
n_rows = len(Z)
K = max(Z) + 1
mean = (l+h)/2.
bins = np.linspace(l, h, K+1)
bin_centers = [.5*(bins[i-1]+bins[i]) for i in xrange(1, len(bins))]
distances = [mean - bc for bc in bin_centers]
mus = [bc + (1-separation)*d for bc, d in zip(bin_centers, distances)]
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
sigma = 1
i = 0
while True:
i += 1
x = rng.normal(loc=mus[cluster], scale=sigma)
if l <= x <= h:
break
if max_draws < i:
raise ValueError('Could not generate normal_trunc data.')
Tc[r] = x
return Tc
def _gen_vonmises_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
num_clusters = max(Z)+1
sep = 2*math.pi / num_clusters
mus = [c*sep for c in xrange(num_clusters)]
std = sep/(5.*separation**.75)
k = 1 / (std*std)
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
mu = mus[cluster]
Tc[r] = rng.vonmises(mu, k) + math.pi
return Tc
def _gen_poisson_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
lam = cluster * (4.*separation) + 1
Tc[r] = rng.poisson(lam)
return Tc
def _gen_exponential_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
mu = cluster * (4.*separation) + 1
Tc[r] = rng.exponential(mu)
return Tc
def _gen_geometric_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
    K = np.max(Z)+1
    ps = np.linspace(.5 - .5*separation*.85, .5 + .5*separation*.85, K)
    Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
Tc[r] = rng.geometric(ps[cluster]) -1
return Tc
def _gen_lognormal_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
if separation > .9:
separation = .9
Tc = np.zeros(n_rows)
for r in xrange(n_rows):
cluster = Z[r]
mu = cluster * (.9*separation**2)
Tc[r] = rng.lognormal(mean=mu, sigma=(1.-separation)/(cluster+1.))
return Tc
def _gen_bernoulli_data(Z, rng, separation=.9, distargs=None):
n_rows = len(Z)
Tc =
|
np.zeros(n_rows)
|
numpy.zeros
|
from copy import deepcopy
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.decomposition import PCA as skPCA
from sklearn.model_selection import BaseCrossValidator, KFold
from sklearn.model_selection._split import BaseShuffleSplit
from .ChemometricsScaler import ChemometricsScaler
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import matplotlib as mpl
import scipy.stats as st
import matplotlib.cm as cm
__author__ = 'gscorreia89'
class ChemometricsPCA(BaseEstimator):
"""
ChemometricsPCA object - Wrapper for sklearn.decomposition PCA algorithms, with tailored methods
for Chemometric Data analysis.
:param ncomps: Number of PCA components desired.
:type ncomps: int
:param sklearn.decomposition._BasePCA pca_algorithm: scikit-learn PCA algorithm to use (inheriting from _BasePCA).
:param scaler: The object which will handle data scaling.
:type scaler: ChemometricsScaler object, scaling/preprocessing objects from scikit-learn or None
:param kwargs pca_type_kwargs: Keyword arguments to be passed during initialization of pca_algorithm.
:raise TypeError: If the pca_algorithm or scaler objects are not of the right class.
"""
# Constant usage of kwargs might look excessive but ensures that most things from scikit-learn can be used directly
# no matter what PCA algorithm is used
def __init__(self, ncomps=2, pca_algorithm=skPCA, scaler=ChemometricsScaler(), **pca_type_kwargs):
try:
# Perform the check with isinstance, but avoid abstract base class runs. PCA needs the number of comps anyway!
init_pca_algorithm = pca_algorithm(n_components=ncomps, **pca_type_kwargs)
if not isinstance(init_pca_algorithm, (BaseEstimator, TransformerMixin)):
raise TypeError("Use a valid scikit-learn PCA model please")
if not (isinstance(scaler, TransformerMixin) or scaler is None):
raise TypeError("Scikit-learn Transformer-like object or None")
if scaler is None:
scaler = ChemometricsScaler(0, with_std=False)
self.pca_algorithm = init_pca_algorithm
# Most initialized as None, before object is fitted.
self.scores = None
self.loadings = None
self._ncomps = ncomps
self._scaler = scaler
self.cvParameters = None
self.modelParameters = None
self._isfitted = False
except TypeError as terp:
print(terp.args[0])
raise terp
def fit(self, x, **fit_params):
"""
Perform model fitting on the provided x data matrix and calculate basic goodness-of-fit metrics.
Equivalent to scikit-learn's default BaseEstimator method.
:param x: Data matrix to fit the PCA model.
:type x: numpy.ndarray, shape [n_samples, n_features].
:param kwargs fit_params: Keyword arguments to be passed to the .fit() method of the core sklearn model.
:raise ValueError: If any problem occurs during fitting.
"""
try:
# This scaling check is always performed to ensure running model with scaling or with scaling == None
# always give consistent results (same type of data scale expected for fitting,
# returned by inverse_transform, etc
if self.scaler is not None:
xscaled = self.scaler.fit_transform(x)
self.pca_algorithm.fit(xscaled, **fit_params)
self.scores = self.pca_algorithm.transform(xscaled)
ss = np.sum((xscaled - np.mean(xscaled, 0)) ** 2)
predicted = self.pca_algorithm.inverse_transform(self.scores)
rss = np.sum((xscaled - predicted) ** 2)
# variance explained from scikit-learn stored as well
else:
self.pca_algorithm.fit(x, **fit_params)
self.scores = self.pca_algorithm.transform(x)
ss = np.sum((x - np.mean(x, 0)) ** 2)
predicted = self.pca_algorithm.inverse_transform(self.scores)
rss = np.sum((x - predicted) ** 2)
self.modelParameters = {'R2X': 1 - (rss / ss), 'VarExp': self.pca_algorithm.explained_variance_,
'VarExpRatio': self.pca_algorithm.explained_variance_ratio_}
# For "Normalised" DmodX calculation
resid_ssx = self._residual_ssx(x)
s0 = np.sqrt(resid_ssx.sum()/((self.scores.shape[0] - self.ncomps - 1)*(x.shape[1] - self.ncomps)))
self.modelParameters['S0'] = s0
# Kernel PCA and other non-linear methods might not have explicit loadings - safeguard against this
if hasattr(self.pca_algorithm, 'components_'):
self.loadings = self.pca_algorithm.components_
self._isfitted = True
except ValueError as verr:
raise verr
def fit_transform(self, x, **fit_params):
"""
Fit a model and return the scores, as per the scikit-learn's TransformerMixin method.
:param x: Data matrix to fit and project.
:type x: numpy.ndarray, shape [n_samples, n_features]
:param kwargs fit_params: Optional keyword arguments to be passed to the fit method.
:return: PCA projections (scores) corresponding to the samples in X.
:rtype: numpy.ndarray, shape [n_samples, n_comps]
:raise ValueError: If there are problems with the input or during model fitting.
"""
try:
self.fit(x, **fit_params)
return self.transform(x)
except ValueError as exp:
raise exp
def transform(self, x):
"""
Calculate the projections (scores) of the x data matrix. Similar to scikit-learn's TransformerMixin method.
:param x: Data matrix to fit and project.
:type x: numpy.ndarray, shape [n_samples, n_features]
:param kwargs transform_params: Optional keyword arguments to be passed to the transform method.
:return: PCA projections (scores) corresponding to the samples in X.
:rtype: numpy.ndarray, shape [n_samples, n_comps]
:raise ValueError: If there are problems with the input or during model fitting.
"""
try:
if self.scaler is not None:
xscaled = self.scaler.transform(x)
return self.pca_algorithm.transform(xscaled)
else:
return self.pca_algorithm.transform(x)
except ValueError as verr:
raise verr
def score(self, x, sample_weight=None):
"""
Return the average log-likelihood of all samples. Same as the underlying score method from the scikit-learn
PCA objects.
:param x: Data matrix to score model on.
:type x: numpy.ndarray, shape [n_samples, n_features]
:param numpy.ndarray sample_weight: Optional sample weights during scoring.
:return: Average log-likelihood over all samples.
:rtype: float
:raises ValueError: if the data matrix x provided is invalid.
"""
try:
# Not all sklearn pca objects have a "score" method...
score_method = getattr(self.pca_algorithm, "score", None)
if not callable(score_method):
raise NotImplementedError
# Scaling check for consistency
if self.scaler is not None:
xscaled = self.scaler.transform(x)
return self.pca_algorithm.score(xscaled, sample_weight)
else:
return self.pca_algorithm.score(x, sample_weight)
except ValueError as verr:
raise verr
def inverse_transform(self, scores):
"""
Transform scores to the original data space using the principal component loadings.
Similar to scikit-learn's default TransformerMixin method.
:param scores: The projections (scores) to be converted back to the original data space.
:type scores: numpy.ndarray, shape [n_samples, n_comps]
:return: Data matrix in the original data space.
:rtype: numpy.ndarray, shape [n_samples, n_features]
:raises ValueError: If the dimensions of score mismatch the number of components in the model.
"""
# Scaling check for consistency
if self.scaler is not None:
xinv_prescaled = self.pca_algorithm.inverse_transform(scores)
xinv = self.scaler.inverse_transform(xinv_prescaled)
return xinv
else:
return self.pca_algorithm.inverse_transform(scores)
@property
def ncomps(self):
try:
return self._ncomps
except AttributeError as atre:
raise atre
@ncomps.setter
def ncomps(self, ncomps=1):
"""
Setter for number of components.
:param int ncomps: Number of components to use in the model.
:raise AttributeError: If there is a problem changing the number of components and resetting the model.
"""
# To ensure changing number of components effectively resets the model
try:
self._ncomps = ncomps
self.pca_algorithm = clone(self.pca_algorithm, safe=True)
self.pca_algorithm.n_components = ncomps
self.modelParameters = None
self.loadings = None
self.scores = None
self.cvParameters = None
return None
except AttributeError as atre:
raise atre
@property
def scaler(self):
try:
return self._scaler
except AttributeError as atre:
raise atre
@scaler.setter
def scaler(self, scaler):
"""
Setter for the model scaler.
:param scaler: The object which will handle data scaling.
:type scaler: ChemometricsScaler object, scaling/preprocessing objects from scikit-learn or None
:raise AttributeError: If there is a problem changing the scaler and resetting the model.
:raise TypeError: If the new scaler provided is not a valid object.
"""
try:
if not (isinstance(scaler, TransformerMixin) or scaler is None):
raise TypeError("Scikit-learn Transformer-like object or None")
if scaler is None:
scaler = ChemometricsScaler(0, with_std=False)
self._scaler = scaler
self.pca_algorithm = clone(self.pca_algorithm, safe=True)
self.modelParameters = None
self.loadings = None
self.scores = None
self.cvParameters = None
return None
except AttributeError as atre:
raise atre
except TypeError as typerr:
raise typerr
def hotelling_T2(self, comps=None, alpha=0.05):
"""
Obtain the parameters for the Hotelling T2 ellipse at the desired significance level.
:param list comps: List of component indices to use; all model components if None.
:param float alpha: Significance level
:return: The Hotelling T2 ellipsoid radii at vertex
:rtype: numpy.ndarray
:raise AttributeError: If the model is not fitted
:raise ValueError: If the components requested are higher than the number of components in the model
:raise TypeError: If comps is not None or a list/numpy 1d array, or if alpha is not a float
"""
try:
if self._isfitted is False:
raise AttributeError("Model is not fitted")
nsamples = self.scores.shape[0]
if comps is None:
ncomps = self.ncomps
ellips = self.scores[:, range(self.ncomps)] ** 2
ellips = 1 / nsamples * (ellips.sum(0))
else:
ncomps = len(comps)
ellips = self.scores[:, comps] ** 2
ellips = 1 / nsamples * (ellips.sum(0))
# F stat
fs = (nsamples - 1) / nsamples * ncomps * (nsamples ** 2 - 1) / (nsamples * (nsamples - ncomps))
fs = fs * st.f.ppf(1-alpha, ncomps, nsamples - ncomps)
hoteling_t2 = list()
for comp in range(ncomps):
hoteling_t2.append(np.sqrt((fs * ellips[comp])))
return np.array(hoteling_t2)
except AttributeError as atre:
raise atre
except ValueError as valerr:
raise valerr
except TypeError as typerr:
raise typerr
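    # Example (a sketch, not part of the original class): the radii returned
    # by hotelling_T2 can parametrize a confidence ellipse on a two-component
    # scores plot; the names below are illustrative.
    #   t2 = model.hotelling_T2(comps=[0, 1], alpha=0.05)
    #   angle = np.linspace(0, 2 * np.pi, 200)
    #   plt.plot(t2[0] * np.cos(angle), t2[1] * np.sin(angle), 'k--')
    #   plt.scatter(model.scores[:, 0], model.scores[:, 1])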
def _residual_ssx(self, x):
"""
:param x: Data matrix [n samples, m variables]
:return: The residual Sum of Squares per sample
"""
pred_scores = self.transform(x)
x_reconstructed = self.scaler.transform(self.inverse_transform(pred_scores))
xscaled = self.scaler.transform(x)
residuals = np.sum((xscaled - x_reconstructed)**2, axis=1)
return residuals
def x_residuals(self, x, scale=True):
"""
:param x: data matrix [n samples, m variables]
:param scale: Return the residuals in the scale the model is using or in the raw data scale
:return: X matrix model residuals
"""
pred_scores = self.transform(x)
x_reconstructed = self.scaler.transform(self.inverse_transform(pred_scores))
xscaled = self.scaler.transform(x)
x_residuals = np.sum((xscaled - x_reconstructed)**2, axis=1)
if scale:
x_residuals = self.scaler.inverse_transform(x_residuals)
return x_residuals
def dmodx(self, x):
"""
Normalised DmodX measure
:param x: data matrix [n samples, m variables]
:return: The Normalised DmodX measure for each sample
"""
resids_ssx = self._residual_ssx(x)
s = np.sqrt(resids_ssx/(self.loadings.shape[1] - self.ncomps))
dmodx =
|
np.sqrt((s/self.modelParameters['S0'])**2)
|
numpy.sqrt
|
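# Example usage of the ChemometricsPCA wrapper above (a sketch; X stands for
# any samples-by-features array):
#   model = ChemometricsPCA(ncomps=2)
#   X = np.random.normal(size=(50, 10))
#   scores = model.fit_transform(X)
#   print(model.modelParameters['R2X'])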
import os
import numpy as np
from utils.constants import TZ_COND_DICT
from analysis import compute_stats, remove_none
from scipy.stats import pearsonr, sem, ttest_ind
from scipy import stats
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='white', palette='colorblind', context='poster')
from itertools import product
from models import LCALSTM as Agent
from task import SequenceLearning
from exp_ms import run_ms
from utils.params import P
from analysis import compute_cell_memory_similarity, process_cache, \
    process_cache_ms, get_trial_cond_ids, get_roll_av, create_sim_dict, \
    compute_cell_memory_similarity_stats, n_epoch_inpt_calc
from utils.io import build_log_path, load_ckpt, pickle_save_dict, \
get_test_data_dir, get_test_data_fname, load_env_metadata, pickle_load_dict
log_root = '/Users/carsonwardell/Desktop/Thesis/log/' #'/tigress/cwardell/logs/learn-hippocampus/log/'
exp_name = 'Mental-Sims-v_old_5-actor_f-olda2c'
#exp_name = 'Mental-Sims-v_old_3.3-olda2c-frozenactor'
def_prob = None
n_def_tps = 0
n_examples = 128
seed = 0
supervised_epoch = 600
epoch_load = 1000
n_epochs = 350
n_branch = 4
n_param = 16
enc_size = 16
# enc_size_test = 8
enc_size_test = enc_size
n_event_remember = 2
penalty_random = 1
# testing param, ortho to the training directory
attach_cond = 0
# loading params
pad_len_load = 0
p_rm_ob_enc_load =0 #.3
p_rm_ob_rcl_load =0 #.3
# testing params
pad_len = 0
p_test = 0
p_rm_ob_enc_test = p_test
p_rm_ob_rcl_test = p_test
n_examples_test = 256
similarity_max_test = .9
similarity_min_test = 0
lr=8e-4
# load lca params
comp_val = .8
leak_val = 0
'''loop over conditions for testing'''
slience_recall_time = None
penalty_train = 5
penalty_test = np.array([2])
seed_num = 2
# get cond ids (same for all trials)
log_cond = 'DM'
cond_ids = get_trial_cond_ids(log_cond)
cond_ids['DM'] = True
memory_types = ['targ', 'lure']
gr_pal = sns.color_palette('colorblind')[2:4]
scramble_option = False
penalty = 5
seed_num = 4
T_total = 32
T_part = int(T_total/2)
p = P(
exp_name=exp_name, sup_epoch=supervised_epoch,
n_param=n_param, n_branch=n_branch, pad_len=pad_len_load,
def_prob=def_prob, n_def_tps=n_def_tps,
enc_size=enc_size, n_event_remember=n_event_remember,
penalty=penalty_train, penalty_random=penalty_random,
attach_cond=attach_cond, lr=lr,
p_rm_ob_enc=p_rm_ob_enc_load, p_rm_ob_rcl=p_rm_ob_rcl_load,
)
task = SequenceLearning(
n_param=p.env.n_param, n_branch=p.env.n_branch, pad_len=pad_len,
p_rm_ob_enc=p_rm_ob_enc_test, p_rm_ob_rcl=p_rm_ob_rcl_test,
similarity_max=similarity_max_test, similarity_min=similarity_min_test,
similarity_cap_lag=p.n_event_remember,
)
x_dim = task.x_dim
if attach_cond != 0:
x_dim += 1
# load the agent back
agent = Agent(
input_dim=x_dim, output_dim=p.a_dim,
rnn_hidden_dim=p.net.n_hidden, dec_hidden_dim=p.net.n_hidden_dec,
dict_len=p.net.dict_len
)
''' data logs'''
Log_caches = []
Log_full_caches = []
av_sims_data = []
origins_data = []
Log_sem_caches = []
all_sims_lengs = []
all_epoch_reward = []
Log_sim_cos = [None] * 40
Log_sim_lca = [None] * 40
'''pull data from all subjects'''
for idx, subj_id in enumerate(range(0,11)):
# create logging dirs
log_path, log_subpath = build_log_path(
subj_id, p, log_root=log_root, mkdir=False, verbose=False
)
# init env
log_subpath
env_data = load_env_metadata(log_subpath)
def_path = env_data['def_path']
p.env.def_path = def_path
p.update_enc_size(enc_size_test)
test_params = [penalty, pad_len, slience_recall_time]
test_data_dir, _ = get_test_data_dir(
log_subpath, epoch_load, test_params)
test_data_fname = get_test_data_fname(
n_examples, None, False)
fpath = os.path.join(test_data_dir, test_data_fname)
print(fpath)
print(subj_id)
    data_dict = pickle_load_dict(fpath)
    Log_full_caches.append(data_dict['Log_caches'][2])
    Log_sem_caches.append(data_dict['Log_caches'][1])
    Log_caches.append(data_dict['Log_caches'][0])
    av_sims_data.append(data_dict['av_sims_data'])
    origins_data.append(data_dict['origins_data'])
    all_sims_lengs.append(data_dict['full_sim_data'][0])
    all_epoch_reward.append(data_dict['full_sim_data'][1])
#'full_sim_data': [all_sims_lengs, all_epoch_reward]}
''' lets do learning curves first '''
# first average the data across subjs and get SEM
sa_sim_lengths = np.mean(av_sims_data, axis=0) #sa means averaged across subjs
sm_sim_lengths = sem(av_sims_data, axis=0) #sm means sem across subjs
# now process rolling averages
w = 3 # set a window
r_sims_lengs, r_epoch_reward = get_roll_av(sa_sim_lengths[0], sa_sim_lengths[1], w)
r_sims_lengs = np.asarray(r_sims_lengs)
r_epoch_reward = np.asarray(r_epoch_reward)
#fill in initial vals
r_sims_lengs[0:w] = np.reshape(sa_sim_lengths[0][0:w], (-1,1))
r_epoch_reward[0:w] = np.reshape(sa_sim_lengths[1][0:w], (-1,1))
# process for histogram
# now we plot
sk = 3 # skip num
f, ax = plt.subplots(1,1,figsize=(12, 6)) #, sharex=True)
ax.errorbar(
x=np.arange(n_epochs)[::sk],
y=r_sims_lengs[::sk],
yerr=sm_sim_lengths[0][::sk],
label = 'average simulation length',
alpha=.4,
)
ax.set_ylabel('simulation length')
ax.axhline(1, color='grey', linestyle='--')
ax.set_xlabel('epoch')
ax2 = ax.twinx()
ax2.errorbar(
x=np.arange(n_epochs)[::sk],
y=r_epoch_reward[::sk],
yerr=sm_sim_lengths[0][::sk],
label = 'average cumulative reward',
color = 'r',
alpha=.4
)
ax2.set_ylabel("average reward")
ax2.legend(loc='best', bbox_to_anchor=(1, 0.5))
ax.legend(loc='best', bbox_to_anchor=(1, 0.37))
f.show()
# plot again, but next to each other
# now let's do two plots
sk = 1
fig = plt.figure(figsize=(12,8))
AX = plt.GridSpec(2, 3)
AX.update(wspace = 1, hspace = 1)
ax1 = plt.subplot(AX[0,:2])
ax2 = plt.subplot(AX[1,:2])
ax3 = plt.subplot(AX[1,2:])
ax2.plot(np.arange(n_epochs)[::sk],
r_epoch_reward[::sk],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average cumulative reward',
color = 'gray',
alpha=1,
linewidth=1
)
# get upper and lower bounds
upper = np.add(r_epoch_reward.flatten(), sm_sim_lengths[1].flatten())
lower = np.subtract(r_epoch_reward.flatten(),sm_sim_lengths[1].flatten())
ax2.fill_between(np.arange(n_epochs),
upper,
lower,
color = 'gray',
alpha=.4,
linewidth=.1
)
ax1.plot(np.arange(n_epochs)[::sk],
r_sims_lengs[::sk],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
color = 'g',
alpha=1,
linewidth=1
)
# get upper and lower bounds
upper = np.add(r_sims_lengs.flatten(), sm_sim_lengths[0].flatten())
lower = np.subtract(r_sims_lengs.flatten(),sm_sim_lengths[0].flatten())
ax1.fill_between(np.arange(n_epochs),
upper,
lower,
color = 'g',
alpha=.4,
linewidth=.1
)
ax3.hist(all_sims_lengs[1][0,:], bins=16, label='first epoch',
alpha=.6, density=True, color = gr_pal[-1])
ax3.title.set_text("")
ax3.hist(all_sims_lengs[1][-1,:], bins=16, label="last epoch",
alpha=.6, density=True, color = gr_pal[3])
#ax3.legend(prop={'size': 20})
sns.despine()
fs = 19
ax2.set_ylabel("avg. cumulative reward", fontsize = fs)
ax2.set_xlabel("epochs", fontsize = fs)
ax1.set_ylabel("avg. simulation length", fontsize = fs)
ax1.set_xlabel("epochs", fontsize = fs)
ax1.axhline(1, color='grey', linestyle='--', alpha=.5)
ax3.set_xlabel("simulation length", fontsize = fs)
ax3.set_ylabel("proportion", fontsize = fs)
ax3.set_xticks([0,15])
ax3.set_yticks([0,.5,1])
#ax1.legend(loc='best')
#ax2.legend(loc='best')
sns.despine()
fig.tight_layout()
fig.show()
'''now plot input'''
# sanity-check the shape of the pulled caches
np.shape(Log_full_caches)
Log_caches_ = np.mean(Log_caches, axis=0)
inpt_sem = sem(Log_caches, axis=0)
Log_sem_caches_ = np.mean(Log_sem_caches, axis=0)
n_e = 1
mu_first, er_first, mu_last, er_last = n_epoch_inpt_calc(Log_caches_,
Log_sem_caches_,
n_e, axis=0)
print("mu shape:", np.shape(mu_first.flatten()))
print("err shape:", np.shape(er_first.flatten()))
f, ax = plt.subplots(1, 1, figsize=(5, 10))
ax.errorbar(
x=np.arange(n_param), y=mu_first, yerr=inpt_sem[0], label="first %d epochs" % (n_e))
ax.errorbar(
x=np.arange(n_param), y=mu_last, yerr=inpt_sem[-1], label="last %d epochs" % (n_e))
ax.legend()
#ax[0].set_ylim([-.05, .7])
ax.set_ylabel('input gate value')
ax.set_xlabel('Time')
ax.set_xticks(np.arange(0, p.env.n_param, p.env.n_param - 1))
'''Now plot sim origins '''
np.shape(origins_data)
av_origins = np.mean(origins_data, axis=0)
sem_origins = sem(origins_data, axis=0)
labels = ['target','lure',
'target/lure overlap', 'novel']
f, ax = plt.subplots(figsize=(15, 10)) #, sharex=True)
for orig in range(np.shape(av_origins)[0]):
upper = np.add(av_origins[orig].flatten(), sem_origins[orig].flatten())
lower = np.subtract(av_origins[orig].flatten(), sem_origins[orig].flatten())
ax.fill_between(np.arange(n_epochs),
upper,
lower,
alpha=.4,
linewidth=.1
)
ax.plot(np.arange(n_epochs),
av_origins[orig],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
alpha=1,
linewidth=1,
label = labels[orig]
)
ax.set_ylabel('% of total instances per epoch')
ax.set_xlabel('epoch')
sns.despine()
ax.legend(loc = 'center left', title = 'feature origins:',
title_fontsize = 'large')
f.tight_layout()
f.show()
'''plot sim origin and input together'''
fig = plt.figure(figsize=(17,8))
AX = plt.GridSpec(1, 5)
AX.update(wspace = 2, hspace = 0.5)
ax1 = plt.subplot(AX[0,:3])
ax2 = plt.subplot(AX[0,3:])
# first origins
orig_colors = [gr_pal[0], gr_pal[1], gr_pal[2], gr_pal[5]]
for orig in range(np.shape(av_origins)[0]):
    upper = np.add(av_origins[orig].flatten(), sem_origins[orig].flatten())
    lower = np.subtract(av_origins[orig].flatten(), sem_origins[orig].flatten())
    ax1.fill_between(np.arange(n_epochs),
                     upper,
                     lower,
                     alpha=.4,
                     linewidth=.1,
                     color=orig_colors[orig]
                     )
    ax1.plot(np.arange(n_epochs),
             av_origins[orig],
             #yerr=sm_sim_lengths[0][::sk],
             #label = 'average simulation length',
             alpha=1,
             linewidth=1,
             label=labels[orig],
             color=orig_colors[orig]
             )
ax1.set_ylabel('proportion of outputted features')
ax1.set_xlabel('epoch')
ax1.set_yticks([.1,.3,.5,.7])
sns.despine()
#ax1.legend(loc = 'center left', title = 'feature origins:',
# prop={'size': 20}, ncol=2, title_fontsize = 'small')
#f.tight_layout()
#f.show()
#now inpt
Log_caches_ = np.mean(Log_caches, axis=0)
# get SEM
inpt_sem = sem(Log_caches, axis=0)
np.shape(inpt_sem)
n_e = 1
#mu_first, er_first, mu_last, er_last = n_epoch_inpt_calc(Log_caches_,
# Log_sem_caches_,)
labels = ['first epoch','last epoch']
ep_colors = [gr_pal[-1],gr_pal[3]]
conds = [0,-1]
for idx, cond in enumerate(conds):
upper = np.add(Log_caches_[cond].flatten(), inpt_sem[cond].flatten())
lower = np.subtract(Log_caches_[cond].flatten(), inpt_sem[cond].flatten())
ax2.fill_between(np.arange(T_part),
upper,
lower,
alpha=.3,
linewidth=.1,
color = ep_colors[idx]
)
ax2.plot(np.arange(T_part),
Log_caches_[cond],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
alpha=1,
linewidth=2,
label = labels[idx],
color = ep_colors[idx]
)
ax2.set_ylabel('input gate value')
ax2.set_xlabel('time')
#print(ax2.get_xticks())
ax2.set_xticks([0,15])
ax2.set_yticks([0,.3,.6])
ax2.axvline(2, color='grey', linestyle='--', alpha=.5)
ax1.axhline(.75, color='grey', linestyle='--', alpha=.5)
sns.despine()
#ax2.legend(prop={'size': 20})#loc = 'center left', title = 'feature origins:',
#title_fontsize = 'large')
'''plot ratio'''
log_ratios = []
for i in range(0,11):
sub_data = origins_data[i]
log_ratios.append(sub_data[0]/sub_data[1])
av_ratio = np.mean(log_ratios, axis=0)
sem_ratio = sem(log_ratios, axis=0)
f_l_ratios = [av_ratio[0],av_ratio[-1]]
f_l_err = [sem_ratio[0],sem_ratio[-1]]
# calculate significance between first and last
p_val = ttest_ind(np.asarray(log_ratios)[:,0],np.asarray(log_ratios)[:,-1])
print(p_val)
np.shape(np.asarray(log_ratios)[:,0])
labels = ['first epoch','last epoch']
f, ax = plt.subplots(1,1,figsize=(6, 6)) #, sharex=True)
ax.bar(
x=(np.arange(2)),
height=f_l_ratios,
yerr=f_l_err,
color = ep_colors)
ax.set_xticks(np.arange(len(labels)))
ax.set_ylabel('target / lure ratio')
ax.set_xticklabels(labels, fontsize=20)
ax.set_yticks([0,.5,1,1.5])
ax.axhline(1, color='grey', linestyle='--', alpha=.5)
sns.despine()
'''single origin example and novel/mem1 ratio'''
labels = ['target','lure',
'target/lure overlap', 'novel']
orig_colors = [gr_pal[0],gr_pal[1], gr_pal[2], gr_pal[5]]
plot_data = origins_data[1]
f, ax = plt.subplots(1,1, figsize=(6,6))
for orig in range(np.shape(av_origins)[0]):
upper = np.add(plot_data[orig].flatten(), sem_origins[orig].flatten())
lower = np.subtract(plot_data[orig].flatten(), sem_origins[orig].flatten())
ax.fill_between(np.arange(n_epochs),
upper,
lower,
alpha=.4,
linewidth=.1,
color = orig_colors[orig]
)
ax.plot(np.arange(n_epochs),
plot_data[orig],
#yerr=sm_sim_lengths[0][::sk],
#label = 'average simulation length',
alpha=1,
linewidth=1,
label = labels[orig],
color = orig_colors[orig]
)
ax.set_ylabel('proportion of outputted features')
ax.set_xlabel('epoch')
ax.set_yticks([.1,.3,.5,.7])
sns.despine()
# process data
log_ratios_nt = [] # novel/target log
for i in range(0,11):
sub_data = origins_data[i]
log_ratios_nt.append(sub_data[0]/sub_data[-1])
av_ratio_nt = np.mean(log_ratios_nt, axis=0)
sem_ratio_nt = sem(log_ratios_nt, axis=0)
f_l_ratios_nt = [av_ratio_nt[0],av_ratio_nt[-1]]
f_l_err_nt = [sem_ratio_nt[0],sem_ratio_nt[-1]]
p_val = ttest_ind(np.asarray(log_ratios)[:,0],np.asarray(log_ratios)[:,-1])
print(p_val)
f, ax = plt.subplots(1,1, figsize=(6,6))
labels = ['first epoch','last epoch']
ax.bar(
x=(np.arange(2)),
height=f_l_ratios_nt,
yerr=f_l_err_nt,
color = ep_colors)
''' final plot '''
# first, reprocess data for sim lengths
log_ratios = []
for i in range(0,11):
sub_data = origins_data[i]
log_ratios.append(sub_data[0]/sub_data[1])
av_ratio =
|
np.mean(log_ratios, axis=0)
|
numpy.mean
|
# Class responsible for constructing segment objects
import numpy as np
from .Endpoint import Endpoint
from .Intersection import Intersection
from .helper import orientation
class Segment:
def __init__(self, left, right):
self.node = None
if left < right:
self.leftPoint = Endpoint(left[0], left[1], True, self)
self.rightPoint = Endpoint(right[0], right[1], False, self)
else:
self.leftPoint = Endpoint(right[0], right[1], True, self)
self.rightPoint = Endpoint(left[0], left[1], False, self)
def __str__(self):
return "{{{}, {}}}".format(self.leftPoint, self.rightPoint)
def __eq__(self, o):
return self.leftPoint == o.leftPoint and self.rightPoint == o.rightPoint
def __ne__(self, o):
        return o is None or (not self == o)
def __repr__(self):
return "{{{}, {}}}".format(self.leftPoint, self.rightPoint)
def getEndpoints(self):
return (self.leftPoint, self.rightPoint)
# Just tells us if the given point is one of the endpoints
def is_endpoint(self, pt):
left = self.leftPoint.x == pt[0] and self.leftPoint.y == pt[1]
right = self.rightPoint.x == pt[0] and self.rightPoint.y == pt[1]
return left or right
# I think the intersection code should be here.
# Check for an intersection between segments A and B.
# If there is no intersection, then return None
def intersects(segA, segB):
# Put endpoints in a useable format
a_left = np.array([segA.leftPoint.x, segA.leftPoint.y, 1])
a_right =
|
np.array([segA.rightPoint.x, segA.rightPoint.y, 1])
|
numpy.array
|
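# Sketch (not from the original file, whose intersects() body is truncated
# above): with endpoints lifted to homogeneous coordinates [x, y, 1], the
# cross product of two points gives the line through them, and two segments
# intersect (ignoring collinear edge cases) iff each segment's endpoints
# straddle the other's line.
import numpy as np

def segments_intersect_sketch(a_left, a_right, b_left, b_right):
    line_a = np.cross(a_left, a_right)  # line through segment A's endpoints
    line_b = np.cross(b_left, b_right)  # line through segment B's endpoints
    # sign(line . p) tells which side of the line the point p lies on
    straddles_a = np.dot(line_a, b_left) * np.dot(line_a, b_right) <= 0
    straddles_b = np.dot(line_b, a_left) * np.dot(line_b, a_right) <= 0
    return straddles_a and straddles_b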
"""
Genetic Algorithms for Digital Signal Processing
Created on Mon Oct 05 20:01:05 2020
Last Edited on Mon Oct 12 2020 by <NAME>
TODO tidy up this code and finalise it. Add the third FIR filter method in here too.
"""
from os import major
import numpy as np
import matplotlib
from scipy import signal
from scipy.fftpack import fft
import matplotlib.pyplot as plt
import DSP_GA as ga
class DSP_Signal():
def __init__(self, filename, fs=1024, N_Coeff=400):
file = open(filename, "r")
self.y_0 = []
for line in file:
words = line.split(" ")
for word in words:
if word != "":
self.y_0.append(float(word))
self.fs = fs
self.N = len(self.y_0)
self.N_2 = int(self.N/2)
self.t = [x/self.fs for x in list(range(0, self.N))]
self.f = [x*self.fs/self.N for x in list(range(0, self.N_2))]
self.P_0 = np.var(self.y_0)
self.FFT_0 = fft(self.y_0)
self.N_Coeff = N_Coeff # Number of coefficients
#Window Filtering method for the data class
def WF(self, GA_data):
#GA Data: [noise_f_1, noise_f_2, width]
# Window Filtering
self.width_WF = 8 # Width of stop band, Hz
self.band_1 = [GA_data[0] -GA_data[2]/2, GA_data[0]+GA_data[2]/2] # Define band 1 bounds
self.band_2 = [GA_data[1] -GA_data[2]/2, GA_data[1]+GA_data[2]/2] # Define band 2 bounds
self.filter1_WF = signal.firwin(self.N_Coeff+1, self.band_1, window='hann', pass_zero='bandstop', fs=self.fs) # Filter for noise frequency 1
self.filter2_WF = signal.firwin(self.N_Coeff+1, self.band_2, window='hann', pass_zero='bandstop', fs=self.fs) # Filter for noise frequency 2
self.filter_WF = signal.convolve(self.filter1_WF, self.filter2_WF) # Combined filter for noise frequencies
self.y_WF = signal.lfilter(self.filter_WF, 1, self.y_0) # Apply noise filters to original data
self.f_WF, self.h_WF = signal.freqz(self.filter_WF, 1, fs=self.fs) #
self.FFT_WF = fft(self.y_WF)
return self.SNR(self.y_WF)
#Parks McLellan Filtering Method
def PM(self, GA_data, TW =3, BW=5):
# Filter Bands for filtering frequency 1 & 2
f_1 = GA_data[0]
f_2 = GA_data[1]
if len(GA_data) > 2:
TW = GA_data[2]
if len(GA_data) > 3:
BW = GA_data[3]
band1_PM = [0, f_1 -BW/2-TW, f_1 -BW/2, f_1+BW/2, f_1+BW/2+TW, self.fs/2]
band2_PM = [0, f_2 -BW/2-TW, f_2 -BW/2, f_2+BW/2, f_2+BW/2+TW, self.fs/2]
gain_PM = [1, 0, 1]
# Create filters for filtering frequency 1 & 2
filter1_PM = signal.remez(self.N_Coeff+1, band1_PM, gain_PM, fs=self.fs) # Filter frequency 1
filter2_PM = signal.remez(self.N_Coeff+1, band2_PM, gain_PM, fs=self.fs) # Filter frequency 2
filter_PM = signal.convolve(filter1_PM, filter2_PM) # Combined Filter
self.y_PM = signal.lfilter(filter_PM, 1, self.y_0) # Filter original data in time domain
self.f_PM, self.h_PM = signal.freqz(filter_PM, 1, fs=self.fs) # Return filter frequency response
self.FFT_PM = fft(self.y_PM) # Filtered data frequency domain response
return self.SNR(self.y_PM)
# TODO Frequency Sampling Filtering Method. THIS IS COPIED FROM ASSIGNMENT I.
    def FS(self, noise_f, fs=None):
        # Made self-contained: noise_f is an explicit [f1, f2] argument and the
        # instance attributes replace the module-level names from Assignment I.
        if fs is None:
            fs = self.fs
        trans_FS = 4 # Width of transition from pass band to stop band, Hz
        width_FS = 8 # Width of the stop band, Hz
        band1_FS = [0, noise_f[0]-width_FS/2-trans_FS, noise_f[0]-width_FS/2, noise_f[0]+width_FS/2, noise_f[0]+width_FS/2+trans_FS, fs/2]
        band2_FS = [0, noise_f[1]-width_FS/2-trans_FS, noise_f[1]-width_FS/2, noise_f[1]+width_FS/2, noise_f[1]+width_FS/2+trans_FS, fs/2]
        gain_FS = [1, 1, 0, 0, 1, 1] # Gain coefficients of bands
        filter1_FS = signal.firwin2(self.N_Coeff+1, band1_FS, gain_FS, fs=fs) # Filter for noise frequency 1
        filter2_FS = signal.firwin2(self.N_Coeff+1, band2_FS, gain_FS, fs=fs) # Filter for noise frequency 2
        filter_FS = signal.convolve(filter1_FS, filter2_FS) # Filter for both noise frequencies
        self.y_FS = signal.lfilter(filter_FS, 1, self.y_0) # Apply filter to time domain data
        self.f_FS, self.h_FS = signal.freqz(filter_FS, 1, fs=fs) # Filter response
        self.FFT_FS = fft(self.y_FS) # Filtered frequency domain response
        return self.SNR(self.y_FS)
# TODO maybe add IIR filtering method in here but that might be to much. Don't know tho.
def IIR(self, fs):
# The undesired frequencies and desired bandwidth of
freq1 = 31.456
freq2 = 74.36
BW = 5
deg1 = 2 * np.pi * (freq1 / fs)
deg2 = 2 * np.pi * (freq2 / fs)
r = 1 - (BW / fs) * np.pi
# Assign the coefficients for first and second filters
a = 1 * 1
b = (1 * -np.exp(-deg1 * 1j)) + (1 * -np.exp(deg1 * 1j))
c = (1 * -np.exp(-deg1 * 1j)) * (1 * -np.exp(deg1 * 1j))
d = 1 * 1 * 1j
e = (-r * np.exp(-deg1 * 1j)) + (-r * np.exp(deg1 * 1j))
f = (-r * np.exp(-deg1 * 1j)) * (-r * np.exp(deg1 * 1j))
g = 1 * 1
h = (-1 * np.exp(-deg2 * 1j)) + (-1 * np.exp(deg2 * 1j))
ii = (-1 * np.exp(-deg2 * 1j)) * (-1 * np.exp(deg2 * 1j))
j = 1 * 1
k = (-r * np.exp(-deg2 * 1j)) + (-r * np.exp(deg2 * 1j))
l = (-r * np.exp(-deg2 * 1j)) * (-r * np.exp(deg2 * 1j))
        # Calculate the gain of the overall transfer function
        Wf = 2 * np.pi * 10
        # Evaluate [z**0, z**-1, z**-2] at z = exp(1j*Wf)
        ND_array = np.array([np.exp(0), np.exp(-1j * Wf), np.exp(-2j * Wf)])
        H_Z1_dot = np.dot(ND_array, [a, b, c])
        H_Z2_dot = np.dot(ND_array, [d, e, f])
        Gain = abs(H_Z2_dot / H_Z1_dot)
        # convolve the numerator/denominator of the first transfer function with those of the second
NUM_Z = np.array( np.convolve( [a, b, c], [g, h, ii] ) )
DEN_Z = np.array( np.convolve( [d, e, f], [j, k, l] ) )
w, H = signal.freqz(Gain * NUM_Z, DEN_Z, self.N)
f = fs * w / (2 * np.pi)
return 0
    # Returns the reduction in signal variance relative to the original data (used here as the SNR measure)
def SNR (self, y):
return self.P_0 - np.var(y)
# Plots a Fast Fourier Transform for simple graphing
def FFTplot(self, f, FFT, title="ECG Signal Frequency Spectrum"):
plt.figure()
plt.plot(f, abs(FFT)[:self.N_2])
plt.xlabel("Frequency (Hz)")
plt.ylabel("Voltage (uV)")
plt.title(title)
plt.show()
#The GA_filter function filters an input waveform
def GA_filter(waveform, input_num, solutions_per_population, mating_parent_number, num_generations):
# Defining the population size.
pop_size = (solutions_per_population,input_num) # The population will have sol_per_pop chromosome where each chromosome has num_weights genes.
#Creating the initial population.
new_population = ga.create_population(pop_size)
best_outputs = []
for generation in range(num_generations):
# Measuring the fitness of each chromosome in the population.
fitness = ga.cal_pop_fitness(waveform, new_population)
# The best result in the current iteration.
best_outputs.append(
|
np.max(fitness)
|
numpy.max
|
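# Sketch of how such a GA loop typically continues after scoring a generation
# (the original file is truncated above; this is a hypothetical continuation
# in plain numpy rather than assuming further DSP_GA helper functions).
import numpy as np

def next_generation(population, fitness, n_parents, mutation_scale=0.5, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    # Elitist selection: keep the fittest chromosomes as parents.
    parents = population[np.argsort(fitness)[-n_parents:]]
    n_children = population.shape[0] - n_parents
    crossover_point = population.shape[1] // 2
    children = np.empty((n_children, population.shape[1]))
    for k in range(n_children):
        p1 = parents[k % n_parents]
        p2 = parents[(k + 1) % n_parents]
        # Single-point crossover: first half from one parent, second half from the other.
        children[k, :crossover_point] = p1[:crossover_point]
        children[k, crossover_point:] = p2[crossover_point:]
    # Mutate one randomly chosen gene per child by a small uniform perturbation.
    cols = rng.integers(0, population.shape[1], size=n_children)
    children[np.arange(n_children), cols] += rng.uniform(
        -mutation_scale, mutation_scale, size=n_children)
    return np.vstack([parents, children])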
#!/usr/bin/env python
# coding=utf-8
import sys
import os
import numpy as np
import lmoments as lmom
from lmoments import *
#from math import *
from RegscorePy import *
import time
# ==============================================================
# Arguments
# ==============================================================
years = int(sys.argv[1])
yeare = int(sys.argv[2])
ysize = int(sys.argv[3])
xsize = int(sys.argv[4])
outdir = sys.argv[5]
var = sys.argv[6]
FUNC = sys.argv[7]
try:
    norm = sys.argv[8]
except IndexError:
    norm = ''
yearlist= np.arange(years, yeare+1)
# ==============================================================
def progressbar(it, prefix="", size=60, file=sys.stdout):
count = len(it)
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
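# Example usage (a sketch): wrap any iterable to get a console progress bar,
# e.g. `for year in progressbar(yearlist, prefix="years "): ...`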
# ==============================================================
# Prepare the maximum values
# ==============================================================
datay = []
for year in yearlist:
FDAT = outdir+ '/amax/'+ var + str(year) + '_anmax.bin'
dataa = np.fromfile(FDAT, 'float32').reshape(ysize,xsize)
datay = np.append(datay,dataa)
del dataa
def normalization(data, norm):
data_back = np.copy(data)
if np.nanmean(data) == -9999:
data_back[:] = -9999
else:
if norm == "_norm":
data_back = (data - np.nanmean(data)) / np.nanstd(data)
elif norm == "_maxmin":
data_back = (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data))
return data_back
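# Worked example (a sketch): normalization(np.array([1., 2., 3.]), "_maxmin")
# returns array([0. , 0.5, 1. ]), since (x - min) / (max - min) maps the data
# onto [0, 1]; "_norm" would z-score with np.nanmean and np.nanstd instead.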
datm = datay.reshape(-1,ysize,xsize)
for i in range(ysize):
for j in range(xsize):
#datm[:,i,j] = sorted(datm[:,i,j],reverse=True)
datm[:,i,j] = sorted(datm[:,i,j])
if norm != '':
datm[:,i,j] = normalization(datm[:,i,j], norm)
# ==============================================================
# Calculate the parameters for each function
# 1. GEV
# ==============================================================
para1 = np.zeros((ysize, xsize))
para2 = np.zeros((ysize, xsize))
para3 = np.zeros((ysize, xsize))
para4 = np.zeros((ysize, xsize))
para5 =
|
np.zeros((ysize, xsize))
|
numpy.zeros
|
import os
import gdal
import numpy as np
import warnings
def main(input_folder, output_folder, Date):
# Do not show warnings
warnings.filterwarnings('ignore')
import pyWAPOR.ETLook as ETLook
import pyWAPOR.Functions.Processing_Functions as PF
import pyWAPOR.ETLook.outputs as out
# Define Date string
Date_str = "%d%02d%02d" %(Date.year, Date.month, Date.day)
# Input folder Date
input_folder_date = os.path.join(input_folder, Date_str)
############################ Define inputs ################################
#input_files
ALBEDO_filename = os.path.join(input_folder_date, "ALBEDO_%s.tif" %Date_str)
NDVI_filename = os.path.join(input_folder_date, "NDVI_%s.tif" %Date_str)
LST_filename = os.path.join(input_folder_date, "LST_%s.tif" %Date_str)
Time_filename = os.path.join(input_folder_date, "Time_%s.tif" %Date_str)
Lat_filename = os.path.join(input_folder_date, "Lat_%s.tif" %Date_str)
Lon_filename = os.path.join(input_folder_date, "Lon_%s.tif" %Date_str)
DEM_filename = os.path.join(input_folder_date, "DEM.tif")
Slope_filename = os.path.join(input_folder_date, "Slope.tif")
Aspect_filename = os.path.join(input_folder_date, "Aspect.tif")
LandMask_filename = os.path.join(input_folder_date, "LandMask.tif")
Bulk_filename =os.path.join(input_folder_date, "Bulk_Stomatal_resistance.tif")
MaxObs_filename = os.path.join(input_folder_date, "Maximum_Obstacle_Height.tif")
Pair_24_0_filename = os.path.join(input_folder_date, "Pair_24_0_%s.tif" %Date_str)
Pair_inst_0_filename = os.path.join(input_folder_date, "Pair_inst_0_%s.tif" %Date_str)
Pair_inst_filename = os.path.join(input_folder_date, "Pair_inst_%s.tif" %Date_str)
Pre_filename = os.path.join(input_folder_date, "Precipitation_%s.tif" %Date_str)
Hum_24_filename = os.path.join(input_folder_date, "qv_24_%s.tif" %Date_str)
Hum_inst_filename = os.path.join(input_folder_date, "qv_inst_%s.tif" %Date_str)
Tair_24_filename = os.path.join(input_folder_date, "tair_24_%s.tif" %Date_str)
Tair_inst_filename = os.path.join(input_folder_date,"tair_inst_%s.tif" %Date_str)
Tair_amp_filename = os.path.join(input_folder_date, "Tair_amp_%s.tif" %Date_str)
Wind_24_filename = os.path.join(input_folder_date, "wind_24_%s.tif" %Date_str)
Wind_inst_filename = os.path.join(input_folder_date, "wind_inst_%s.tif" %Date_str)
WatCol_inst_filename = os.path.join(input_folder_date, "wv_inst_%s.tif" %Date_str)
Trans_24_filename = os.path.join(input_folder_date, "Trans_24_%s.tif" %Date_str)
############################ Define outputs ###############################
# Output folder Date
output_folder_date = os.path.join(output_folder, Date_str)
if not os.path.exists(output_folder_date):
os.makedirs(output_folder_date)
#output_files
vc_filename = os.path.join(output_folder_date, "vc_%s.tif" %Date_str)
lai_filename = os.path.join(output_folder_date, "LAI_%s.tif" %Date_str)
lai_eff_filename= os.path.join(output_folder_date, "LAI_eff_%s.tif" %Date_str)
sf_soil_filename = os.path.join(output_folder_date, "sf_soil_%s.tif" %Date_str)
lat_filename= os.path.join(output_folder_date, "lat_%s.tif" %Date_str)
slope_filename= os.path.join(output_folder_date, "slope_%s.tif" %Date_str)
aspect_filename = os.path.join(output_folder_date, "aspect_%s.tif" %Date_str)
ra_24_toa_filename = os.path.join(output_folder_date, "ra_24_toa_%s.tif" %Date_str)
ws_filename = os.path.join(output_folder_date, "ws_%s.tif" %Date_str)
diffusion_index_filename = os.path.join(output_folder_date, "diffusion_index_%s.tif" %Date_str)
ra_24_filename = os.path.join(output_folder_date, "ra_24_%s.tif" %Date_str)
stress_rad_filename = os.path.join(output_folder_date, "stress_rad_%s.tif" %Date_str)
p_air_24_filename = os.path.join(output_folder_date, "p_air_24_%s.tif" %Date_str)
vp_24_filename = os.path.join(output_folder_date, "vp_24_%s.tif" %Date_str)
svp_24_filename = os.path.join(output_folder_date, "svp_24_%s.tif" %Date_str)
vpd_24_filename = os.path.join(output_folder_date, "vpd_24_%s.tif" %Date_str)
stress_vpd_filename = os.path.join(output_folder_date, "stress_vpd_%s.tif" %Date_str)
stress_temp_filename = os.path.join(output_folder_date, "stress_temp_%s.tif" %Date_str)
r_canopy_0_filename= os.path.join(output_folder_date, "r_canopy_0_%s.tif" %Date_str)
t_air_k_24_filename = os.path.join(output_folder_date, "t_air_k_24_%s.tif" %Date_str)
l_net_filename = os.path.join(output_folder_date, "l_net_%s.tif" %Date_str)
int_mm_filename = os.path.join(output_folder_date, "int_mm_%s.tif" %Date_str)
lh_24_filename = os.path.join(output_folder_date, "lh_24_%s.tif" %Date_str)
int_wm2_filename = os.path.join(output_folder_date, "int_wm2_%s.tif" %Date_str)
rn_24_filename = os.path.join(output_folder_date, "rn_24_%s.tif" %Date_str)
rn_24_canopy_filename= os.path.join(output_folder_date, "rn_24_canopy_%s.tif" %Date_str)
t_air_k_i_filename = os.path.join(output_folder_date, "t_air_k_i_%s.tif" %Date_str)
vp_i_filename = os.path.join(output_folder_date, "vp_i_%s.tif" %Date_str)
ad_moist_i_filename= os.path.join(output_folder_date, "ad_moist_i_%s.tif" %Date_str)
ad_dry_i_filename = os.path.join(output_folder_date, "ad_dry_i_%s.tif" %Date_str)
ad_i_filename= os.path.join(output_folder_date, "ad_i_%s.tif" %Date_str)
u_b_i_bare_filename= os.path.join(output_folder_date, "u_b_i_bare_%s.tif" %Date_str)
lon_filename= os.path.join(output_folder_date, "lon_%s.tif" %Date_str)
ha_filename= os.path.join(output_folder_date, "ha_%s.tif" %Date_str)
ied_filename= os.path.join(output_folder_date, "ied_%s.tif" %Date_str)
h0_filename = os.path.join(output_folder_date, "h0_%s.tif" %Date_str)
h0ref_filename = os.path.join(output_folder_date, "h0ref_%s.tif" %Date_str)
m_filename = os.path.join(output_folder_date, "m_%s.tif" %Date_str)
rotm_filename = os.path.join(output_folder_date, "rotm_%s.tif" %Date_str)
Tl2_filename = os.path.join(output_folder_date, "Tl2_%s.tif" %Date_str)
B0c_filename = os.path.join(output_folder_date, "B0c_%s.tif" %Date_str)
Bhc_filename = os.path.join(output_folder_date, "Bhc_%s.tif" %Date_str)
Dhc_filename = os.path.join(output_folder_date, "Dhc_%s.tif" %Date_str)
ra_hor_clear_i_filename = os.path.join(output_folder_date, "ra_hor_clear_i_%s.tif" %Date_str)
emiss_atm_i_filename = os.path.join(output_folder_date, "emiss_atm_i_%s.tif" %Date_str)
rn_bare_filename = os.path.join(output_folder_date, "rn_bare_%s.tif" %Date_str)
rn_full_filename= os.path.join(output_folder_date, "rn_full_%s.tif" %Date_str)
u_b_i_full_filename = os.path.join(output_folder_date, "u_b_i_full_%s.tif" %Date_str)
u_star_i_bare_filename = os.path.join(output_folder_date, "u_star_i_bare_%s.tif" %Date_str)
u_star_i_full_filename = os.path.join(output_folder_date, "u_star_i_full_%s.tif" %Date_str)
u_i_soil_filename = os.path.join(output_folder_date, "u_i_soil_%s.tif" %Date_str)
ras_filename = os.path.join(output_folder_date, "ras_%s.tif" %Date_str)
raa_filename = os.path.join(output_folder_date, "raa_%s.tif" %Date_str)
rac_filename= os.path.join(output_folder_date, "rac_%s.tif" %Date_str)
t_max_bare_filename = os.path.join(output_folder_date, "t_max_bare_%s.tif" %Date_str)
t_max_full_filename= os.path.join(output_folder_date, "t_max_full_%s.tif" %Date_str)
w_i_filename = os.path.join(output_folder_date, "w_i_%s.tif" %Date_str)
t_dew_i_filename = os.path.join(output_folder_date, "t_dew_i_%s.tif" %Date_str)
t_wet_i_filename = os.path.join(output_folder_date, "t_wet_i_%s.tif" %Date_str)
t_wet_k_i_filename = os.path.join(output_folder_date, "t_wet_k_i_%s.tif" %Date_str)
lst_max_filename = os.path.join(output_folder_date, "lst_max_%s.tif" %Date_str)
se_root_filename = os.path.join(output_folder_date, "se_root_%s.tif" %Date_str)
stress_moist_filename= os.path.join(output_folder_date, "stress_moist_%s.tif" %Date_str)
r_canopy_filename= os.path.join(output_folder_date, "r_canopy_%s.tif" %Date_str)
z_obst_filename = os.path.join(output_folder_date, "z_obst_%s.tif" %Date_str)
z_oro_filename = os.path.join(output_folder_date, "z_oro_%s.tif" %Date_str)
z0m_filename = os.path.join(output_folder_date, "z0m_%s.tif" %Date_str)
ra_canopy_init_filename = os.path.join(output_folder_date, "ra_canopy_init_%s.tif" %Date_str)
u_b_24_filename = os.path.join(output_folder_date, "u_b_24_%s.tif" %Date_str)
disp_filename = os.path.join(output_folder_date, "disp_%s.tif" %Date_str)
u_star_24_init_filename = os.path.join(output_folder_date, "u_star_24_init_%s.tif" %Date_str)
ad_dry_24_filename = os.path.join(output_folder_date, "ad_dry_24_%s.tif" %Date_str)
ad_moist_24_filename = os.path.join(output_folder_date, "ad_moist_24_%s.tif" %Date_str)
ad_24_filename = os.path.join(output_folder_date, "ad_24_%s.tif" %Date_str)
psy_24_filename = os.path.join(output_folder_date, "psy_24_%s.tif" %Date_str)
ssvp_24_filename = os.path.join(output_folder_date, "ssvp_24_%s.tif" %Date_str)
t_24_init_filename = os.path.join(output_folder_date, "t_24_init_%s.tif" %Date_str)
h_canopy_24_init_filename= os.path.join(output_folder_date, "h_canopy_24_init_%s.tif" %Date_str)
t_24_filename= os.path.join(output_folder_date, "t_24_%s.tif" %Date_str)
t_24_mm_filename= os.path.join(output_folder_date, "t_24_mm_%s.tif" %Date_str)
rn_24_soil_filename= os.path.join(output_folder_date, "rn_24_soil_%s.tif" %Date_str)
r_soil_filename= os.path.join(output_folder_date, "r_soil_%s.tif" %Date_str)
ra_soil_init_filename= os.path.join(output_folder_date, "ra_soil_init_%s.tif" %Date_str)
u_star_24_soil_init_filename= os.path.join(output_folder_date, "u_star_24_soil_init_%s.tif" %Date_str)
g0_bs_filename= os.path.join(output_folder_date, "g0_bs_%s.tif" %Date_str)
g0_24_filename= os.path.join(output_folder_date, "g0_24_%s.tif" %Date_str)
e_24_init_filename= os.path.join(output_folder_date, "e_24_init_%s.tif" %Date_str)
h_soil_24_init_filename= os.path.join(output_folder_date, "h_soil_24_init_%s.tif" %Date_str)
e_24_filename= os.path.join(output_folder_date, "e_24_%s.tif" %Date_str)
e_24_mm_filename= os.path.join(output_folder_date, "e_24_mm_%s.tif" %Date_str)
et_24_mm_filename= os.path.join(output_folder_date, "et_24_mm_%s.tif" %Date_str)
rn_24_grass_filename= os.path.join(output_folder_date, "rn_24_grass_%s.tif" %Date_str)
et_ref_24_filename= os.path.join(output_folder_date, "et_ref_24_%s.tif" %Date_str)
et_ref_24_mm_filename= os.path.join(output_folder_date, "et_ref_24_mm_%s.tif" %Date_str)
########################## Open input rasters #############################
dest_lst = gdal.Open(LST_filename)
lst = dest_lst.GetRasterBand(1).ReadAsArray()
lst[lst == -9999] = np.nan
dest_albedo = gdal.Open(ALBEDO_filename)
r0 = dest_albedo.GetRasterBand(1).ReadAsArray()
r0[np.isnan(lst)] = np.nan
dest_ndvi = gdal.Open(NDVI_filename)
ndvi = dest_ndvi.GetRasterBand(1).ReadAsArray()
ndvi[np.isnan(lst)] = np.nan
desttime = gdal.Open(Time_filename)
dtime = desttime.GetRasterBand(1).ReadAsArray()
dtime[np.isnan(lst)] = np.nan
dest_lat = gdal.Open(Lat_filename)
lat_deg = dest_lat.GetRasterBand(1).ReadAsArray()
lat_deg[np.isnan(lst)] = np.nan
dest_lon = gdal.Open(Lon_filename)
lon_deg = dest_lon.GetRasterBand(1).ReadAsArray()
lon_deg[np.isnan(lst)] = np.nan
dest_dem = gdal.Open(DEM_filename)
z = dest_dem.GetRasterBand(1).ReadAsArray()
z[np.isnan(lst)] = np.nan
dest_slope = gdal.Open(Slope_filename)
slope_deg = dest_slope.GetRasterBand(1).ReadAsArray()
slope_deg[np.isnan(lst)] = np.nan
dest_aspect = gdal.Open(Aspect_filename)
aspect_deg = dest_aspect.GetRasterBand(1).ReadAsArray()
aspect_deg[np.isnan(lst)] = np.nan
dest_lm = gdal.Open(LandMask_filename)
land_mask = dest_lm.GetRasterBand(1).ReadAsArray()
land_mask[np.isnan(lst)] = np.nan
#dest_bulk = gdal.Open(Bulk_filename)
#bulk = dest_bulk.GetRasterBand(1).ReadAsArray()
dest_maxobs = gdal.Open(MaxObs_filename)
z_obst_max = dest_maxobs.GetRasterBand(1).ReadAsArray()
z_obst_max[np.isnan(lst)] = np.nan
dest_pairsea24 = gdal.Open(Pair_24_0_filename)
p_air_0_24 = dest_pairsea24.GetRasterBand(1).ReadAsArray()
p_air_0_24 = ETLook.meteo.air_pressure_kpa2mbar(p_air_0_24)
p_air_0_24[np.isnan(lst)] = np.nan
dest_pairseainst = gdal.Open(Pair_inst_0_filename)
p_air_0_i = dest_pairseainst.GetRasterBand(1).ReadAsArray()
p_air_0_i = ETLook.meteo.air_pressure_kpa2mbar(p_air_0_i)
p_air_0_i[np.isnan(lst)] = np.nan
dest_pairinst = gdal.Open(Pair_inst_filename)
p_air_i = dest_pairinst.GetRasterBand(1).ReadAsArray()
p_air_i = ETLook.meteo.air_pressure_kpa2mbar(p_air_i)
p_air_i[np.isnan(lst)] = np.nan
dest_precip = gdal.Open(Pre_filename)
P_24 = dest_precip.GetRasterBand(1).ReadAsArray()
P_24[np.isnan(lst)] = np.nan
dest_hum24 = gdal.Open(Hum_24_filename)
qv_24 = dest_hum24.GetRasterBand(1).ReadAsArray()
qv_24[np.isnan(lst)] = np.nan
dest_huminst = gdal.Open(Hum_inst_filename)
qv_i = dest_huminst.GetRasterBand(1).ReadAsArray()
qv_i[np.isnan(lst)] = np.nan
dest_tair24 = gdal.Open(Tair_24_filename)
t_air_k_24 = dest_tair24.GetRasterBand(1).ReadAsArray()
t_air_24 = ETLook.meteo.air_temperature_celcius(t_air_k_24)
#t_air_24 = ETLook.meteo.disaggregate_air_temperature_daily(t_air_24_coarse, z, z_coarse, lapse)
t_air_24[np.isnan(lst)] = np.nan
dest_tairinst = gdal.Open(Tair_inst_filename)
t_air_k_i = dest_tairinst.GetRasterBand(1).ReadAsArray()
t_air_i = ETLook.meteo.air_temperature_celcius(t_air_k_i)
t_air_i[np.isnan(lst)] = np.nan
dest_tairamp = gdal.Open(Tair_amp_filename)
t_amp_year = dest_tairamp.GetRasterBand(1).ReadAsArray()
t_amp_year[np.isnan(lst)] = np.nan
dest_wind24 = gdal.Open(Wind_24_filename)
u_24 = dest_wind24.GetRasterBand(1).ReadAsArray()
u_24[np.isnan(lst)] = np.nan
dest_windinst = gdal.Open(Wind_inst_filename)
u_i = dest_windinst.GetRasterBand(1).ReadAsArray()
u_i[np.isnan(lst)] = np.nan
dest_watcol = gdal.Open(WatCol_inst_filename)
wv_i = dest_watcol.GetRasterBand(1).ReadAsArray()
wv_i[np.isnan(lst)] = np.nan
dest_trans = gdal.Open(Trans_24_filename)
trans_24 = dest_trans.GetRasterBand(1).ReadAsArray()
trans_24[np.isnan(lst)] = np.nan
# example file
geo_ex = dest_albedo.GetGeoTransform()
proj_ex = dest_albedo.GetProjection()
########################## Open input constants ###########################
doy = int(Date.strftime("%j"))
    aod550_i = 0.01 # https://ladsweb.modaps.eosdis.nasa.gov/archive/allData/61/MOD04_L2 (did not really find a standard product for this)
se_top = 0.5
porosity = 0.4
'''
http://lawr.ucdavis.edu/classes/SSC100/probsets/pset01.html
6. Calculate the porosity of a soil sample that has a bulk density of 1.35 g/cm3. Assume the particle density is 2.65 g/cm3.
    Porosity = (1 - (rho_b / rho_d)) x 100 = (1 - (1.35/2.65)) x 100 = 49%
'''
# Create QC array
QC = np.ones(lst.shape)
QC[np.isnan(lst)] = np.nan
# page 31 flow diagram
# **effective_leaf_area_index**************************************************
# constants or predefined:
nd_min = 0.125
nd_max = 0.8
vc_pow = 0.7
vc_min = 0
vc_max = 0.9677324224821418
lai_pow = -0.45
# **atmospheric canopy resistance***********************************************
# constants or predefined:
diffusion_slope = -1.33
diffusion_intercept = 1.15
t_opt = 25 # optimal temperature for plant growth
t_min = 0 # minimal temperature for plant growth
t_max = 50 # maximal temperature for plant growth
vpd_slope = -0.3
rs_min = 70
rcan_max = 1000000
# **net radiation canopy******************************************************
# constants or predefined:
vp_slope = 0.14
vp_offset = 0.34
lw_slope = 1.35
lw_offset = 0.35
int_max = 0.2
# **canopy resistance***********************************************************
# constants or predefined:
z_obs = 2
z_b = 100
z0m_bare = 0.001
r0_bare = 0.38
r0_full = 0.18
tenacity = 1.5
disp_bare = 0.0
disp_full = 0.667
fraction_h_bare = 0.65
fraction_h_full = 0.95
z0m_full = 0.1
# **initial canopy aerodynamic resistance***********************************************************
# constants or predefined:
ndvi_obs_min = 0.25
ndvi_obs_max = 0.75
obs_fr = 0.25
dem_resolution = 250
# **ETLook.unstable.initial_friction_velocity_daily***********************************************************
# constants or predefined:
c1 = 1
# **ETLook.unstable.transpiration***********************************************************
# constants or predefined:
iter_h = 3
# **ETLook.resistance.soil_resistance***********************************************************
# constants or predefined:
r_soil_pow = -2.1
r_soil_min = 800
# **ETLook.unstable.initial_sensible_heat_flux_soil_daily***********************************************************
# constants or predefined:
#porosity = 0.4 #Note: soil dependent
#se_top = 1.0 #Note should be input !
rn_slope = 0.92
rn_offset = -61.0
# **ETLook.unstable.evaporation***********************************************************
# constants or predefined:
r0_grass = 0.23
######################## MODEL ETLOOK #########################################
# **effective_leaf_area_index**************************************************
vc = ETLook.leaf.vegetation_cover(ndvi, nd_min, nd_max, vc_pow)
lai = ETLook.leaf.leaf_area_index(vc, vc_min, vc_max, lai_pow)
lai_eff = ETLook.leaf.effective_leaf_area_index(lai)
vc[np.isnan(QC)] = np.nan
lai[np.isnan(QC)] = np.nan
lai_eff[np.isnan(QC)] = np.nan
if out.vc == 1:
PF.Save_as_tiff(vc_filename, vc, geo_ex, proj_ex)
if out.lai == 1:
PF.Save_as_tiff(lai_filename, lai, geo_ex, proj_ex)
if out.lai_eff == 1:
PF.Save_as_tiff(lai_eff_filename, lai_eff, geo_ex, proj_ex)
#*******TRANSPIRATION COMPONENT****************************************************************
# **soil fraction**************************************************************
sf_soil = ETLook.radiation.soil_fraction(lai)
sf_soil[np.isnan(QC)] = np.nan
if out.sf_soil == 1:
PF.Save_as_tiff(sf_soil_filename, sf_soil, geo_ex, proj_ex)
# **atmospheric canopy resistance***********************************************
iesd = ETLook.solar_radiation.inverse_earth_sun_distance(doy)
sc = ETLook.solar_radiation.seasonal_correction(doy)
day_angle = ETLook.clear_sky_radiation.day_angle(doy)
decl = ETLook.solar_radiation.declination(doy)
lat = ETLook.solar_radiation.latitude_rad(lat_deg)
slope = ETLook.solar_radiation.slope_rad(slope_deg)
aspect = ETLook.solar_radiation.aspect_rad(aspect_deg)
ra_24_toa = ETLook.solar_radiation.daily_solar_radiation_toa(sc, decl, iesd, lat, slope, aspect)
ws = ETLook.solar_radiation.sunset_hour_angle(lat, decl)
ra_24_toa_flat = ETLook.solar_radiation.daily_solar_radiation_toa_flat(decl, iesd, lat, ws)
diffusion_index = ETLook.solar_radiation.diffusion_index(trans_24, diffusion_slope, diffusion_intercept)
# choose one of the two options below
#ra_24 = ETLook.solar_radiation.daily_solar_radiation_flat(ra_24_toa_flat, trans_24)
ra_24 = ETLook.solar_radiation.daily_total_solar_radiation(ra_24_toa, ra_24_toa_flat, diffusion_index, trans_24)
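# The terrain-aware form above blends flat and sloped TOA radiation through the
# diffusion index; the commented flat-surface form ignores slope and aspect.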
stress_rad = ETLook.stress.stress_radiation(ra_24)
p_air_24 = ETLook.meteo.air_pressure_daily(z, p_air_0_24)
vp_24 = ETLook.meteo.vapour_pressure_from_specific_humidity_daily(qv_24, p_air_24)
svp_24 = ETLook.meteo.saturated_vapour_pressure_daily(t_air_24)
vpd_24 = ETLook.meteo.vapour_pressure_deficit_daily(svp_24, vp_24)
stress_vpd = ETLook.stress.stress_vpd(vpd_24, vpd_slope)
stress_temp = ETLook.stress.stress_temperature(t_air_24, t_opt, t_min, t_max)
r_canopy_0 = ETLook.resistance.atmospheric_canopy_resistance(lai_eff, stress_rad, stress_vpd, stress_temp, rs_min, rcan_max)
# Save as tiff files
lat[np.isnan(QC)] = np.nan
slope[np.isnan(QC)] = np.nan
aspect[np.isnan(QC)] = np.nan
ra_24_toa[np.isnan(QC)] = np.nan
ws[np.isnan(QC)] = np.nan
ra_24_toa_flat[np.isnan(QC)] = np.nan
diffusion_index[np.isnan(QC)] = np.nan
ra_24[np.isnan(QC)] = np.nan
stress_rad[np.isnan(QC)] = np.nan
p_air_24[np.isnan(QC)] = np.nan
vp_24[np.isnan(QC)] = np.nan
svp_24[np.isnan(QC)] = np.nan
vpd_24[np.isnan(QC)] = np.nan
stress_vpd[np.isnan(QC)] = np.nan
stress_temp[np.isnan(QC)] = np.nan
r_canopy_0[np.isnan(QC)] = np.nan
if out.lat == 1:
PF.Save_as_tiff(lat_filename, lat, geo_ex, proj_ex)
if out.slope == 1:
PF.Save_as_tiff(slope_filename, slope, geo_ex, proj_ex)
if out.aspect == 1:
PF.Save_as_tiff(aspect_filename, aspect, geo_ex, proj_ex)
if out.ws == 1:
PF.Save_as_tiff(ws_filename, ws, geo_ex, proj_ex)
if out.ra_24_toa == 1:
PF.Save_as_tiff(ra_24_toa_filename, ra_24_toa, geo_ex, proj_ex)
if out.diffusion_index == 1:
PF.Save_as_tiff(diffusion_index_filename, diffusion_index, geo_ex, proj_ex)
if out.ra_24 == 1:
PF.Save_as_tiff(ra_24_filename, ra_24, geo_ex, proj_ex)
if out.stress_rad == 1:
PF.Save_as_tiff(stress_rad_filename, stress_rad, geo_ex, proj_ex)
if out.p_air_24 == 1:
PF.Save_as_tiff(p_air_24_filename, p_air_24, geo_ex, proj_ex)
if out.vp_24 == 1:
PF.Save_as_tiff(vp_24_filename, vp_24, geo_ex, proj_ex)
if out.svp_24 == 1:
PF.Save_as_tiff(svp_24_filename, svp_24, geo_ex, proj_ex)
if out.vpd_24 == 1:
PF.Save_as_tiff(vpd_24_filename, vpd_24, geo_ex, proj_ex)
if out.stress_vpd == 1:
PF.Save_as_tiff(stress_vpd_filename, stress_vpd, geo_ex, proj_ex)
if out.stress_temp == 1:
PF.Save_as_tiff(stress_temp_filename, stress_temp, geo_ex, proj_ex)
if out.r_canopy_0 == 1:
PF.Save_as_tiff(r_canopy_0_filename, r_canopy_0, geo_ex, proj_ex)
# **net radiation canopy******************************************************
t_air_k_24 = ETLook.meteo.air_temperature_kelvin_daily(t_air_24)
# select one of the below two
#l_net = ETLook.radiation.longwave_radiation_fao_etref(t_air_k_24, vp_24, trans_24)
l_net = ETLook.radiation.longwave_radiation_fao(t_air_k_24, vp_24, trans_24, vp_slope, vp_offset, lw_slope, lw_offset)
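# The FAO-style net longwave above takes the vp/lw slope-offset constants defined
# earlier; the commented variant would use the FAO reference-ET defaults instead.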
int_mm = ETLook.evapotranspiration.interception_mm(P_24, vc, lai, int_max)
lh_24 = ETLook.meteo.latent_heat_daily(t_air_24)
int_wm2 = ETLook.radiation.interception_wm2(int_mm, lh_24)
rn_24 = ETLook.radiation.net_radiation(r0, ra_24, l_net, int_wm2)
rn_24_canopy = ETLook.radiation.net_radiation_canopy(rn_24, sf_soil)
# Save as tiff files
t_air_k_24[np.isnan(QC)] = np.nan
l_net[np.isnan(QC)] = np.nan
int_mm[np.isnan(QC)] = np.nan
lh_24[np.isnan(QC)] = np.nan
int_wm2[np.isnan(QC)] = np.nan
rn_24[np.isnan(QC)] = np.nan
rn_24_canopy[np.isnan(QC)] = np.nan
if out.t_air_k_24 == 1:
PF.Save_as_tiff(t_air_k_24_filename, t_air_k_24, geo_ex, proj_ex)
if out.l_net == 1:
PF.Save_as_tiff(l_net_filename, l_net, geo_ex, proj_ex)
if out.int_mm == 1:
PF.Save_as_tiff(int_mm_filename, int_mm, geo_ex, proj_ex)
if out.lh_24 == 1:
PF.Save_as_tiff(lh_24_filename, lh_24, geo_ex, proj_ex)
if out.int_wm2 == 1:
PF.Save_as_tiff(int_wm2_filename, int_wm2, geo_ex, proj_ex)
if out.rn_24 == 1:
PF.Save_as_tiff(rn_24_filename, rn_24, geo_ex, proj_ex)
if out.rn_24_canopy == 1:
PF.Save_as_tiff(rn_24_canopy_filename, rn_24_canopy, geo_ex, proj_ex)
# **canopy resistance***********************************************************
t_air_k_i = ETLook.meteo.air_temperature_kelvin_inst(t_air_i)
vp_i = ETLook.meteo.vapour_pressure_from_specific_humidity_inst(qv_i, p_air_i)
ad_moist_i = ETLook.meteo.moist_air_density_inst(vp_i, t_air_k_i)
ad_dry_i = ETLook.meteo.dry_air_density_inst(p_air_i, vp_i, t_air_k_i)
ad_i = ETLook.meteo.air_density_inst(ad_dry_i, ad_moist_i)
u_b_i_bare = ETLook.soil_moisture.wind_speed_blending_height_bare(u_i, z0m_bare, z_obs, z_b)
lon = ETLook.solar_radiation.longitude_rad(lon_deg)
ha = ETLook.solar_radiation.hour_angle(sc, dtime, lon)
I0 = ETLook.clear_sky_radiation.solar_constant()
ied = ETLook.clear_sky_radiation.inverse_earth_sun_distance(day_angle)
h0 = ETLook.clear_sky_radiation.solar_elevation_angle(lat, decl, ha)
h0ref = ETLook.clear_sky_radiation.solar_elevation_angle_refracted(h0)
m = ETLook.clear_sky_radiation.relative_optical_airmass(p_air_i, p_air_0_i, h0ref)
rotm = ETLook.clear_sky_radiation.rayleigh_optical_thickness(m)
Tl2 = ETLook.clear_sky_radiation.linke_turbidity(wv_i, aod550_i, p_air_i, p_air_0_i)
G0 = ETLook.clear_sky_radiation.extraterrestrial_irradiance_normal(I0, ied)
B0c = ETLook.clear_sky_radiation.beam_irradiance_normal_clear(G0, Tl2, m, rotm, h0)
Bhc = ETLook.clear_sky_radiation.beam_irradiance_horizontal_clear(B0c, h0)
Dhc = ETLook.clear_sky_radiation.diffuse_irradiance_horizontal_clear(G0, Tl2, h0)
ra_hor_clear_i = ETLook.clear_sky_radiation.ra_clear_horizontal(Bhc, Dhc)
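# Note: the chain above is an ESRA-style clear-sky decomposition: Linke turbidity
# and relative air mass yield beam (Bhc) and diffuse (Dhc) components, which sum
# to the clear-sky horizontal irradiance ra_hor_clear_i.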
emiss_atm_i = ETLook.soil_moisture.atmospheric_emissivity_inst(vp_i, t_air_k_i)
rn_bare = ETLook.soil_moisture.net_radiation_bare(ra_hor_clear_i, emiss_atm_i, t_air_k_i, lst, r0_bare)
rn_full = ETLook.soil_moisture.net_radiation_full(ra_hor_clear_i, emiss_atm_i, t_air_k_i, lst, r0_full)
h_bare = ETLook.soil_moisture.sensible_heat_flux_bare(rn_bare, fraction_h_bare)
h_full = ETLook.soil_moisture.sensible_heat_flux_full(rn_full, fraction_h_full)
u_b_i_full = ETLook.soil_moisture.wind_speed_blending_height_full_inst(u_i, z0m_full, z_obs, z_b)
u_star_i_bare = ETLook.soil_moisture.friction_velocity_bare_inst(u_b_i_bare, z0m_bare, disp_bare, z_b)
u_star_i_full = ETLook.soil_moisture.friction_velocity_full_inst(u_b_i_full, z0m_full, disp_full, z_b)
L_bare = ETLook.soil_moisture.monin_obukhov_length_bare(h_bare, ad_i, u_star_i_bare, t_air_k_i)
L_full = ETLook.soil_moisture.monin_obukhov_length_full(h_full, ad_i, u_star_i_full, t_air_k_i)
u_i_soil = ETLook.soil_moisture.wind_speed_soil_inst(u_i, L_bare, z_obs)
ras = ETLook.soil_moisture.aerodynamical_resistance_soil(u_i_soil)
raa = ETLook.soil_moisture.aerodynamical_resistance_bare(u_i, L_bare, z0m_bare, disp_bare, z_obs)
rac = ETLook.soil_moisture.aerodynamical_resistance_full(u_i, L_full, z0m_full, disp_full, z_obs)
t_max_bare = ETLook.soil_moisture.maximum_temperature_bare(ra_hor_clear_i, emiss_atm_i, t_air_k_i, ad_i, raa, ras, r0_bare)
t_max_full = ETLook.soil_moisture.maximum_temperature_full(ra_hor_clear_i, emiss_atm_i, t_air_k_i, ad_i, rac, r0_full)
w_i = ETLook.soil_moisture.dew_point_temperature_inst(vp_i)  # duplicate of t_dew_i below; appears unused
t_dew_i = ETLook.soil_moisture.dew_point_temperature_inst(vp_i)
t_wet_i = ETLook.soil_moisture.wet_bulb_temperature_inst(t_air_i, t_dew_i)
t_wet_k_i = ETLook.meteo.wet_bulb_temperature_kelvin_inst(t_wet_i)
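# Dry/wet limits: t_max_bare/t_max_full give the no-evaporation (dry-limit)
# surface temperatures and t_wet_k_i the wet limit; se_root below places the
# observed lst between those limits to estimate root-zone soil moisture.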
lst_max = ETLook.soil_moisture.maximum_temperature(t_max_bare, t_max_full, vc)
se_root = ETLook.soil_moisture.soil_moisture_from_maximum_temperature(lst_max, lst, t_wet_k_i)
stress_moist = ETLook.stress.stress_moisture(se_root, tenacity)
r_canopy_0 = ETLook.resistance.atmospheric_canopy_resistance(lai_eff, stress_rad, stress_vpd, stress_temp, rs_min, rcan_max)
r_canopy = ETLook.resistance.canopy_resistance(r_canopy_0, stress_moist, rcan_max)
# Save as tiff files
t_air_k_i[np.isnan(QC)] = np.nan
vp_i[np.isnan(QC)] = np.nan
ad_moist_i[np.isnan(QC)] = np.nan
ad_dry_i[np.isnan(QC)] = np.nan
ad_i[np.isnan(QC)] = np.nan
u_b_i_bare[np.isnan(QC)] = np.nan
lon[np.isnan(QC)] = np.nan
ha[np.isnan(QC)] = np.nan
h0[np.isnan(QC)] = np.nan
h0ref[np.isnan(QC)] = np.nan
m[np.isnan(QC)] = np.nan
rotm[np.isnan(QC)] = np.nan
Tl2[np.isnan(QC)] = np.nan
B0c[np.isnan(QC)] = np.nan
Bhc[np.isnan(QC)] = np.nan
Dhc[np.isnan(QC)] = np.nan
ra_hor_clear_i[np.isnan(QC)] = np.nan
emiss_atm_i[np.isnan(QC)] = np.nan
rn_bare[np.isnan(QC)] = np.nan
rn_full[np.isnan(QC)] = np.nan
u_b_i_full[np.isnan(QC)] = np.nan
u_star_i_bare[np.isnan(QC)] = np.nan
u_star_i_full[np.isnan(QC)] = np.nan
u_i_soil[np.isnan(QC)] = np.nan
ras[np.isnan(QC)] = np.nan
raa[np.isnan(QC)] = np.nan
rac[np.isnan(QC)] = np.nan
t_max_bare[
|
np.isnan(QC)
|
numpy.isnan
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import sys
import networkx as nx
import re
import numpy as np
import itertools
_s = re.compile(r'\s+')
_p = re.compile(r'(\d+)\s+(\d+)')
def lsqp(atoms):
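# Least-squares plane through the ring atoms: cross products of consecutive
# position vectors are accumulated (a Newell-style normal estimate) and the
# dominant SVD direction taken as the plane normal. Returns the normal, the
# ring centroid, and an auxiliary point derived from the plane equation.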
com = atoms.mean(axis=0)
#u, d, v = np.linalg.svd(atoms-com)
axes = np.zeros((len(atoms), 3))
for i in range(len(atoms)):
p1 = atoms[i]
if i == len(atoms)-1:
p2 = atoms[0]
else:
p2 = atoms[i+1]
a = np.cross(p1, p2)
axes += a
u, d, v = np.linalg.svd(axes)
i = 0
d = -np.dot(v[i], com)
n = -np.array((v[i,0], v[i,1], d))/v[i,2]
return v[i], com, n
def intriangle(triangle, axis, u, p):
# http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm
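# Parametric test: r locates where the segment from p along u meets the
# triangle's plane; (s, t) are the barycentric-style coordinates of that point,
# which lies inside the triangle iff s >= 0, t >= 0 and s + t <= 1.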
p1, p2, p3 = triangle
w0 = p - p1
a = -np.dot(axis, w0)
b = np.dot(axis, u)
if (abs(b) < 0.01): return False
r = a / b
if r < 0.0: return False
if r > 1.0: return False
I = p + u * r
u = p2 - p1
v = p3 - p1
uu = np.dot(u, u)
uv = np.dot(u, v)
vv = np.dot(v, v)
w = I - p1
wu = np.dot(w, u)
wv = np.dot(w, v)
D = uv * uv - uu * vv
s = (uv * wv - vv * wu)/D
if (s < 0 or s > 1): return False
t = (uv * wu - uu * wv)/D
if (t < 0 or (s+t) > 1): return False
return True
def build_topology(psffile):
g = nx.Graph()
flag = 0
for line in open(psffile).readlines():
if flag == 0 and line.strip().endswith('NATOM'):
natom = int(line.strip().split()[0])
g.natom = natom
flag = 1
continue
if flag == 0 and line.strip().endswith('bonds'):
flag = 2
continue
if flag == 1 and not line.strip(): flag = 0
if flag == 2 and not line.strip(): break
if flag == 1:
num, segid, resid, resname, name = _s.split(line)[1:6]
if resname.startswith('TIP3'): continue
if name.startswith('H'): continue
g.add_node(int(num), {'segid': segid, 'resname': resname, 'name': name, 'resid': resid})
if flag == 2:
for pair in _p.findall(line):
num1, num2 = map(int, pair)
if g.has_node(num1) and g.has_node(num2): g.add_edge(num1, num2)
return g
def build_atomtable(psf, crdfile):
crds = {}
flag = 0
for line in open(crdfile).readlines():
if line.startswith('*'): continue
if flag == 0:
flag = 1
continue
if flag == 1 and not line.strip(): break
if flag == 1:
num, resid, resname, name, x, y, z, segid = _s.split(line.strip())[:8]
if resname.startswith('TIP3'): continue
if name.startswith('H'): continue
if psf.node[int(num)]['name'] != name: raise AtomMismatch("%d %s != %d %s" % (int(num), psf.node[int(num)]['name'], int(num), name))
crds[int(num)] = np.array((float(x), float(y), float(z)))
return crds
class AtomMismatch(Exception):
pass
def check_ring_penetration(psf, crd, pbc=[], xtl='rect', verbose=0):
# ring penetration test
# 1. find rings
# 2. build least square plane
# 3. project atoms ring constituent atoms onto the plane and build convex
# 4. find two bonded atoms that are at the opposite side of the plane
# 5. determine the point of intersection is enclosed in the ring
#
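# Example: for a planar six-membered ring, a bond crossing the ring interior
# gives an angle sum around the intersection point of ~2*pi (winding number
# wn ~ 1, flagged below), while a bond passing outside the ring gives wn ~ 0.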
molecules = nx.connected_component_subgraphs(psf)
allatoms = np.array([crd[num] for num in psf.nodes()])
atoms_map = np.array([num for num in psf.nodes()])
natoms = len(allatoms)
if pbc:
atoms_map_reverse = {}
for i,num in enumerate(psf.nodes()):
atoms_map_reverse[num] = i
a = float(pbc[0])
b = float(pbc[1])
n = len(allatoms)
if xtl == 'rect':
allatoms = np.tile(allatoms, (9,1))
op = ((a,0),(a,b),(0,b),(-a,b),(-a,0),(-a,-b),(0,-b),(a,-b))
for i in range(8):
x,y = op[i]
allatoms[n*(i+1):n*(i+2),0] += x
allatoms[n*(i+1):n*(i+2),1] += y
atoms_map = np.tile(atoms_map, 9)
if xtl =='hexa':
allatoms = np.tile(allatoms, (7,1))
rot = lambda theta: np.matrix(((np.cos(np.radians(theta)), -np.sin(np.radians(theta))),
(np.sin(np.radians(theta)), np.cos(np.radians(theta)))))
op = (rot(15), rot(75), rot(135), rot(195), rot(255), rot(315))
d = np.array((a, 0))
for i in range(6):
xy = np.dot(d, op[i])
allatoms[n*(i+1):n*(i+2),:2] = allatoms[n*(i+1):n*(i+2),:2] + xy
atoms_map = np.tile(atoms_map, 7)
# print out image atoms
#fp = open('image.pdb', 'w')
#for i,atom in enumerate(allatoms):
# x, y, z = atom
# fp.write("HETATM%5d %-3s %3s %4d %8.3f%8.3f%8.3f 0.00 0.00 \n" % (i, 'C', 'DUM', i, x, y, z))
pen_pairs = []
pen_cycles = []
for m in molecules:
cycles = nx.cycle_basis(m)
if not cycles: continue
for cycle in cycles:
flag = False
atoms = np.array([crd[num] for num in cycle])
if len(set([psf.node[num]['resid'] for num in cycle])) > 1: continue
if verbose:
num = cycle[0]
print('found ring:', psf.node[num]['segid'], psf.node[num]['resid'], psf.node[num]['resname'])
# build least square fit plane
axis, com, n = lsqp(atoms)
# project atoms to the least square fit plane
for i,atom in enumerate(atoms):
w = np.dot(axis, atom-com)*axis + com
atoms[i] = com + (atom - w)
maxd = np.max(np.sqrt(np.sum(np.square(atoms - com), axis=1)))
d = np.sqrt(np.sum(np.square(allatoms-com), axis=1))
nums = np.argwhere(d < 3).ravel()  # ravel, not squeeze, so a single match still iterates
# find two bonded atoms that are at the opposite side of the plane
for num in nums:
num1 = atoms_map[num]
for num2 in psf[num1]:
if num1 in cycle or num2 in cycle: continue
if num >= natoms:
# image atoms
offset = int(num / natoms)
crd1 = allatoms[num]
crd2 = allatoms[atoms_map_reverse[num2] + offset * natoms]
else:
crd1 = crd[num1]
crd2 = crd[num2]
v1 = np.dot(crd1 - com, axis)
v2 = np.dot(crd2 - com, axis)
if v1 * v2 > 0: continue
# point of intersection of the least square fit plane
s = -np.dot(axis, crd1-com)/np.dot(axis, crd2-crd1)
p = crd1 + s*(crd2-crd1)
d = np.sqrt(np.sum(np.square(p-com)))
if d > maxd: continue
if verbose:
print('found potentially penetrating bond:', psf.node[num1]['segid'], psf.node[num1]['resid'], psf.node[num1]['resname'], psf.node[num1]['name'], psf.node[num2]['name'])
d = 0
for i in range(0, len(atoms)):
p1 = atoms[i] - p
try: p2 = atoms[i+1] - p
except IndexError: p2 = atoms[0] - p  # wrap around to close the ring
d += np.arccos(np.dot(p1, p2)/np.linalg.norm(p1)/np.linalg.norm(p2))
wn = d/2/np.pi
if wn > 0.9 and wn < 1.1:
# we have a case
pen_pairs.append((num1, num2))
pen_cycles.append(cycle)
flag = True
break
if flag: break
return pen_pairs, pen_cycles
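# Typical usage (sketch; file names hypothetical, CHARMM-style PSF/CRD assumed):
#   psf = build_topology('system.psf')
#   crd = build_atomtable(psf, 'system.crd')
#   pairs, rings = check_ring_penetration(psf, crd, pbc=[60, 60], xtl='rect')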
def find_alpha_shape(atoms, alpha=-1, verbose=0):
# build alpha-shape of the protein
# requires <NAME>'s Hull program
# http://netlib.sandia.gov/voronoi/hull.html
import subprocess as sp
pid = sp.Popen([hull, '-A', '-oN'], stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, close_fds=True) # Delaunay triangulation; 'hull' must be set to the path of the Hull executable
pid.communicate("\n".join(["%12.8f %12.8f %12.8f" % tuple(atom) for atom in atoms]))
flag = False
facets = []
facets_crds = []
vertices = set([])
h = nx.Graph()
for line in open('hout-alf').readlines():
if flag:
i, j, k = map(int, line.strip().split())
facets.append((i, j, k))
facets_crds.append(np.array((atoms[i], atoms[j], atoms[k])))
for x in (i, j, k): vertices.add(x)
h.add_edges_from([(i, j), (i, k), (j, k)])
if line.startswith("%") and 'hull' in line: flag = True
facets_crds =
|
np.array(facets_crds)
|
numpy.array
|
import unittest
import numpy as np
import numpy
import theano
from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (CumsumOp, cumsum, CumprodOp, cumprod,
CpuContiguous, cpu_contiguous, BinCountOp,
bincount, DiffOp, diff, squeeze, compress,
RepeatOp, repeat, Bartlett, bartlett,
FillDiagonal, fill_diagonal,
FillDiagonalOffset, fill_diagonal_offset,
to_one_hot, Unique)
from theano import tensor as T
from theano import config, tensor, function
from theano.tests.unittest_tools import attr
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_16 = bool(numpy_ver >= [1, 6])
def test_cpu_contiguous():
a = T.fmatrix('a')
i = T.iscalar('i')
a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
f = theano.function([a, i], cpu_contiguous(a.reshape((5,4))[::i]))
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, CpuContiguous) for node in topo])
assert f(a_val, 1).flags['C_CONTIGUOUS']
assert f(a_val, 2).flags['C_CONTIGUOUS']
assert f(a_val, 3).flags['C_CONTIGUOUS']
class TestCumsumOp(utt.InferShapeTester):
def setUp(self):
super(TestCumsumOp, self).setUp()
self.op_class = CumsumOp
self.op = CumsumOp()
def test_cumsumOp(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis out of bounds
self.assertRaises(ValueError, cumsum, x, axis=3)
self.assertRaises(ValueError, cumsum, x, axis=-4)
f = theano.function([x], cumsum(x))
assert np.allclose(np.cumsum(a), f(a)) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
f = theano.function([x], cumsum(x, axis=axis))
assert np.allclose(np.cumsum(a, axis=axis), f(a))
def test_infer_shape(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis=None
self._compile_and_check([x],
[self.op(x)],
[a],
self.op_class)
for axis in range(-len(a.shape), len(a.shape)):
self._compile_and_check([x],
[cumsum(x, axis=axis)],
[a],
self.op_class)
def test_grad(self):
a = np.random.random((3, 5, 2)).astype(config.floatX)
utt.verify_grad(self.op, [a]) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
utt.verify_grad(self.op_class(axis=axis), [a], eps=4e-4)
class TestCumprodOp(utt.InferShapeTester):
def setUp(self):
super(TestCumprodOp, self).setUp()
self.op_class = CumprodOp
self.op = CumprodOp()
def test_CumprodOp(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis out of bounds
self.assertRaises(ValueError, cumprod, x, axis=3)
self.assertRaises(ValueError, cumprod, x, axis=-4)
f = theano.function([x], cumprod(x))
assert np.allclose(np.cumprod(a), f(a)) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
f = theano.function([x], cumprod(x, axis=axis))
assert np.allclose(np.cumprod(a, axis=axis), f(a))
def test_infer_shape(self):
x = T.tensor3('x')
a = np.random.random((3, 5, 2)).astype(config.floatX)
# Test axis=None
self._compile_and_check([x],
[self.op(x)],
[a],
self.op_class)
for axis in range(-len(a.shape), len(a.shape)):
self._compile_and_check([x],
[cumprod(x, axis=axis)],
[a],
self.op_class)
def test_grad(self):
a = np.random.random((3, 5, 2)).astype(config.floatX)
utt.verify_grad(self.op, [a]) # Test axis=None
for axis in range(-len(a.shape), len(a.shape)):
utt.verify_grad(self.op_class(axis=axis), [a])
class TestBinCountOp(utt.InferShapeTester):
def setUp(self):
super(TestBinCountOp, self).setUp()
self.op_class = BinCountOp
self.op = BinCountOp()
def test_bincountFn(self):
w = T.vector('w')
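# ref() below is a pure-Python reference implementation of numpy.bincount with
# optional weights and minlength, used as known-good output for the Theano op.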
def ref(data, w=None, minlength=None):
size = data.max() + 1
if minlength:
size = max(size, minlength)
if w is not None:
out = np.zeros(size, dtype=w.dtype)
for i in range(data.shape[0]):
out[data[i]] += w[i]
else:
out = np.zeros(size, dtype=data.dtype)
for i in range(data.shape[0]):
out[data[i]] += 1
return out
for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'):
x = T.vector('x', dtype=dtype)
a = np.random.random_integers(50, size=(25)).astype(dtype)
weights = np.random.random((25,)).astype(config.floatX)
f1 = theano.function([x], bincount(x))
f2 = theano.function([x, w], bincount(x, weights=w))
assert (ref(a) == f1(a)).all()
assert np.allclose(ref(a, weights), f2(a, weights))
f3 = theano.function([x], bincount(x, minlength=55))
f4 = theano.function([x], bincount(x, minlength=5))
assert (ref(a, minlength=55) == f3(a)).all()
assert (ref(a, minlength=5) == f4(a)).all()
# skip the following test when using unsigned ints
if not dtype.startswith('u'):
a[0] = -1
f5 = theano.function([x], bincount(x, assert_nonneg=True))
self.assertRaises(AssertionError, f5, a)
def test_bincountOp(self):
w = T.vector('w')
for dtype in ('int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64'):
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.gof.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
x = T.vector('x', dtype=dtype)
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, BinCountOp(), x)
else:
a = np.random.random_integers(50, size=(25)).astype(dtype)
weights = np.random.random((25,)).astype(config.floatX)
f1 = theano.function([x], BinCountOp()(x, weights=None))
f2 = theano.function([x, w], BinCountOp()(x, weights=w))
assert (np.bincount(a) == f1(a)).all()
assert np.allclose(np.bincount(a, weights=weights),
f2(a, weights))
if not numpy_16:
continue
f3 = theano.function([x], BinCountOp(minlength=23)(x, weights=None))
f4 = theano.function([x], BinCountOp(minlength=5)(x, weights=None))
assert (np.bincount(a, minlength=23) == f3(a)).all()
assert (np.bincount(a, minlength=5) == f4(a)).all()
@attr('slow')
def test_infer_shape(self):
for dtype in tensor.discrete_dtypes:
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
int_bitwidth = theano.gof.python_int_bitwidth()
if int_bitwidth == 64:
numpy_unsupported_dtypes = ('uint64',)
if int_bitwidth == 32:
numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
x = T.vector('x', dtype=dtype)
if dtype in numpy_unsupported_dtypes:
self.assertRaises(TypeError, BinCountOp(), x)
else:
self._compile_and_check(
[x],
[BinCountOp()(x,None)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
weights = np.random.random((25,)).astype(config.floatX)
self._compile_and_check(
[x],
[BinCountOp()(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
if not numpy_16:
continue
self._compile_and_check(
[x],
[BinCountOp(minlength=60)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
self._compile_and_check(
[x],
[BinCountOp(minlength=5)(x, weights=weights)],
[np.random.random_integers(
50, size=(25,)).astype(dtype)],
self.op_class)
class TestDiffOp(utt.InferShapeTester):
nb = 10  # Number of values of n to iterate over
def setUp(self):
super(TestDiffOp, self).setUp()
self.op_class = DiffOp
self.op = DiffOp()
def test_diffOp(self):
x = T.matrix('x')
a = np.random.random((30, 50)).astype(config.floatX)
f = theano.function([x], diff(x))
assert np.allclose(np.diff(a), f(a))
for axis in range(len(a.shape)):
for k in range(TestDiffOp.nb):
g = theano.function([x], diff(x, n=k, axis=axis))
assert np.allclose(np.diff(a, n=k, axis=axis), g(a))
def test_infer_shape(self):
x = T.matrix('x')
a = np.random.random((30, 50)).astype(config.floatX)
self._compile_and_check([x],
[self.op(x)],
[a],
self.op_class)
for axis in range(len(a.shape)):
for k in range(TestDiffOp.nb):
self._compile_and_check([x],
[diff(x, n=k, axis=axis)],
[a],
self.op_class)
def test_grad(self):
x = T.vector('x')
a = np.random.random(50).astype(config.floatX)
theano.function([x], T.grad(T.sum(diff(x)), x))
utt.verify_grad(self.op, [a])
for k in range(TestDiffOp.nb):
theano.function([x], T.grad(T.sum(diff(x, n=k)), x))
utt.verify_grad(DiffOp(n=k), [a], eps=7e-3)
class SqueezeTester(utt.InferShapeTester):
shape_list = [(1, 3),
(1, 2, 3),
(1, 5, 1, 1, 6)]
broadcast_list = [[True, False],
[True, False, False],
[True, False, True, True, False]]
def setUp(self):
super(SqueezeTester, self).setUp()
self.op = squeeze
def test_op(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
variable = tensor.TensorType(theano.config.floatX, broadcast)()
f = theano.function([variable], self.op(variable))
expected = numpy.squeeze(data)
tested = f(data)
assert tested.shape == expected.shape
assert numpy.allclose(tested, expected)
def test_infer_shape(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
variable = tensor.TensorType(theano.config.floatX, broadcast)()
self._compile_and_check([variable],
[self.op(variable)],
[data],
tensor.DimShuffle,
warn=False)
def test_grad(self):
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
utt.verify_grad(self.op, [data])
def test_var_interface(self):
# same as test_op, but use a_theano_var.squeeze.
for shape, broadcast in zip(self.shape_list, self.broadcast_list):
data = numpy.random.random(size=shape).astype(theano.config.floatX)
variable = tensor.TensorType(theano.config.floatX, broadcast)()
f = theano.function([variable], variable.squeeze())
expected = numpy.squeeze(data)
tested = f(data)
assert tested.shape == expected.shape
assert numpy.allclose(tested, expected)
class CompressTester(utt.InferShapeTester):
axis_list = [None,
-1,
0,
0,
0,
1]
cond_list = [[1, 0, 1, 0, 0, 1],
[0, 1, 1, 0],
[0, 1, 1, 0],
[],
[0, 0, 0, 0],
[1, 1, 0, 1, 0]]
shape_list = [(2, 3),
(4, 3),
(4, 3),
(4, 3),
(4, 3),
(3, 5)]
def setUp(self):
super(CompressTester, self).setUp()
self.op = compress
def test_op(self):
for axis, cond, shape in zip(self.axis_list, self.cond_list,
self.shape_list):
cond_var = theano.tensor.ivector()
data = numpy.random.random(size=shape).astype(theano.config.floatX)
data_var = theano.tensor.matrix()
f = theano.function([cond_var, data_var],
self.op(cond_var, data_var, axis=axis))
expected = numpy.compress(cond, data, axis=axis)
tested = f(cond, data)
assert tested.shape == expected.shape
assert numpy.allclose(tested, expected)
class TestRepeatOp(utt.InferShapeTester):
def _possible_axis(self, ndim):
return [None] + list(range(ndim)) + [-i for i in range(ndim)]
def setUp(self):
super(TestRepeatOp, self).setUp()
self.op_class = RepeatOp
self.op = RepeatOp()
# uint64 always fails
# int64 and uint32 also fail if python int are 32-bit
ptr_bitwidth = theano.gof.local_bitwidth()
if ptr_bitwidth == 64:
self.numpy_unsupported_dtypes = ('uint64',)
if ptr_bitwidth == 32:
self.numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
def test_repeatOp(self):
for ndim in range(3):
x = T.TensorType(config.floatX, [False] * ndim)()
a = np.random.random((10, ) * ndim).astype(config.floatX)
for axis in self._possible_axis(ndim):
for dtype in tensor.discrete_dtypes:
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if (dtype == 'uint64' or
(dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1)):
self.assertRaises(TypeError,
repeat, x, r_var, axis=axis)
else:
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a, r))
r_var = T.vector(dtype=dtype)
if axis is None:
r = np.random.random_integers(
5, size=a.size).astype(dtype)
else:
r = np.random.random_integers(
5, size=(10,)).astype(dtype)
if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
self.assertRaises(TypeError,
repeat, x, r_var, axis=axis)
else:
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a, r))
# check when r is a list holding a single integer, e.g. [3].
r = np.random.random_integers(10, size=()).astype(dtype) + 2
f = theano.function([x],
repeat(x, [r], axis=axis))
assert np.allclose(np.repeat(a, r, axis=axis),
f(a))
assert not np.any([isinstance(n.op, RepeatOp)
for n in f.maker.fgraph.toposort()])
# check when r is a theano TensorType whose broadcastable is (True,)
r_var = theano.tensor.TensorType(broadcastable=(True,),
dtype=dtype)()
r = np.random.random_integers(5, size=(1,)).astype(dtype)
f = theano.function([x, r_var],
repeat(x, r_var, axis=axis))
assert np.allclose(np.repeat(a, r[0], axis=axis),
f(a, r))
assert not np.any([isinstance(n.op, RepeatOp)
for n in f.maker.fgraph.toposort()])
@attr('slow')
def test_infer_shape(self):
for ndim in range(4):
x = T.TensorType(config.floatX, [False] * ndim)()
shp = (numpy.arange(ndim) + 1) * 5
a = np.random.random(shp).astype(config.floatX)
for axis in self._possible_axis(ndim):
for dtype in tensor.discrete_dtypes:
r_var = T.scalar(dtype=dtype)
r = numpy.asarray(3, dtype=dtype)
if dtype in self.numpy_unsupported_dtypes:
r_var = T.vector(dtype=dtype)
self.assertRaises(TypeError, repeat, x, r_var)
else:
self._compile_and_check(
[x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
r_var = T.vector(dtype=dtype)
if axis is None:
r = np.random.random_integers(
5, size=a.size).astype(dtype)
elif a.size > 0:
r = np.random.random_integers(
5, size=a.shape[axis]).astype(dtype)
else:
r = np.random.random_integers(
5, size=(10,)).astype(dtype)
self._compile_and_check(
[x, r_var],
[RepeatOp(axis=axis)(x, r_var)],
[a, r],
self.op_class)
def test_grad(self):
for ndim in range(3):
a = np.random.random((10, ) * ndim).astype(config.floatX)
for axis in self._possible_axis(ndim):
utt.verify_grad(lambda x: RepeatOp(axis=axis)(x, 3), [a])
def test_broadcastable(self):
x = T.TensorType(config.floatX, [False, True, False])()
r = RepeatOp(axis=1)(x, 2)
self.assertEqual(r.broadcastable, (False, False, False))
r = RepeatOp(axis=1)(x, 1)
self.assertEqual(r.broadcastable, (False, True, False))
r = RepeatOp(axis=0)(x, 2)
self.assertEqual(r.broadcastable, (False, True, False))
class TestBartlett(utt.InferShapeTester):
def setUp(self):
super(TestBartlett, self).setUp()
self.op_class = Bartlett
self.op = bartlett
def test_perform(self):
x = tensor.lscalar()
f = function([x], self.op(x))
M = numpy.random.random_integers(3, 50, size=())
assert numpy.allclose(f(M), numpy.bartlett(M))
assert numpy.allclose(f(0), numpy.bartlett(0))
assert numpy.allclose(f(-1), numpy.bartlett(-1))
b = numpy.array([17], dtype='uint8')
assert numpy.allclose(f(b[0]), numpy.bartlett(b[0]))
def test_infer_shape(self):
x = tensor.lscalar()
self._compile_and_check([x], [self.op(x)],
[numpy.random.random_integers(3, 50, size=())],
self.op_class)
self._compile_and_check([x], [self.op(x)], [0], self.op_class)
self._compile_and_check([x], [self.op(x)], [1], self.op_class)
class TestFillDiagonal(utt.InferShapeTester):
rng = numpy.random.RandomState(43)
def setUp(self):
super(TestFillDiagonal, self).setUp()
self.op_class = FillDiagonal
self.op = fill_diagonal
def test_perform(self):
x = tensor.matrix()
y = tensor.scalar()
f = function([x, y], fill_diagonal(x, y))
for shp in [(8, 8), (5, 8), (8, 5)]:
a = numpy.random.rand(*shp).astype(config.floatX)
val = numpy.cast[config.floatX](numpy.random.rand())
out = f(a, val)
# We can't use numpy.fill_diagonal as it is bugged.
assert numpy.allclose(numpy.diag(out), val)
assert (out == val).sum() == min(a.shape)
# test for 3d tensor
a = numpy.random.rand(3, 3, 3).astype(config.floatX)
x = tensor.tensor3()
y = tensor.scalar()
f = function([x, y], fill_diagonal(x, y))
val = numpy.cast[config.floatX](numpy.random.rand() + 10)
out = f(a, val)
# We can't use numpy.fill_diagonal as it is bugged.
assert out[0, 0, 0] == val
assert out[1, 1, 1] == val
assert out[2, 2, 2] == val
assert (out == val).sum() == min(a.shape)
@attr('slow')
def test_gradient(self):
utt.verify_grad(fill_diagonal, [numpy.random.rand(5, 8),
numpy.random.rand()],
n_tests=1, rng=TestFillDiagonal.rng)
utt.verify_grad(fill_diagonal, [numpy.random.rand(8, 5),
numpy.random.rand()],
n_tests=1, rng=TestFillDiagonal.rng)
def test_infer_shape(self):
z = tensor.dtensor3()
x = tensor.dmatrix()
y = tensor.dscalar()
self._compile_and_check([x, y], [self.op(x, y)],
[numpy.random.rand(8, 5),
numpy.random.rand()],
self.op_class)
self._compile_and_check([z, y], [self.op(z, y)],
# must be square when nd>2
[numpy.random.rand(8, 8, 8),
numpy.random.rand()],
self.op_class,
warn=False)
class TestFillDiagonalOffset(utt.InferShapeTester):
rng = numpy.random.RandomState(43)
def setUp(self):
super(TestFillDiagonalOffset, self).setUp()
self.op_class = FillDiagonalOffset
self.op = fill_diagonal_offset
def test_perform(self):
x = tensor.matrix()
y = tensor.scalar()
z = tensor.iscalar()
f = function([x, y, z], fill_diagonal_offset(x, y, z))
for test_offset in (-5, -4, -1, 0, 1, 4, 5):
for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
a = numpy.random.rand(*shp).astype(config.floatX)
val = numpy.cast[config.floatX](numpy.random.rand())
out = f(a, val, test_offset)
# We can't use numpy.fill_diagonal as it is bugged.
assert numpy.allclose(numpy.diag(out, test_offset), val)
if test_offset >= 0:
assert (out == val).sum() == min( min(a.shape),
a.shape[1]-test_offset )
else:
assert (out == val).sum() == min( min(a.shape),
a.shape[0]+test_offset )
def test_gradient(self):
for test_offset in (-5, -4, -1, 0, 1, 4, 5):
# input 'offset' will not be tested
def fill_diagonal_with_fix_offset( a, val):
return fill_diagonal_offset( a, val, test_offset)
utt.verify_grad(fill_diagonal_with_fix_offset,
[numpy.random.rand(5, 8), numpy.random.rand()],
n_tests=1, rng=TestFillDiagonalOffset.rng)
utt.verify_grad(fill_diagonal_with_fix_offset,
[numpy.random.rand(8, 5), numpy.random.rand()],
n_tests=1, rng=TestFillDiagonalOffset.rng)
utt.verify_grad(fill_diagonal_with_fix_offset,
[numpy.random.rand(5, 5), numpy.random.rand()],
n_tests=1, rng=TestFillDiagonalOffset.rng)
def test_infer_shape(self):
x = tensor.dmatrix()
y = tensor.dscalar()
z = tensor.iscalar()
for test_offset in (-5, -4, -1, 0, 1, 4, 5):
self._compile_and_check([x, y, z], [self.op(x, y, z)],
[numpy.random.rand(8, 5),
numpy.random.rand(),
test_offset],
self.op_class )
self._compile_and_check([x, y, z], [self.op(x, y, z)],
[numpy.random.rand(5, 8),
numpy.random.rand(),
test_offset],
self.op_class )
def test_to_one_hot():
v = theano.tensor.ivector()
o = to_one_hot(v, 10)
f = theano.function([v], o)
out = f([1, 2, 3, 5, 6])
assert out.dtype == theano.config.floatX
assert numpy.allclose(
out,
[[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
v = theano.tensor.ivector()
o = to_one_hot(v, 10, dtype="int32")
f = theano.function([v], o)
out = f([1, 2, 3, 5, 6])
assert out.dtype == "int32"
assert numpy.allclose(
out,
[[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
class test_Unique(utt.InferShapeTester):
def setUp(self):
super(test_Unique, self).setUp()
self.op_class = Unique
self.ops = [Unique(),
Unique(True),
Unique(False, True),
Unique(True, True)]
if numpy_ver >= [1, 9]:
self.ops.extend([
Unique(False, False, True),
Unique(True, False, True),
Unique(False, True, True),
Unique(True, True, True)])
def test_basic_vector(self):
"""
Basic test for a vector.
Done by using the op and checking that it returns the right answer.
"""
x = theano.tensor.vector()
inp = np.asarray([2,1,3,2], dtype=config.floatX)
list_outs_expected = [[np.unique(inp)],
np.unique(inp, True),
np.unique(inp, False, True),
np.unique(inp, True, True)]
if numpy_ver >= [1, 9]:
list_outs_expected.extend([
np.unique(inp, False, False, True),
|
np.unique(inp, True, False, True)
|
numpy.unique
|
# Copyright (c) 2003-2015 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy
import treecorr
import os
import fitsio
from test_helper import get_script_name
def test_binnedcorr3():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
numpy.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
numpy.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
numpy.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
numpy.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, nnn.nvbins) )
numpy.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
numpy.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
numpy.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
numpy.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
numpy.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
numpy.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, nnn.nvbins) )
numpy.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
numpy.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
numpy.testing.assert_equal(nnn.v1d.shape, (nnn.nvbins,) )
numpy.testing.assert_almost_equal(nnn.v1d[0], nnn.min_v + 0.5*nnn.vbin_size)
numpy.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
numpy.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, nnn.nvbins) )
numpy.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
numpy.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == numpy.ceil(1./nnn.bin_size)
assert nnn.min_v == -1.
assert nnn.max_v == 1.
assert nnn.nvbins == 2.*numpy.ceil(1./nnn.bin_size)
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=-0.2, max_v=0.2, nvbins=4)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == -0.2
assert nnn.max_v == 0.2
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.ubin_size == 0.05
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert nnn.vbin_size == 0.05
assert nnn.max_v == 0.2
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=-0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert nnn.ubin_size == 0.05
assert nnn.nubins == 4
assert nnn.min_v == -0.2
assert nnn.vbin_size == 0.05
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep >= 20. # Expanded a bit.
assert nnn.max_sep < 20. * numpy.exp(nnn.bin_size)
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep >= 20.
assert nnn.max_sep < 20. * numpy.exp(nnn.bin_size)
assert nnn.bin_size == 0.1
assert nnn.min_u <= 0.2
assert nnn.min_u >= 0.2 - nnn.ubin_size
assert nnn.max_u == 0.9
assert nnn.ubin_size == 0.03
assert nnn.min_v <= -0.2
assert nnn.min_v >= -0.2 - nnn.vbin_size
assert nnn.max_v >= 0.2
assert nnn.max_v <= 0.2 + nnn.vbin_size
assert nnn.vbin_size == 0.07
check_arrays(nnn)
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5.)
numpy.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
numpy.testing.assert_almost_equal(nnn.min_sep, 5.)
numpy.testing.assert_almost_equal(nnn.max_sep, 20.)
numpy.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
numpy.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
numpy.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
numpy.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
numpy.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.1)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=1.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.1)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=0.2,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.02)
numpy.testing.assert_almost_equal(nnn.bu, 0.006)
numpy.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=0.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.0)
numpy.testing.assert_almost_equal(nnn.bu, 0.0)
numpy.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1, bin_slop=2.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.2)
numpy.testing.assert_almost_equal(nnn.bu, 0.06)
numpy.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.4, bin_slop=1.0,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.4)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.4,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=-0.2, max_v=0.2, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert nnn.ubin_size == 0.03
assert nnn.vbin_size == 0.07
numpy.testing.assert_almost_equal(nnn.b, 0.1)
numpy.testing.assert_almost_equal(nnn.bu, 0.03)
numpy.testing.assert_almost_equal(nnn.bv, 0.07)
numpy.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.05,
min_u=0.2, max_u=0.9, ubin_size=0.3,
min_v=-0.2, max_v=0.2, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert nnn.ubin_size == 0.3
assert nnn.vbin_size == 0.17
numpy.testing.assert_almost_equal(nnn.b, 0.05)
numpy.testing.assert_almost_equal(nnn.bu, 0.1)
numpy.testing.assert_almost_equal(nnn.bv, 0.1)
numpy.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
def is_ccw(x1,y1, x2,y2, x3,y3):
# Calculate the cross product of 1->2 with 1->3
x2 -= x1
x3 -= x1
y2 -= y1
y3 -= y1
return x2*y3-x3*y2 > 0.
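# Example: is_ccw(0,0, 1,0, 0,1) is True (cross product 1*1 - 0*0 = 1 > 0);
# swapping the last two points gives a clockwise triangle and returns False.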
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
# to see if it comes out right. This should exactly match the treecorr code if bin_slop=0.
ngal = 100
s = 10.
numpy.random.seed(8675309)
x = numpy.random.normal(0,s, (ngal,) )
y = numpy.random.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = -0.83
max_v = 0.59
nvbins = 20
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0., verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = numpy.log(min_sep)
log_max_sep = numpy.log(max_sep)
true_ntri = numpy.zeros( (nbins, nubins, nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
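# Each triangle is reduced to ordered side lengths d1 >= d2 >= d3 and binned by
# r = d2, u = d3/d2 in [0,1] and v = +/-(d1-d2)/d3 in [-1,1], where the sign of
# v encodes the triangle's orientation (counter-clockwise positive).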
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = numpy.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = numpy.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = numpy.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
ccw = True
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk;
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik;
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik;
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk;
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij;
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij;
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if not ccw:
v = -v
kr = int(numpy.floor( (numpy.log(r)-log_min_sep) / bin_size ))
ku = int(
|
numpy.floor( (u-min_u) / ubin_size )
|
numpy.floor
|
# coding: utf8
import numpy as np
import utils_mpc
import time
from QP_WBC import wbc_controller
import MPC_Wrapper
import pybullet as pyb
from Planner import PyPlanner
import pinocchio as pin
from solopython.utils.viewerClient import viewerClient, NonBlockingViewerFromRobot
class Result:
"""Object to store the result of the control loop
It contains what is sent to the robot (gains, desired positions and velocities,
feedforward torques)"""
def __init__(self):
self.P = 0.0
self.D = 0.0
self.q_des =
|
np.zeros(12)
|
numpy.zeros
|
# Copyright 2021 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test layers from qpooling.py."""
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_raises
from numpy.testing import assert_equal
import pytest
import logging
import tempfile
import os
import tensorflow.compat.v2 as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.backend import clear_session
from qkeras import QAveragePooling2D
from qkeras import QGlobalAveragePooling2D
from qkeras import quantized_bits
from qkeras import binary
from qkeras import ternary
from qkeras.utils import model_save_quantized_weights
from qkeras.utils import quantized_model_from_json
from qkeras.utils import load_qmodel
from qkeras.utils import model_quantize
from qkeras import print_qstats
from qkeras.qtools import qgraph
from qkeras.qtools import generate_layer_data_type_map
from qkeras.qtools import interface
@pytest.mark.parametrize(
('pooling, input_size, pool_size, strides, padding, data_format,'
'average_quantizer, activation_quantizer, y'), [
('QAveragePooling2D', (4, 4, 3), (2, 2), (2, 2), 'valid',
'channels_last', quantized_bits(4, 0, 1), quantized_bits(4, 0, 1),
np.array([[[[0.375, 0.625, 0.375], [0.25, 0.75, 0.5]],
[[0.375, 0.25, 0.625], [0.625, 0.5, 0.375]]],
[[[0.375, 0.375, 0.5], [0.375, 0.5, 0.625]],
[[0.75, 0.625, 0.5], [0.5, 0.5, 0.75]]]]).astype(
np.float16)),
('QAveragePooling2D', (4, 4, 3), (3, 3), (3, 3), 'valid',
'channels_last', quantized_bits(4, 0, 1), quantized_bits(4, 0, 1),
np.array([[[[0.375, 0.625, 0.625]]], [[[0.625, 0.5, 0.625]]]]).astype(
np.float16)),
('QGlobalAveragePooling2D', (4, 4, 3), (2, 2), (2, 2), 'valid',
'channels_last', quantized_bits(10, 0, 1), quantized_bits(4, 0, 1),
np.array([[0.5, 0.5, 0.375], [0.5, 0.5, 0.625]]).astype(np.float16)),
('QAveragePooling2D', (4, 4, 3), (2, 2), (3, 3), 'valid',
'channels_last', quantized_bits(4, 0, 1), quantized_bits(4, 0, 1),
np.array([[[[0.375, 0.625, 0.375]]], [[[0.375, 0.375, 0.5]]]]).astype(
np.float16)),
('QAveragePooling2D', (4, 4, 3), (2, 2), (3, 3), 'same',
'channels_last', quantized_bits(4, 0, 1), quantized_bits(4, 0, 1),
np.array([[[[0.375, 0.625, 0.375], [0.375, 0.75, 0.25]],
[[0.75, 0.25, 0.375], [0.75, 0.75, 0.25]]],
[[[0.375, 0.375, 0.5], [0.25, 0.625, 0.5]],
[[0.625, 0.625, 0.5], [0.625, 0.625, 0.875]]]]).astype(
np.float16)),
('QAveragePooling2D', (4, 4, 3), (2, 2),
(2, 2), 'valid', 'channels_first', quantized_bits(
4, 0, 1), quantized_bits(4, 0, 1), None),
])
def test_q_average_pooling(pooling, input_size, pool_size, strides, padding,
data_format, average_quantizer,
activation_quantizer, y):
"""q_average_pooling test utility."""
np.random.seed(33)
x = Input(input_size)
xin = x
if pooling == 'QAveragePooling2D':
x = QAveragePooling2D(
pool_size=pool_size,
strides=strides,
padding=padding,
data_format=data_format,
average_quantizer=average_quantizer,
activation=activation_quantizer,
name='qpooling')(x)
else:
x = QGlobalAveragePooling2D(
data_format=data_format,
average_quantizer=average_quantizer,
activation=activation_quantizer,
name='qpooling')(
x)
model = Model(inputs=xin, outputs=x)
  # Prints qstats to make sure it works with the pooling layers
print_qstats(model)
size = (2,) + input_size
inputs = np.random.rand(size[0], size[1], size[2], size[3])
if data_format == 'channels_first':
assert_raises(tf.errors.InvalidArgumentError, model.predict, inputs)
else:
p = model.predict(inputs).astype(np.float16)
assert_allclose(p, y, rtol=1e-4)
# Reloads the model to ensure saving/loading works
json_string = model.to_json()
clear_session()
reload_model = quantized_model_from_json(json_string)
p = reload_model.predict(inputs).astype(np.float16)
assert_allclose(p, y, rtol=1e-4)
# Saves the model as an h5 file using Keras's model.save()
fd, fname = tempfile.mkstemp(".h5")
model.save(fname)
del model # Delete the existing model
# Returns a compiled model identical to the previous one
loaded_model = load_qmodel(fname)
# Cleans the created h5 file after loading the model
os.close(fd)
os.remove(fname)
  # Applies the quantizers to the weights
model_save_quantized_weights(loaded_model)
p = loaded_model.predict(inputs).astype(np.float16)
assert_allclose(p, y, rtol=1e-4)
def test_qpooling_in_model_quantize():
input_size = (16, 16, 3)
pool_size = (2, 2)
x = Input(input_size)
xin = x
x = AveragePooling2D(pool_size=pool_size, name="pooling")(x)
x = GlobalAveragePooling2D(name="global_pooling")(x)
model = Model(inputs=xin, outputs=x)
quantize_config = {
"QAveragePooling2D": {
"average_quantizer": "binary",
"activation_quantizer": "binary"
},
"QGlobalAveragePooling2D": {
"average_quantizer": "quantized_bits(4, 0, 1)",
"activation_quantizer": "ternary"
}
}
qmodel = model_quantize(model, quantize_config, 4)
print_qstats(qmodel)
assert_equal(str(qmodel.layers[1].average_quantizer_internal), "binary()")
assert_equal(str(qmodel.layers[1].activation), "binary()")
assert_equal(
str(qmodel.layers[2].average_quantizer_internal), "quantized_bits(4,0,1)")
assert_equal(str(qmodel.layers[2].activation), "ternary()")
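# Note (an inference from the assertions above, not documented behavior):
# model_quantize appears to walk the float model and, for each layer class
# named in quantize_config, swap in the corresponding QKeras layer with the
# listed quantizers, e.g. AveragePooling2D -> QAveragePooling2D with binary()
# average/activation quantizers here.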
def test_qpooling_in_qtools():
input_size = (16, 16, 3)
pool_size = (2, 2)
input_quantizers = [quantized_bits(8, 0, 1)]
is_inference = False
x = Input(input_size)
xin = x
x = QAveragePooling2D(
pool_size=pool_size,
average_quantizer=binary(),
activation=quantized_bits(4, 0, 1),
name="pooling")(
x)
x = QGlobalAveragePooling2D(
average_quantizer=quantized_bits(4, 0, 1),
activation=ternary(),
name="global_pooling")(
x)
model = Model(inputs=xin, outputs=x)
(graph, source_quantizer_list) = qgraph.CreateGraph(
model, input_quantizers)
qgraph.GraphPropagateActivationsToEdges(graph)
layer_map = generate_layer_data_type_map.generate_layer_data_type_map(
graph, source_quantizer_list, is_inference)
dtype_dict = interface.map_to_json(layer_map)
  # Checks the QAveragePooling layer datatype
multiplier = dtype_dict["pooling"]["multiplier"]
accumulator = dtype_dict["pooling"]["accumulator"]
output = dtype_dict["pooling"]["output_quantizer"]
assert_equal(multiplier["quantizer_type"], "quantized_bits")
|
assert_equal(multiplier["bits"], 10)
|
numpy.testing.assert_equal
|
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron,LogisticRegression
from sklearn.metrics import accuracy_score
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, classifier,test_idx=None, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class examples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0],y=X[y == cl, 1],alpha=0.8, c=colors[idx],
marker=markers[idx], label=cl, edgecolor='black')
# highlight test samples
if test_idx:
        # extract and plot the test samples
X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], facecolors='none', edgecolor='black', alpha=1.0,  # c='' is rejected by newer matplotlib
linewidth=1, marker='o', s=100, label='test set')
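# Illustrative usage sketch (mirrors test_ppn below; kept commented out so the
# module stays import-safe):
#   X_train_std, X_test_std, y_train, y_test = load_iris_data()
#   ppn = Perceptron(max_iter=40, eta0=0.1, random_state=1).fit(X_train_std, y_train)
#   plot_decision_regions(X=np.vstack((X_train_std, X_test_std)),
#                         y=np.hstack((y_train, y_test)),
#                         classifier=ppn, test_idx=range(105, 150))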
def load_iris_data():
iris = datasets.load_iris()
X = iris.data[:, [2, 3]]
y = iris.target
print('Class labels:', np.unique(y))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 1, stratify = y)
print('Labels counts in y_train:', np.bincount(y_train))
print('Labels counts in y_test:', np.bincount(y_test))
sc = StandardScaler()
    sc.fit(X_train)  # using the fit method, StandardScaler estimates the parameters
    # μ (sample mean) and σ (standard deviation) for each feature dimension from the training data
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
return X_train_std,X_test_std,y_train, y_test
def test_ppn(X_train_std,X_test_std,y_train, y_test):
ppn = Perceptron(max_iter=40, eta0=0.1, random_state=1)
ppn.fit(X_train_std, y_train)
y_pred = ppn.predict(X_test_std)
print('Misclassified samples: %d' % (y_test != y_pred).sum())
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
    plot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105, 150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
class Logistic_regression():
def __init__(self,eta=0.0001,n_iter=1000):
self.max_iter=n_iter
self.eta=eta
self.w_initialized = False
self.shuffle = True
self.cost_ = []
def fit(self,x,y_gt):
x_1 = np.hstack((x, np.ones((x.shape[0], 1))))
self.w_=np.random.random(x_1.shape[-1])
for iter in range(self.max_iter):
hidden=self.net_input(x_1)
y_pred=self.activation(hidden)
dw_=self.eta*
|
np.dot(x_1.T ,y_gt-y_pred)
|
numpy.dot
|
from enum import Enum
from queue import PriorityQueue
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy.linalg as LA
import numpy as np
import random
def r_ints(p1, p2):
#return the points between the given two points
I = [i - j for i,j in zip(p2, p1)]
d = np.sqrt(np.sum(np.square(I)))
I = [ i/d for i in I] #unit vector
p_set = [p1]
dis = np.sqrt(np.sum(np.square([i - j for i,j in zip(p1, p2)])))
s = 1
while dis >= (1.414 + 0.1):
p_m = [ round(i + j*s) for i,j in zip(p1, I)]
dis = np.sqrt(np.sum(np.square([i - j for i,j in zip(p2, p_m)])))
s = s + 1
if p_m not in p_set:
p_set.append(p_m)
if p2 not in p_set:
p_set.append(p2)
return p_set
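# Example (illustrative, hand-checked): the segment from (0,0,0) to (3,3,0)
# yields roughly the lattice points along it:
#   r_ints([0, 0, 0], [3, 3, 0]) -> [[0, 0, 0], [1, 1, 0], [2, 2, 0], [3, 3, 0]]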
class Motion_Planer:
def __init__(self, data):
"""
        ****data describes the buildings or obstacles****
        data: [z, a_x,a_y, b_x,b_y, c_x,c_y, d_x,d_y, h]
        z is the bottom of the building on the z-axis
        a,b,c,d are vertices of the rectangle, a = (a_x, a_y)
h is the height
"""
self.voxmap, self.offset = create_voxmap(data)
#print("map_offset:", self.offset)
def find_paths(self, voxmap_start, voxmap_goal, simplify = 1, flag_offset = 1, flag_virtual = 0):
if flag_offset == 1:
voxmap_start = [round(i - j) for i,j in zip(voxmap_start, self.offset)]
voxmap_goal = [round(i - j) for i,j in zip(voxmap_goal, self.offset)]
paths, _ = a_star_3D(self.voxmap, heuristic, tuple(voxmap_start), tuple(voxmap_goal))
if simplify == 1:
num_key_points = [0]
num = len(paths)
i = 0
point_s = paths[0]
point_e = paths[1]
while i <= (num-1):
points_m = r_ints(point_s, point_e)
obs_r = []
for point in points_m:
obs_r.append(self.voxmap[point[0], point[1], point[2]])
if True in obs_r:
num_key_points.append(i)
point_s = paths[i]
point_e = paths[i+1]
continue
i = i + 1
try:
point_e = paths[i]
except:
pass
num_key_points.append(num - 1) #add the last point
paths_key = [ paths[i] for i in num_key_points]
paths_key_r = [ [i[0]+self.offset[0], i[1]+self.offset[1], i[2]+self.offset[2]] for i in paths_key[1:]]
paths_r = [ [i[0]+self.offset[0], i[1]+self.offset[1], i[2]+self.offset[2]] for i in paths[1:]]
#print("paths: ", paths)
#print("paths_r: ", paths_r)
if flag_virtual == 1:
            # visualize
# start and end points
            traj_s_e = np.zeros(self.voxmap.shape, dtype=bool)  # np.bool was removed in NumPy 1.24
traj_s_e[voxmap_start[0]][voxmap_start[1]][voxmap_start[2]] = True
traj_s_e[voxmap_goal[0]][voxmap_goal[1]][voxmap_goal[2]] = True
            traj = np.zeros(self.voxmap.shape, dtype=bool)
for path in paths[1:-1]:
traj[path] = True
            traj_key = np.zeros(self.voxmap.shape, dtype=bool)
for key in paths_key:
traj_key[key] = True
            #combine obstacles, start-end points, and waypoints
            World = self.voxmap | traj_s_e | traj
# set the colors of each object
colors = np.empty(self.voxmap.shape, dtype = object)
colors[self.voxmap] = 'grey'
colors[traj_s_e] = "black"
colors[traj] = "red"
colors[traj_key] = "blue"
print("Prepare to show the Virtual Worlf of motion planning")
#plot
fig = plt.figure()
            ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) was removed in Matplotlib 3.6
ax.voxels(World, facecolors=colors, edgecolor='k')
plt.show()
if simplify == 1:
return paths_key_r
else:
return paths_r
def rand_points(self):
while True:
n_goal = random.randint(0, self.voxmap.shape[0] - 1)
e_goal = random.randint(0, self.voxmap.shape[1] - 1)
alt_goal = random.randint(0, self.voxmap.shape[2] - 1)
if self.voxmap[n_goal, e_goal, alt_goal] == 0:
break
# Voxmap goal
voxmap_goal = [n_goal, e_goal, alt_goal]
while True:
n_start = random.randint(0, self.voxmap.shape[0] - 1)
e_start = random.randint(0, self.voxmap.shape[1] - 1)
alt_start = random.randint(0, self.voxmap.shape[2] - 1)
if self.voxmap[n_start, e_start, alt_start] == 0:
break
# Voxmap start
voxmap_start = [n_start, e_start, alt_start]
voxmap_start = [round(i + j) for i,j in zip(voxmap_start, self.offset)]
voxmap_goal = [round(i + j) for i,j in zip(voxmap_goal, self.offset)]
return voxmap_goal, voxmap_start
def heuristic(position, goal_position):
return LA.norm(np.array(position) - np.array(goal_position))
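# e.g. heuristic((0, 0, 0), (1, 1, 1)) == sqrt(3): the straight-line (Euclidean)
# distance, which is admissible for the move costs defined in Action3D below.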
def create_voxmap(data, map_margin = 5, safety_distance = 0, voxel_size = 1):
#data: (z, a, b, c, d, h), a,b,c,d are vertices of rectangle. h is the height.
"""
Returns a grid representation of a 3D configuration space
based on given obstacle data and safety distance
    The 'map_margin' argument pads the voxel map beyond the extreme obstacle coordinates in each dimension.
The 'voxel_size' argument sets the resolution of the voxel map.
"""
# minimum and maximum coordinates
data = np.array(data)
# temp = data[:, 1][0]# + data[:, 2][0] + data[:, 3][0] + data[:, 4][0]
x_all = np.hstack((data[:, 1], data[:, 3], data[:, 5], data[:, 7]))
y_all = np.hstack((data[:, 2], data[:, 4], data[:, 6], data[:, 8]))
#start point
s_p_x = np.floor(np.min(x_all))
s_p_y = np.floor(np.min(y_all))
x_offset = s_p_x - map_margin
y_offset = s_p_y - map_margin
#end point
e_p_x = np.ceil(np.max(x_all))
e_p_y = np.ceil(np.max(y_all))
# z-axis
z_offset = np.min(data[:, 0])
x_size = int(e_p_x - s_p_x + map_margin*2)
y_size = int(e_p_y - s_p_y + map_margin*2)
z_size = int(np.max(data[:, 0]- z_offset + data[:, 9]) + map_margin)
    voxmap = np.zeros((x_size, y_size, z_size), dtype=bool)  # np.bool was removed in NumPy 1.24
for i in range(data.shape[0]):
z, a_x,a_y, b_x,b_y, c_x,c_y, d_x,d_y, h = data[i, :]
a = [a_x,a_y]; b = [b_x,b_y]; c = [c_x,c_y]; d = [d_x,d_y]
x_index, y_index = map_color_seg(a, b, c, d)
x_index = x_index - x_offset
y_index = y_index - y_offset
z = int(z - z_offset)
voxmap[x_index.astype(int), y_index.astype(int), z:(z+h)] = True
offset = [x_offset, y_offset, z_offset]
return voxmap, offset
def map_color_seg(a,b,c,d):
def Sort4Points(points_4):
point_c = [ (a+b+c+d)/4 for a,b,c,d in zip(points_4[0], points_4[1], points_4[2], points_4[3])]
dic_a_p = {}
for point in points_4:
angle = np.arctan2(point[1]-point_c[1], point[0]-point_c[0])
dic_a_p[angle] = point
return [dic_a_p[k] for k in sorted(dic_a_p.keys())]
def linear_k_b(point1, point2):
if point1[0] == point2[0]:
k = 0
else:
k = (point1[1] - point2[1])/(point1[0] - point2[0])
b = point1[1] - k*point1[0]
return k, b
# the vertices should be in order
a,b,c,d = Sort4Points([a,b,c,d])
ab_linear = linear_k_b(a, b)
bc_linear = linear_k_b(b, c)
cd_linear = linear_k_b(c, d)
da_linear = linear_k_b(d, a)
#start point
s_p_x = np.floor(np.min([a[0], b[0], c[0], d[0]]))
s_p_y = np.floor(np.min([a[1], b[1], c[1], d[1]]))
#end point
e_p_x = np.ceil(np.max([a[0], b[0], c[0], d[0]]))
e_p_y = np.ceil(np.max([a[1], b[1], c[1], d[1]]))
offset = [s_p_x, s_p_y]
size = [int(e_p_x - s_p_x), int(e_p_y - s_p_y)]
ab_map = np.zeros((size[0], size[1]))
bc_map = np.zeros((size[0], size[1]))
cd_map = np.zeros((size[0], size[1]))
da_map = np.zeros((size[0], size[1]))
for x in range(size[0]):
for y in range(size[1]):
#two color for ab_map
if (x+offset[0])*ab_linear[0] + ab_linear[1] - (y+offset[1]) >=0:
ab_map[x][y] = 7
else:
ab_map[x][y] = 3
#two color for cd_map
if (x+offset[0])*cd_linear[0] + cd_linear[1] - (y+offset[1]) <=0:
cd_map[x][y] = -7
else:
cd_map[x][y] = -3
#two color for bc_map
if (x+offset[0])*bc_linear[0] + bc_linear[1] - (y+offset[1]) >=0:
bc_map[x][y] = 19
else:
bc_map[x][y] = 37
#two color for da_map
if (x+offset[0])*da_linear[0] + da_linear[1] - (y+offset[1]) <=0:
da_map[x][y] = -19
else:
da_map[x][y] = -37
map_all = ab_map + bc_map + cd_map + da_map
map_all[np.nonzero(map_all)]=1
[x_index, y_index] = np.where(map_all == 0)
x_index = x_index + offset[0]
y_index = y_index + offset[1]
return x_index.astype(int), y_index.astype(int)
### 3D A*
class Action3D(Enum):
"""
An action is represented by a 4 element tuple.
The first 3 values are the delta of the action relative
to the current voxel position. The final value
is the cost of performing the action.
"""
WEST = (0, -1, 0, 1)
EAST = (0, 1, 0, 1)
NORTH = (-1, 0, 0, 1)
SOUTH = (1, 0, 0, 1)
# Diagonal motions
N_WEST = (-1, -1, 0, np.sqrt(2))
N_EAST = (-1, 1, 0, np.sqrt(2))
S_WEST = (1, -1, 0, np.sqrt(2))
S_EAST = (1, 1, 0, np.sqrt(2))
# Up & down motions
UP = (0, 0, 1, 1)
DOWN = (0, 0, -1, 1)
@property
def cost(self):
return self.value[3]
@property
def delta(self):
return (self.value[0], self.value[1], self.value[2])
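# Example: Action3D.N_WEST.delta == (-1, -1, 0) and Action3D.N_WEST.cost == sqrt(2),
# i.e. axis-aligned moves cost 1 while horizontal diagonal moves cost sqrt(2).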
def valid_actions_3D(voxel, current_node):
"""
Returns a list of valid actions given a voxel and current node.
"""
valid_actions = list(Action3D)
n, m, max_alt = voxel.shape[0] - 1, voxel.shape[1] - 1, voxel.shape[2] - 1
x, y, z = current_node
# check if the node is off the voxel or
# it's an obstacle
if z - 1 < 0 or voxel[x, y, z - 1] == 1:
valid_actions.remove(Action3D.DOWN)
if z + 1 > max_alt or voxel[x, y, z + 1] == 1:
valid_actions.remove(Action3D.UP)
if x - 1 < 0 or voxel[x - 1, y, z] == 1:
valid_actions.remove(Action3D.NORTH)
valid_actions.remove(Action3D.N_WEST)
valid_actions.remove(Action3D.N_EAST)
if x + 1 > n or voxel[x + 1, y, z] == 1:
valid_actions.remove(Action3D.SOUTH)
valid_actions.remove(Action3D.S_WEST)
valid_actions.remove(Action3D.S_EAST)
if y - 1 < 0 or voxel[x, y - 1, z] == 1:
valid_actions.remove(Action3D.WEST)
if Action3D.N_WEST in valid_actions:
valid_actions.remove(Action3D.N_WEST)
if Action3D.S_WEST in valid_actions:
valid_actions.remove(Action3D.S_WEST)
if y + 1 > m or voxel[x, y + 1, z] == 1:
valid_actions.remove(Action3D.EAST)
if Action3D.N_EAST in valid_actions:
valid_actions.remove(Action3D.N_EAST)
if Action3D.S_EAST in valid_actions:
valid_actions.remove(Action3D.S_EAST)
return valid_actions
def a_star_3D(voxel, h, start, goal):
path = []
path_cost = 0
queue = PriorityQueue()
queue.put((0, start))
    visited = set([start])  # set(start) would add the individual coordinates, not the node tuple
branch = {}
found = False
while not queue.empty():
item = queue.get()
current_node = item[1]
if
|
np.all(current_node == start)
|
numpy.all
|
from __future__ import division, absolute_import, print_function
import platform
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import run_module_suite, assert_, assert_equal, dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf(object):
def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
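        # (The dtype reassignments above reinterpret the raw uint16 bit
        # patterns as float16 in place, without numeric conversion, so these
        # arrays enumerate every possible half-precision bit pattern.)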
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
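    # Note: float16 has an 11-bit significand (1 implicit + 10 stored bits),
    # so every integer with |i| <= 2**11 == 2048 is exactly representable,
    # which is why the round-trip check above stops at +/-2048.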
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_bits = self.finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(
|
np.ones_like(b)
|
numpy.ones_like
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
Frequency Generator with logspaced freqencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
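# Illustrative usage sketch (values are assumptions, not from the original module):
def _example_freq_gen():
    # 7 pts/decade from 100 kHz down to 10 mHz gives 7 decades of frequencies
    f_range, w_range = freq_gen(f_start=1e5, f_stop=1e-2, pts_decade=7)
    assert np.isclose(f_range[0], 1e5)
    assert np.allclose(w_range, 2 * np.pi * f_range)
    return f_range, w_range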
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
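# Quick sanity sketch (illustrative): a CPE with n=1 reduces to an ideal
# capacitor, so elem_Q and elem_C must agree in that limit.
def _check_cpe_limit():
    w = np.array([1.0, 10.0, 100.0])
    assert np.allclose(elem_Q(w, Q=1e-6, n=1), elem_C(w, C=1e-6))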
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
    Return the impedance of an RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
    n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
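# Sanity sketch (illustrative values): at the summit frequency fs the relation
# R*Q*(2*pi*fs)**n == 1 holds, which is what the parameter handling above
# inverts to recover whichever of R, Q, n was passed as 'none'.
def _check_rq_summit():
    R, n, fs = 100.0, 0.9, 1e3
    Q = 1 / (R * (2 * np.pi * fs) ** n)
    assert np.isclose(R * Q * (2 * np.pi * fs) ** n, 1.0)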
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase elelment exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
    Returns the impedance of an RC circuit, using the RQ definition with n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = Summit frequency of RQ circuit [Hz]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
    n1 = Constant phase element exponent in (RQ) circuit [-]
    fs1 = summit frequency of RQ circuit [Hz]
    C = Capacitance of series C [F]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
    R1 = Resistance in (RC) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    C = Capacitance of series C [F]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
    R1 = Resistance in (RC) circuit [ohm]
    C1 = Capacitance in (RC) circuit [F]
    Q = Constant phase element of series Q [s^n/ohm]
    n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
    Two cases: a) both ox and red are present in solution, where both C_red and D_red are defined; b) in the particular case where initially
    only Ox species are present in the solution with bulk concentration C*_ox, the surface concentrations may be calculated as a function
    of the electrode potential following the Nernst equation. Here C_red and D_red == 'none'
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Bulk concentration of oxidized species [mol/cm3]
    C_red = Bulk concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
    E = Potential [V]
        if reduced species is absent == 'none'
    E0 = formal potential [V]
        if reduced species is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
else:
        raise ValueError('define E and E0')  # sigma would otherwise be undefined below
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
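# Sanity sketch (illustrative values): the semi-infinite Warburg impedance
# sigma*w**-0.5 - 1j*sigma*w**-0.5 returned above has a constant -45 degree
# phase at every frequency.
def _check_warburg_phase():
    w = np.logspace(-2, 5, 50)
    Z = Randles_coeff(w, n_electron=1, A=1.0, D_red=1e-6, D_ox=1e-6,
                      C_red=1e-6, C_ox=1e-6)
    assert np.allclose(np.angle(Z, deg=True), -45.0)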
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with the full complexity of the Warburg constant
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
    D_ox = Diffusion coefficient of oxidized species [cm2/s]
    D_red = Diffusion coefficient of reduced species [cm2/s]
    C_ox = Concentration of oxidized species [mol/cm3]
    C_red = Concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
    F = Faraday's constant [C/mol]
    E = Potential [V]
        if reduced species is absent == 'none'
    E0 = Formal potential [V]
        if reduced species is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
    n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
    Return the impedance of a Randles circuit with a simplified Warburg element (coefficient sigma)
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
    This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in the bulk that gives a capacitance contribution
    to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
    Modified cir_C_RC_C() circuit that can be used if the electrodes and bulk are not behaving like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
    Qe = Interfacial capacitance modeled with a CPE [s^n/ohm]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
    As numpy gives overflow errors when the argument of tanh becomes very large (above ~10^250), this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
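# Illustrative check (assumption: np.tanh can overflow/return nan for complex
# arguments with a very large real part, while the exp(-2x) form stays finite):
def _check_tanh_stable():
    x = np.array([1e3 + 1e3j])
    assert np.isfinite(tanh(x)).all()  # -> 1+0j for large positive Re(x)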
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
    This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte that has a mobile and an immobile ionic species in the bulk, mixed with an
    ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedances -(RC_e)-, the ionically conducting polymer bulk -(RC_b)-,
    and the diffusional impedance from the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
    Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
    As numpy gives overflow errors when sinh becomes very large (above ~10^250), this function is used instead of np.sinh()/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
    As numpy gives overflow errors when coth becomes very large (above ~10^250), this function is used instead of np.coth()/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
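# Quick identity check for the two helpers above (illustrative values where
# both forms are well within floating-point range):
def _check_hyperbolic_helpers():
    x = np.array([0.5, 1.0, 5.0])
    assert np.allclose(sinh(x), np.sinh(x))
    assert np.allclose(coth(x), 1 / np.tanh(x))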
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    n = exponent for interfacial RQ element [-]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
    n = Exponent for interfacial RQ element [-]
    Q = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = Electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
    n2 = exponent for interfacial RQ element [-]
    Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
    Rel = electronic resistance of electrode [ohm/cm]
    Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
    Q = Constant phase element in the modified Randles element of a particle [s^n/ohm]
    n = exponent for internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
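# Example (sketch): simulating -Rs-TL(Q(RW))- over a log-spaced frequency
# range. All parameter values below are illustrative placeholders, not
# recommended defaults.
def _example_RsTL_1Dsolid():
    w = 2*np.pi*np.logspace(-2, 5, 70) #angular frequency, 10 mHz to 100 kHz
    return cir_RsTL_1Dsolid(w=w, L=1e-3, D=1e-10, radius=1e-5, Rs=10, R=50,
                            Q=1e-4, n=0.8, R_w=100, n_w=0.5, Rel=1, Ri=10)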
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
    Transmission line w/ full complexity, which includes both Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
    - Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
fs1 = max frequency peak of the interfacial RQ element[Hz]
    n1 = exponent for interfacial RQ element [-]
R2 = particle charge transfer resistance [ohm*cm^2]
    Q2 = constant phase element of the internal RQ element in the modified Randles element of a particle [s^n/(ohm*cm^2)]
    n2 = exponent for internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
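# Example (sketch): the *_fit functions read named values from an lmfit
# Parameters object. A minimal, self-contained call to elem_Q_fit() could
# look like this (values are placeholders):
def _example_elem_Q_fit():
    from lmfit import Parameters
    params = Parameters()
    params.add('Q', value=1e-5, min=0)
    params.add('n', value=0.9, min=0, max=1)
    w = 2*np.pi*np.logspace(-1, 5, 50)
    return elem_Q_fit(params, w)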
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
'''
Fit Function: -RC-
    Returns the impedance of an RC circuit, using RQ definitions where n=1
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['C']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("C") == -1: #elif Q == 'none':
R = params['R']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['C']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['C']
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
    Z(w) = R / (1 + R*Q*(w*1j)^n)
See Explanation of equations under cir_RQ()
    The str(params.keys())[10:] slice finds the names of the user-defined parameters, which are probed with find(); if find(X) == -1 the parameter X was not given and is treated as 'none'
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
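# Example (sketch): cir_RQ_fit() decides which variable was omitted by probing
# the Parameters *names*. To fit R and n while supplying the summit frequency,
# add 'R', 'n', and 'fs' but no 'Q'; Q is then back-calculated (placeholder
# values below):
def _example_RQ_params_with_fs():
    from lmfit import Parameters
    params = Parameters()
    params.add('R', value=100, min=0)
    params.add('n', value=0.85, min=0, max=1)
    params.add('fs', value=1e3) #summit frequency [Hz]
    return params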
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
    Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
    Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
    NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
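# Sketch (textbook expression, e.g. Bard & Faulkner; added for illustration,
# not part of the original module): the Warburg coefficient sigma used above
# can be estimated from the diffusion coefficients and bulk concentrations of
# the oxidized (O) and reduced (R) species:
#   sigma = R*T/(n^2 * F^2 * A * sqrt(2)) * (1/(C_O*sqrt(D_O)) + 1/(C_R*sqrt(D_R)))
def _warburg_sigma(T, n_el, A, C_O, D_O, C_R, D_R):
    '''T [K], n_el [-], A [cm^2], C [mol/cm^3], D [cm^2/s] -> sigma [ohm*s^(-1/2)]'''
    R_gas = 8.314 #J/(mol*K)
    F = 96485.33 #C/mol
    return (R_gas*T)/(n_el**2 * F**2 * A * np.sqrt(2)) * (1/(C_O*np.sqrt(D_O)) + 1/(C_R*np.sqrt(D_R)))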
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
    # Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
        Re = params['Re']
        fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
    # Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
    # Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
    Fit Function: -Rs-TLs-
    TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
    See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TLQ- (interface reacting, i.e. non-blocking)
    Transmission line w/ full complexity, which includes both Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
    Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
        Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
    Transmission line w/ full complexity, which includes both Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
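# Example (sketch): a minimal lmfit Parameters set for cir_RsRQTL_1Dsolid_fit().
# The names must match the params[...] lookups above; supplying R1/Q1/n1 and
# omitting fs1 selects the corresponding branch. All values are placeholders.
def _example_params_RsRQTL_1Dsolid():
    from lmfit import Parameters
    p = Parameters()
    for name, val in [('Rs', 10), ('L', 1e-3), ('radius', 1e-5), ('D', 1e-10),
                      ('R1', 50), ('Q1', 1e-4), ('n1', 0.8),
                      ('R2', 100), ('Q2', 1e-4), ('n2', 0.9),
                      ('R_w', 200), ('n_w', 0.5), ('Rel', 1), ('Ri', 10)]:
        p.add(name, value=val)
    return p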
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
    Sum of squares error function for the complex non-linear least-squares fitting procedure (CNLS). The fitting routine (lmfit) iterates over
    this function until the total sum of errors is minimized.
    During the minimization the fit is weighted, and currently three different weighting options are available:
    - modulus
    - unity
    - proportional
    Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- re: real impedance
- im: Imaginary impedance
    - circuit:
      The available circuits are shown below; this parameter needs to be given as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
    - C-RC-C
    - Q-RQ-Q
- R-RQ-C
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
    #Different weighting options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
            unity_1s.append(1) #makes an array of 1's, so that the weighting is == 1 * sum of squares.
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
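# Example (sketch): wiring leastsq_errorfunc() into lmfit for an 'R-RQ' fit.
# w, re, and im are assumed to be NumPy arrays of angular frequency and the
# measured real/imaginary impedance; start values are placeholders.
def _example_fit_RsRQ(w, re, im):
    from lmfit import Parameters, minimize
    params = Parameters()
    params.add('Rs', value=10, min=0)
    params.add('R', value=100, min=0)
    params.add('Q', value=1e-5, min=0)
    params.add('n', value=0.9, min=0, max=1)
    return minimize(leastsq_errorfunc, params, method='leastsq',
                    args=(w, re, im, 'R-RQ', 'modulus'))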
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1', 'EIS_data2']
    - cycle: Specific cycle numbers can be extracted using the cycle function. Default is 'off', which includes all cycle numbers.
    Specific cycles can be extracted using this parameter; insert the cycle numbers in brackets, e.g. cycle=[1,4,6] if cycles 1, 4, and 6 are wanted
    - mask: ['high frequency' , 'low frequency'], if only a high- or low-frequency mask is desired, use 'none' for the other, e.g. mask=[10**4,'none']
'''
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
            if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]): #corrects cycle_number except for the first data file
                self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
        #currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
        self.df_raw = pd.concat(self.df_raw0, axis=0) #merges any number of datafiles into a single dataframe
        self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
                self.df.append(self.df_limited2[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
                self.df.append(self.df_limited2[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
        The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
to the data. A data quality analysis can hereby be made on the basis of the relative residuals
Ref.:
        - Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
- Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
        The function performs the KK analysis and by default plots the relative residuals in each subplot.
        Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
        - 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
        - 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
- 'w_data' = plots the relative residuals with the experimental data, in Nyquist and bode plot if desired, see 'bode =' in description
        - nyq_xlim/nyq_ylim: Change the x/y-axis limits on the Nyquist plot; if not equal to 'none', state [min,max] values
- legend:
- 'on' = displays cycle number
- 'potential' = displays average potential which the spectra was measured at
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
'''
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
                self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates initial guesses for R's
                self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
                    self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates initial guesses for R's
                    self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
                self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
                self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates initial guesses for R's
                self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constant values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
if int(self.number_RC[i]) == 2:
self.KK_circuit_fit.append(KK_RC2(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 3:
self.KK_circuit_fit.append(KK_RC3(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 4:
self.KK_circuit_fit.append(KK_RC4(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 5:
self.KK_circuit_fit.append(KK_RC5(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 6:
self.KK_circuit_fit.append(KK_RC6(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 7:
self.KK_circuit_fit.append(KK_RC7(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 8:
self.KK_circuit_fit.append(KK_RC8(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 9:
self.KK_circuit_fit.append(KK_RC9(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 10:
self.KK_circuit_fit.append(KK_RC10(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 11:
self.KK_circuit_fit.append(KK_RC11(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 12:
self.KK_circuit_fit.append(KK_RC12(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 13:
self.KK_circuit_fit.append(KK_RC13(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 14:
self.KK_circuit_fit.append(KK_RC14(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 15:
self.KK_circuit_fit.append(KK_RC15(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 16:
self.KK_circuit_fit.append(KK_RC16(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 17:
self.KK_circuit_fit.append(KK_RC17(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 18:
self.KK_circuit_fit.append(KK_RC18(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 19:
self.KK_circuit_fit.append(KK_RC19(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 20:
self.KK_circuit_fit.append(KK_RC20(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 21:
self.KK_circuit_fit.append(KK_RC21(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 22:
self.KK_circuit_fit.append(KK_RC22(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 23:
self.KK_circuit_fit.append(KK_RC23(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 24:
self.KK_circuit_fit.append(KK_RC24(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 25:
self.KK_circuit_fit.append(KK_RC25(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 26:
self.KK_circuit_fit.append(KK_RC26(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 27:
self.KK_circuit_fit.append(KK_RC27(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 28:
self.KK_circuit_fit.append(KK_RC28(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 29:
self.KK_circuit_fit.append(KK_RC29(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
            elif 30 <= int(self.number_RC[i]) <= 80:
                # KK_RC30()..KK_RC80() share one signature, so instead of one elif per
                # circuit size, look the matching simulation function up by name
                # (assumes the KK_RCn functions are defined at module level, as above)
                KK_RCn = globals()['KK_RC{}'.format(int(self.number_RC[i]))]
                self.KK_circuit_fit.append(KK_RCn(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
            else:
                print('RC simulation circuit not defined')
                print('   Number of RC = ', self.number_RC)
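            # Relative residuals: measured minus KK-fitted impedance, normalized to the
            # impedance modulus at each frequency (as computed by residual_real/residual_imag),
            # so a KK-consistent spectrum scatters randomly around zero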
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
### Plotting Linear_kk results
##
#
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
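        # plot='w_data' stacks a Nyquist plot, an optional Bode plot, and the KK relative
        # residuals in one figure; plot='residuals' draws one residual panel per cycle.
        # A hypothetical call illustrating the keywords used below:
        #   ex.Lin_KK(legend='potential', plot='residuals', bode='off', savefig='none')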
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
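            # bode options: 'on' (Z' and -Z''), 're'/'log_re' (real part, linear/log y),
            # 'im'/'log_im' (imaginary part, linear/log y), 'log' (both parts on log y)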
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and write 'KK-Test' on RR subplot
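            # Scale the residual panel to whichever trace (real or imaginary) strays
            # furthest from zero, with 50% headroom, and tag it 'Lin-KK' near the top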
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
                ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(self.KK_rr_re_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
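            # One residual panel per cycle below; only the figure size and subplot grid
            # differ between the len(self.df) branches, the per-panel logic is the same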
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and write 'KK-Test' on RR subplot
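                # Symmetric y-limits: span +/-1.5x the largest absolute residual so the
                # zero line sits mid-panel; the annotation goes at 1.2-1.3x, inside the frame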
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
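                # Each panel is scaled from its own cycle's extrema: KK_ymin/KK_ymax[0]
                # for ax1, [1] for ax2, and so on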
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 7 Cycles
elif len(self.df) == 7:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 8 Cycles
elif len(self.df) == 8:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
ax8 = fig.add_subplot(338)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 8
ax8.plot(np.log10(self.df[7].f), self.KK_rr_re[7]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax8.plot(np.log10(self.df[7].f), self.KK_rr_im[7]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax8.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax8.legend(loc='best', fontsize=10, frameon=False)
ax8.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
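# The y-limits of each subplot are made symmetric around zero: the larger of
# |KK_ymin| and KK_ymax (in %) is scaled by 1.5 to set the axis limits, and the
# 'Lin-KK' label is anchored just inside the upper limit (factor 1.2).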
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.abs(self.KK_ymin[1])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.abs(self.KK_ymin[1])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[7]) > self.KK_ymax[7]:
ax8.set_ylim(self.KK_ymin[7]*100*1.5, np.abs(self.KK_ymin[7])*100*1.5)
if legend == 'on':
ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax8.annotate('Lin-KK ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[7]) < self.KK_ymax[7]:
ax8.set_ylim(np.negative(self.KK_ymax[7])*100*1.5, np.abs(self.KK_ymax[7])*100*1.5)
if legend == 'on':
ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymax[7])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax8.annotate('Lin-KK, ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), self.KK_ymax[7]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 9 Cycles
elif len(self.df) == 9:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
ax8 = fig.add_subplot(338)
ax9 = fig.add_subplot(339)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
ax7.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 8
ax8.plot(np.log10(self.df[7].f), self.KK_rr_re[7]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax8.plot(np.log10(self.df[7].f), self.KK_rr_im[7]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax8.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax8.legend(loc='best', fontsize=10, frameon=False)
ax8.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 9
ax9.plot(np.log10(self.df[8].f), self.KK_rr_re[8]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax9.plot(np.log10(self.df[8].f), self.KK_rr_im[8]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax9.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax9.legend(loc='best', fontsize=10, frameon=False)
ax9.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
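# Same symmetric y-limit and labeling scheme as in the 8-cycle block above.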
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.abs(self.KK_ymin[1])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.abs(self.KK_ymin[1])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[7]) > self.KK_ymax[7]:
ax8.set_ylim(self.KK_ymin[7]*100*1.5, np.abs(self.KK_ymin[7])*100*1.5)
if legend == 'on':
ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax8.annotate('Lin-KK ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[7]) < self.KK_ymax[7]:
ax8.set_ylim(np.negative(self.KK_ymax[7])*100*1.5, np.abs(self.KK_ymax[7])*100*1.5)
if legend == 'on':
ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymax[7])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax8.annotate('Lin-KK, ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), self.KK_ymax[7]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[8]) > self.KK_ymax[8]:
ax9.set_ylim(self.KK_ymin[8]*100*1.5, np.abs(self.KK_ymin[8])*100*1.5)
if legend == 'on':
ax9.annotate('Lin-KK, #9', xy=[np.min(np.log10(self.df[8].f)), np.abs(self.KK_ymin[8])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax9.annotate('Lin-KK ('+str(np.round(np.average(self.df[8].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[8].f)), np.abs(self.KK_ymin[8])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[8]) < self.KK_ymax[8]:
ax9.set_ylim(np.negative(self.KK_ymax[8])*100*1.5, np.abs(self.KK_ymax[8])*100*1.5)
if legend == 'on':
ax9.annotate('Lin-KK, #9', xy=[np.min(np.log10(self.df[8].f)), np.abs(self.KK_ymax[8])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax9.annotate('Lin-KK, ('+str(np.round(np.average(self.df[8].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[8].f)), self.KK_ymax[8]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
else:
print('Too many spectra to plot. The maximum number of spectra allowed is 9')
def EIS_fit(self, params, circuit, weight_func='modulus', nan_policy='raise'):
'''
EIS_fit() fits experimental data to an equivalent circuit model using a complex non-linear least-squares (CNLS) fitting procedure and allows for batch fitting.
<NAME> (<EMAIL> / <EMAIL>)
Inputs
------------
- circuit:
Choose an equivalent circuit and define it as a string. The following circuits are available.
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-Q
- R-RQ-Q
- R-(Q(RW))
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func:
The weight function with which the CNLS fitting is performed
- modulus (default)
- unity
- proportional
- nan_policy:
How to handle NaN or missing values in the dataset
- ‘raise’ = raise a value error (default)
- ‘propagate’ = do nothing
- ‘omit’ = drop missing data
Returns
------------
Returns the fitted impedance spectra as well as the fitted parameters corresponding to the initial guesses. To access these, use e.g. self.fit_Rs
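Example
------------
A minimal batch-fit sketch (illustrative only): the instance name 'ex' and the
numerical initial guesses are hypothetical, and the parameter names must match
the chosen circuit - here 'R-RQ' in its summit-frequency (fs) parameterization.

from lmfit import Parameters
params = Parameters()
params.add('Rs', value=20., min=0) # series resistance guess [ohm]
params.add('R', value=100., min=0) # polarization resistance guess [ohm]
params.add('n', value=0.8, min=0, max=1) # CPE exponent guess
params.add('fs', value=1000., min=0) # summit frequency guess [Hz]
ex.EIS_fit(params=params, circuit='R-RQ', weight_func='modulus')
print(ex.fit_Rs) # fitted series resistance, one value per spectrum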
'''
self.Fit = []
self.circuit_fit = []
self.fit_E = []
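# Batch CNLS fit: each spectrum is fitted independently with lmfit's minimize()
# (method='leastsq', i.e. Levenberg-Marquardt), which drives leastsq_errorfunc
# over the measured frequencies using the chosen circuit and weight function.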
for i in range(len(self.df)):
self.Fit.append(minimize(leastsq_errorfunc, params, method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, circuit, weight_func), nan_policy=nan_policy, maxfev=9999990))
report_fit(self.Fit[i])
self.fit_E.append(np.average(self.df[i].E_avg))
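# Each branch below rebuilds the fitted impedance for plotting and stores the
# fitted parameter values as list attributes (e.g. self.fit_Rs). RQ-type
# elements can be parameterized either by the CPE prefactor Q or by the summit
# frequency fs (commonly related through Q = 1/(R*(2*pi*fs)**n)); which variant
# was fitted is detected from the parameter names, and the unused one is passed
# as 'none' to the circuit function.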
if circuit == 'C':
self.fit_C = []
for i in range(len(self.df)):
self.circuit_fit.append(elem_C(w=self.df[i].w, C=self.Fit[i].params.get('C').value))
self.fit_C.append(self.Fit[i].params.get('C').value)
elif circuit == 'Q':
self.fit_Q = []
self.fit_n = []
for i in range(len(self.df)):
self.circuit_fit.append(elem_Q(w=self.df[i].w, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value))
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
elif circuit == 'R-C':
self.fit_Rs = []
self.fit_C = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsC(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, C=self.Fit[i].params.get('C').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_C.append(self.Fit[i].params.get('C').value)
elif circuit == 'R-Q':
self.fit_Rs = []
self.fit_Q = []
self.fit_n = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
elif circuit == 'RC':
self.fit_R = []
self.fit_C = []
self.fit_fs = []
for i in range(len(self.df)):
if "'C'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RC(w=self.df[i].w, C=self.Fit[i].params.get('C').value, R=self.Fit[i].params.get('R').value, fs='none'))
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_C.append(self.Fit[i].params.get('C').value)
elif "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RC(w=self.df[i].w, C='none', R=self.Fit[i].params.get('R').value, fs=self.Fit[i].params.get('fs').value))
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
elif circuit == 'RQ':
self.fit_R = []
self.fit_n = []
self.fit_fs = []
self.fit_Q = []
for i in range(len(self.df)):
if "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RQ(w=self.df[i].w, R=self.Fit[i].params.get('R').value, Q='none', n=self.Fit[i].params.get('n').value, fs=self.Fit[i].params.get('fs').value))
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
elif "'Q'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RQ(w=self.df[i].w, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, fs='none'))
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
elif circuit == 'R-RQ':
self.fit_Rs = []
self.fit_R = []
self.fit_n = []
self.fit_fs = []
self.fit_Q = []
for i in range(len(self.df)):
if "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q='none', n=self.Fit[i].params.get('n').value, fs=self.Fit[i].params.get('fs').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
elif "'Q'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, fs='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
elif circuit == 'R-RQ-RQ':
self.fit_Rs = []
self.fit_R = []
self.fit_n = []
self.fit_R2 = []
self.fit_n2 = []
self.fit_fs = []
self.fit_fs2 = []
self.fit_Q = []
self.fit_Q2 = []
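# Two RQ elements give four possible fs/Q parameterization combinations; each
# is dispatched to cir_RsRQRQ with the unused parameterization passed as 'none'.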
for i in range(len(self.df)):
if "'fs'" in str(self.Fit[i].params.keys()) and "'fs2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQRQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q='none', n=self.Fit[i].params.get('n').value, fs=self.Fit[i].params.get('fs').value, R2=self.Fit[i].params.get('R2').value, Q2='none', n2=self.Fit[i].params.get('n2').value, fs2=self.Fit[i].params.get('fs2').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_fs2.append(self.Fit[i].params.get('fs2').value)
elif "'Q'" in str(self.Fit[i].params.keys()) and "'fs2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQRQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, fs='none', R2=self.Fit[i].params.get('R2').value, Q2='none', n2=self.Fit[i].params.get('n2').value, fs2=self.Fit[i].params.get('fs2').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_fs2.append(self.Fit[i].params.get('fs2').value)
elif "'fs'" in str(self.Fit[i].params.keys()) and "'Q2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQRQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q='none', n=self.Fit[i].params.get('n').value, fs=self.Fit[i].params.get('fs').value, R2=self.Fit[i].params.get('R2').value, Q2=self.Fit[i].params.get('Q2').value, n2=self.Fit[i].params.get('n2').value, fs2='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
elif "'Q'" in str(self.Fit[i].params.keys()) and "'Q2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQRQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, fs='none', R2=self.Fit[i].params.get('R2').value, Q2=self.Fit[i].params.get('Q2').value, n2=self.Fit[i].params.get('n2').value, fs2='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
elif circuit == 'R-RC-C':
self.fit_Rs = []
self.fit_R1 = []
self.fit_C1 = []
self.fit_C = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsRCC(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, C1=self.Fit[i].params.get('C1').value, C=self.Fit[i].params.get('C').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_C1.append(self.Fit[i].params.get('C1').value)
self.fit_C.append(self.Fit[i].params.get('C').value)
elif circuit == 'R-RC-Q':
self.fit_Rs = []
self.fit_R1 = []
self.fit_C1 = []
self.fit_Q = []
self.fit_n = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsRCQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, C1=self.Fit[i].params.get('C1').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_C1.append(self.Fit[i].params.get('C1').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
elif circuit == 'R-RQ-Q':
self.fit_Rs = []
self.fit_n = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_Q = []
self.fit_fs1 = []
self.fit_Q1 = []
for i in range(len(self.df)):
if "'fs1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, R1=self.Fit[i].params.get('R1').value, Q1='none', n1=self.Fit[i].params.get('n1').value, fs1=self.Fit[i].params.get('fs1').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
elif "'Q1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, R1=self.Fit[i].params.get('R1').value, Q1=self.Fit[i].params.get('Q1').value, n1=self.Fit[i].params.get('n1').value, fs1='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
elif circuit == 'R-RQ-C':
self.fit_Rs = []
self.fit_C = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_Q1 = []
self.fit_fs1 = []
for i in range(len(self.df)):
if "'fs1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQC(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, C=self.Fit[i].params.get('C').value, R1=self.Fit[i].params.get('R1').value, Q1='none', n1=self.Fit[i].params.get('n1').value, fs1=self.Fit[i].params.get('fs1').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_C.append(self.Fit[i].params.get('C').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
elif "'Q1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQC(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, C=self.Fit[i].params.get('C').value, R1=self.Fit[i].params.get('R1').value, Q1=self.Fit[i].params.get('Q1').value, n1=self.Fit[i].params.get('n1').value, fs1='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_C.append(self.Fit[i].params.get('C').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
elif circuit == 'R-(Q(RW))':
self.fit_Rs = []
self.fit_R = []
self.fit_n = []
self.fit_sigma = []
self.fit_fs = []
self.fit_Q = []
for i in range(len(self.df)):
if "'Q'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Randles_simplified(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, fs='none', n=self.Fit[i].params.get('n').value, sigma=self.Fit[i].params.get('sigma').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_sigma.append(self.Fit[i].params.get('sigma').value)
elif "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Randles_simplified(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q='none', fs=self.Fit[i].params.get('fs').value, n=self.Fit[i].params.get('n').value, sigma=self.Fit[i].params.get('sigma').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_sigma.append(self.Fit[i].params.get('sigma').value)
elif circuit == 'R-TLsQ':
self.fit_Rs = []
self.fit_Q = []
self.fit_n = []
self.fit_Ri = []
self.fit_L = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsTLsQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif circuit == 'R-RQ-TLsQ':
self.fit_Rs = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_Q = []
self.fit_n = []
self.fit_Ri = []
self.fit_L = []
self.fit_fs1 = []
self.fit_Q1 = []
for i in range(len(self.df)):
if "'fs1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLsQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, fs1=self.Fit[i].params.get('fs1').value, n1=self.Fit[i].params.get('n1').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, Q1='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif "'Q1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLsQ(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, fs1='none', n1=self.Fit[i].params.get('n1').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, Q1=self.Fit[i].params.get('Q1').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif circuit == 'R-TLs':
self.fit_Rs = []
self.fit_R = []
self.fit_n = []
self.fit_Ri = []
self.fit_L = []
self.fit_fs = []
self.fit_Q = []
for i in range(len(self.df)):
if "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsTLs(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, R=self.Fit[i].params.get('R').value, Q='none', n=self.Fit[i].params.get('n').value, fs=self.Fit[i].params.get('fs').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif "'Q'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsTLs(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, fs='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif circuit == 'R-RQ-TLs':
self.fit_Rs = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_R2 = []
self.fit_n2 = []
self.fit_Ri = []
self.fit_L = []
self.fit_fs1 = []
self.fit_fs2 = []
self.fit_Q1 = []
self.fit_Q2 = []
for i in range(len(self.df)):
if "'fs1'" in str(self.Fit[i].params.keys()) and "'fs2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLs(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, R1=self.Fit[i].params.get('R1').value, n1=self.Fit[i].params.get('n1').value, fs1=self.Fit[i].params.get('fs1').value, R2=self.Fit[i].params.get('R2').value, n2=self.Fit[i].params.get('n2').value, fs2=self.Fit[i].params.get('fs2').value, Q1='none', Q2='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_fs2.append(self.Fit[i].params.get('fs2').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif "'Q1'" in str(self.Fit[i].params.keys()) and "'fs2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLs(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, R1=self.Fit[i].params.get('R1').value, n1=self.Fit[i].params.get('n1').value, fs1='none', R2=self.Fit[i].params.get('R2').value, n2=self.Fit[i].params.get('n2').value, fs2=self.Fit[i].params.get('fs2').value, Q1=self.Fit[i].params.get('Q1').value, Q2='none'))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_fs2.append(self.Fit[i].params.get('fs2').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif "'fs1'" in str(self.Fit[i].params.keys()) and "'Q2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLs(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, R1=self.Fit[i].params.get('R1').value, n1=self.Fit[i].params.get('n1').value, fs1=self.Fit[i].params.get('fs1').value, R2=self.Fit[i].params.get('R2').value, n2=self.Fit[i].params.get('n2').value, fs2='none', Q1='none', Q2=self.Fit[i].params.get('Q2').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif "'Q1'" in str(self.Fit[i].params.keys()) and "'Q2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLs(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, L=self.Fit[i].params.get('L').value, Ri=self.Fit[i].params.get('Ri').value, R1=self.Fit[i].params.get('R1').value, n1=self.Fit[i].params.get('n1').value, fs1='none', R2=self.Fit[i].params.get('R2').value, n2=self.Fit[i].params.get('n2').value, fs2='none', Q1=self.Fit[i].params.get('Q1').value, Q2=self.Fit[i].params.get('Q2').value))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_L.append(self.Fit[i].params.get('L').value)
elif circuit == 'R-TLQ':
self.fit_L = []
self.fit_Rs = []
self.fit_Q = []
self.fit_n = []
self.fit_Rel = []
self.fit_Ri = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsTLQ(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif circuit == 'R-RQ-TLQ':
self.fit_Rs = []
self.fit_L = []
self.fit_Q = []
self.fit_n = []
self.fit_Rel = []
self.fit_Ri = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_fs1 = []
self.fit_Q1 = []
for i in range(len(self.df)):
if "'fs1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLQ(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value, R1=self.Fit[i].params.get('R1').value, n1=self.Fit[i].params.get('n1').value, fs1=self.Fit[i].params.get('fs1').value, Q1='none'))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
elif "'Q1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTLQ(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value, R1=self.Fit[i].params.get('R1').value, n1=self.Fit[i].params.get('n1').value, fs1='none', Q1=self.Fit[i].params.get('Q1').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
elif circuit == 'R-TL':
self.fit_L = []
self.fit_Rs = []
self.fit_R = []
self.fit_fs = []
self.fit_n = []
self.fit_Rel = []
self.fit_Ri = []
for i in range(len(self.df)):
if "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsTL(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, fs=self.Fit[i].params.get('fs').value, n=self.Fit[i].params.get('n').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value, Q='none'))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif circuit == 'R-RQ-TL':
self.fit_L = []
self.fit_Rs = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_R2 = []
self.fit_n2 = []
self.fit_Rel = []
self.fit_Ri = []
self.fit_Q1 = []
self.fit_Q2 = []
self.fit_fs1 = []
self.fit_fs2 = []
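# As for R-RQ-RQ, the two RQ-type elements yield four fs/Q combinations, each
# dispatched to cir_RsRQTL with the unused parameterization passed as 'none'.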
for i in range(len(self.df)):
if "'Q1'" in str(self.Fit[i].params.keys()) and "'Q2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTL(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, fs1='none', Q1=self.Fit[i].params.get('Q1').value, n1=self.Fit[i].params.get('n1').value, R2=self.Fit[i].params.get('R2').value, fs2='none', Q2=self.Fit[i].params.get('Q2').value, n2=self.Fit[i].params.get('n2').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif "'fs1'" in str(self.Fit[i].params.keys()) and "'fs2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTL(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, fs1=self.Fit[i].params.get('fs1').value, Q1='none', n1=self.Fit[i].params.get('n1').value, R2=self.Fit[i].params.get('R2').value, fs2=self.Fit[i].params.get('fs2').value, Q2='none', n2=self.Fit[i].params.get('n2').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_fs2.append(self.Fit[i].params.get('fs2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif "'Q1'" in str(self.Fit[i].params.keys()) and "'fs2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTL(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, fs1='none', Q1=self.Fit[i].params.get('Q1').value, n1=self.Fit[i].params.get('n1').value, R2=self.Fit[i].params.get('R2').value, fs2=self.Fit[i].params.get('fs2').value, Q2='none', n2=self.Fit[i].params.get('n2').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_fs2.append(self.Fit[i].params.get('fs2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif "'fs1'" in str(self.Fit[i].params.keys()) and "'Q2'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTL(w=self.df[i].w, L=self.Fit[i].params.get('L').value, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, fs1=self.Fit[i].params.get('fs1').value, Q1='none', n1=self.Fit[i].params.get('n1').value, R2=self.Fit[i].params.get('R2').value, fs2='none', Q2=self.Fit[i].params.get('Q2').value, n2=self.Fit[i].params.get('n2').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif circuit == 'R-TL1Dsolid':
self.fit_L = []
self.fit_radius = []
self.fit_D = []
self.fit_Rs = []
self.fit_R = []
self.fit_Q = []
self.fit_n = []
self.fit_R_w = []
self.fit_n_w = []
self.fit_Rel = []
self.fit_Ri = []
for i in range(len(self.df)):
self.circuit_fit.append(cir_RsTL_1Dsolid(w=self.df[i].w, L=self.Fit[i].params.get('L').value, D=self.Fit[i].params.get('D').value, radius=self.Fit[i].params.get('radius').value, Rs=self.Fit[i].params.get('Rs').value, R=self.Fit[i].params.get('R').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, R_w=self.Fit[i].params.get('R_w').value, n_w=self.Fit[i].params.get('n_w').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_radius.append(self.Fit[i].params.get('radius').value)
self.fit_D.append(self.Fit[i].params.get('D').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R.append(self.Fit[i].params.get('R').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n.append(self.Fit[i].params.get('n').value)
self.fit_R_w.append(self.Fit[i].params.get('R_w').value)
self.fit_n_w.append(self.Fit[i].params.get('n_w').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif circuit == 'R-RQ-TL1Dsolid':
self.fit_L = []
self.fit_radius = []
self.fit_D = []
self.fit_Rs = []
self.fit_R1 = []
self.fit_n1 = []
self.fit_R2 = []
self.fit_Q2 = []
self.fit_n2 = []
self.fit_R_w = []
self.fit_n_w = []
self.fit_Rel = []
self.fit_Ri = []
self.fit_fs1 = []
self.fit_Q1 = []
for i in range(len(self.df)):
if "'fs1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTL_1Dsolid(w=self.df[i].w, L=self.Fit[i].params.get('L').value, D=self.Fit[i].params.get('D').value, radius=self.Fit[i].params.get('radius').value, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, Q1='none', fs1=self.Fit[i].params.get('fs1').value, n1=self.Fit[i].params.get('n1').value, R2=self.Fit[i].params.get('R2').value, Q2=self.Fit[i].params.get('Q2').value, n2=self.Fit[i].params.get('n2').value, R_w=self.Fit[i].params.get('R_w').value, n_w=self.Fit[i].params.get('n_w').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_radius.append(self.Fit[i].params.get('radius').value)
self.fit_D.append(self.Fit[i].params.get('D').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_fs1.append(self.Fit[i].params.get('fs1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_R_w.append(self.Fit[i].params.get('R_w').value)
self.fit_n_w.append(self.Fit[i].params.get('n_w').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif "'Q1'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_RsRQTL_1Dsolid(w=self.df[i].w, L=self.Fit[i].params.get('L').value, D=self.Fit[i].params.get('D').value, radius=self.Fit[i].params.get('radius').value, Rs=self.Fit[i].params.get('Rs').value, R1=self.Fit[i].params.get('R1').value, Q1=self.Fit[i].params.get('Q1').value, fs1='none', n1=self.Fit[i].params.get('n1').value, R2=self.Fit[i].params.get('R2').value, Q2=self.Fit[i].params.get('Q2').value, n2=self.Fit[i].params.get('n2').value, R_w=self.Fit[i].params.get('R_w').value, n_w=self.Fit[i].params.get('n_w').value, Rel=self.Fit[i].params.get('Rel').value, Ri=self.Fit[i].params.get('Ri').value))
self.fit_L.append(self.Fit[i].params.get('L').value)
self.fit_radius.append(self.Fit[i].params.get('radius').value)
self.fit_D.append(self.Fit[i].params.get('D').value)
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_R1.append(self.Fit[i].params.get('R1').value)
self.fit_Q1.append(self.Fit[i].params.get('Q1').value)
self.fit_n1.append(self.Fit[i].params.get('n1').value)
self.fit_R2.append(self.Fit[i].params.get('R2').value)
self.fit_Q2.append(self.Fit[i].params.get('Q2').value)
self.fit_n2.append(self.Fit[i].params.get('n2').value)
self.fit_R_w.append(self.Fit[i].params.get('R_w').value)
self.fit_n_w.append(self.Fit[i].params.get('n_w').value)
self.fit_Rel.append(self.Fit[i].params.get('Rel').value)
self.fit_Ri.append(self.Fit[i].params.get('Ri').value)
elif circuit == 'C-RC-C':
self.fit_Ce = []
self.fit_Rb = []
self.fit_fsb = []
self.fit_Cb = []
for i in range(len(self.df)):
if "'fsb'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_C_RC_C(w=self.df[i].w, Ce=self.Fit[i].params.get('Ce').value, Cb='none', Rb=self.Fit[i].params.get('Rb').value, fsb=self.Fit[i].params.get('fsb').value))
self.fit_Ce.append(self.Fit[i].params.get('Ce').value)
self.fit_Rb.append(self.Fit[i].params.get('Rb').value)
self.fit_fsb.append(self.Fit[i].params.get('fsb').value)
elif "'Cb'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_C_RC_C(w=self.df[i].w, Ce=self.Fit[i].params.get('Ce').value, Cb=self.Fit[i].params.get('Cb').value, Rb=self.Fit[i].params.get('Rb').value, fsb='none'))
self.fit_Ce.append(self.Fit[i].params.get('Ce').value)
self.fit_Rb.append(self.Fit[i].params.get('Rb').value)
self.fit_Cb.append(self.Fit[i].params.get('Cb').value)
elif circuit == 'Q-RQ-Q':
self.fit_Qe = []
self.fit_ne = []
self.fit_Rb = []
self.fit_nb = []
self.fit_fsb = []
self.fit_Qb = []
for i in range(len(self.df)):
if "'fsb'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Q_RQ_Q(w=self.df[i].w, Qe=self.Fit[i].params.get('Qe').value, ne=self.Fit[i].params.get('ne').value, Qb='none', Rb=self.Fit[i].params.get('Rb').value, fsb=self.Fit[i].params.get('fsb').value, nb=self.Fit[i].params.get('nb').value))
self.fit_Qe.append(self.Fit[i].params.get('Qe').value)
self.fit_ne.append(self.Fit[i].params.get('ne').value)
self.fit_Rb.append(self.Fit[i].params.get('Rb').value)
self.fit_fsb.append(self.Fit[i].params.get('fsb').value)
self.fit_nb.append(self.Fit[i].params.get('nb').value)
elif "'Qb'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Q_RQ_Q(w=self.df[i].w, Qe=self.Fit[i].params.get('Qe').value, ne=self.Fit[i].params.get('ne').value, Qb=self.Fit[i].params.get('Qb').value, Rb=self.Fit[i].params.get('Rb').value, fsb='none', nb=self.Fit[i].params.get('nb').value))
self.fit_Qe.append(self.Fit[i].params.get('Qe').value)
self.fit_ne.append(self.Fit[i].params.get('ne').value)
self.fit_Rb.append(self.Fit[i].params.get('Rb').value)
self.fit_Qb.append(self.Fit[i].params.get('Qb').value)
self.fit_nb.append(self.Fit[i].params.get('nb').value)
else:
print('The circuit was not properly defined; see the circuit options described in the function definition')
def EIS_plot(self, bode='off', fitting='off', rr='off', nyq_xlim='none', nyq_ylim='none', legend='on', savefig='none'):
'''
Plots Experimental and fitted impedance data in three subplots:
a) Nyquist, b) Bode, c) relative residuals between experimental and fit
<NAME> (<EMAIL> / <EMAIL>)
Optional Inputs
-----------------
- bode
Plots the Bode Plot with the following possibilities
- 'on' = re, im vs. log(freq)
- 'log' = log(re, im) vs. log(freq)
- 're' = re vs. log(freq)
- 'log_re' = log(re) vs. log(freq)
- 'im' = im vs. log(freq)
- 'log_im' = log(im) vs. log(freq)
- legend:
Legend options
- 'on' = illustrates the cycle number
- 'off' = off
- 'potential' = illustrates the potential
- fitting:
If EIS_fit() has been called, set fitting to 'on' to overlay the fitted data on the experimental data
- 'on'
- 'off' (default)
- rr:
The relative residuals between fit and experimental data
- 'on' = opens a new subplot
- 'off' (default)
- nyq_xlim/nyq_ylim:
x/y-axis limits of the Nyquist plot; if not 'none', supply a [min,max] list
'''
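# A minimal usage sketch (hypothetical file/instance names; assumes data has
# already been loaded into an EIS_exp-like object and, optionally, fitted):
#   ex = EIS_exp(path='data/', data=['cell1.mpt'])
#   ex.EIS_fit(params, circuit='R-RQ-RQ', weight_func='modulus')
#   ex.EIS_plot(bode='on', fitting='on', rr='on', legend='potential')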
if bode=='off':
fig = figure(dpi=120, facecolor='w', edgecolor='w')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(111, aspect='equal')
elif bode in ('on', 'log', 're', 'log_re', 'im', 'log_im') and rr=='off':
fig = figure(figsize=(6, 5), dpi=120, facecolor='w', edgecolor='w')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(211, aspect='equal')
ax1 = fig.add_subplot(212)
elif bode in ('on', 'log', 're', 'log_re', 'im', 'log_im') and rr=='on':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
### Colors
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Label functions
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
else: # legend == 'off'; empty labels avoid an IndexError in the plotting loops below
for i in range(len(self.df)):
self.label_re_1.append('')
self.label_im_1.append('')
self.label_cycleno.append('')
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', label=self.label_cycleno[i])
if fitting == 'on':
ax.plot(self.circuit_fit[i].to_numpy().real, -self.circuit_fit[i].to_numpy().imag, lw=0, marker='o', ms=8, mec='r', mew=1, mfc='none', label='')
### Bode Plot
if bode=='on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i], marker='D', ms=3, lw=2.25, ls='-', label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i], marker='s', ms=3, lw=2.25, ls='-', label=self.label_im_1[i])
if fitting == 'on':
ax1.plot(np.log10(self.df[i].f), self.circuit_fit[i].to_numpy().real, lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='')
ax1.plot(np.log10(self.df[i].f), -self.circuit_fit[i].to_numpy().imag, lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i], marker='D', ms=3, lw=2.25, ls='-', label=self.label_cycleno[i])
if fitting == 'on':
ax1.plot(np.log10(self.df[i].f), self.circuit_fit[i].to_numpy().real, lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend =='potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i], marker='D', ms=3, lw=2.25, ls='-', label=self.label_cycleno[i])
if fitting == 'on':
ax1.plot(np.log10(self.df[i].f), np.log10(self.circuit_fit[i].to_numpy().real), lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode=='im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i], marker='s', ms=3, lw=2.25, ls='-', label=self.label_cycleno[i])
if fitting == 'on':
ax1.plot(np.log10(self.df[i].f), -self.circuit_fit[i].to_numpy().imag, lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none', label='')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode=='log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i], marker='s', ms=3, lw=2.25, ls='-', label=self.label_cycleno[i])
if fitting == 'on':
ax1.plot(np.log10(self.df[i].f), np.log10(-self.circuit_fit[i].to_numpy().imag), lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none', label='')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i], marker='D', ms=3, lw=2.25, ls='-', label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i], marker='s', ms=3, lw=2.25, ls='-', label=self.label_im_1[i])
if fitting == 'on':
ax1.plot(np.log10(self.df[i].f), np.log10(self.circuit_fit[i].to_numpy().real), lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='')
ax1.plot(np.log10(self.df[i].f), np.log10(-self.circuit_fit[i].to_numpy().imag), lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Relative Residuals on Fit
if rr=='on':
if fitting == 'off':
print('Fitting has not been performed, thus the relative residuals cannot be determined')
elif fitting == 'on':
self.rr_real = []
self.rr_imag = []
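# The relative residuals are computed by residual_real()/residual_imag(); the
# usual definition (assumed here) normalizes the deviation of each fitted point
# by the modulus of the fitted impedance, e.g. rr_real = (Z'_exp - Z'_fit)/|Z_fit|.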
for i in range(len(self.df)):
self.rr_real.append(residual_real(re=self.df[i].re.values, fit_re=self.circuit_fit[i].to_numpy().real, fit_im=-self.circuit_fit[i].to_numpy().imag))
self.rr_imag.append(residual_imag(im=self.df[i].im.values, fit_re=self.circuit_fit[i].to_numpy().real, fit_im=-self.circuit_fit[i].to_numpy().imag))
if legend == 'on':
ax2.plot(np.log10(self.df[i].f), self.rr_real[i]*100, color=colors_real[i], marker='D', ms=6, lw=1, ls='--', label='#'+str(i+1))
ax2.plot(np.log10(self.df[i].f), self.rr_imag[i]*100, color=colors_imag[i], marker='s', ms=6, lw=1, ls='--',label='')
elif legend == 'potential':
ax2.plot(np.log10(self.df[i].f), self.rr_real[i]*100, color=colors_real[i], marker='D', ms=6, lw=1, ls='--', label=str(np.round(np.average(self.df[i].E_avg.values),2))+' V')
ax2.plot(np.log10(self.df[i].f), self.rr_imag[i]*100, color=colors_imag[i], marker='s', ms=6, lw=1, ls='--',label='')
ax2.axhline(0, ls='--', c='k', alpha=.5)
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
#Automatic y-limits
self.rr_im_min = []
self.rr_im_max = []
self.rr_re_min = []
self.rr_re_max = []
for i in range(len(self.df)): # looped, since cycles may contain different numbers of data points
self.rr_im_min.append(np.min(self.rr_imag[i]))
self.rr_im_max.append(np.max(self.rr_imag[i]))
self.rr_re_min.append(np.min(self.rr_real[i]))
self.rr_re_max.append(np.max(self.rr_real[i]))
self.rr_ymax = max(np.max(self.rr_re_max), np.max(self.rr_im_max))
self.rr_ymin = min(np.min(self.rr_re_min), np.min(self.rr_im_min))
if np.abs(self.rr_ymin) > np.abs(self.rr_ymax):
ax2.set_ylim(self.rr_ymin*100*1.5, np.abs(self.rr_ymin)*100*1.5)
ax2.annotate("$\Delta$Z'", xy=(np.log10(np.min(self.df[0].f)), np.abs(self.rr_ymin)*100*1.2), color=colors_real[-1], fontsize=12)
ax2.annotate("$\Delta$-Z''", xy=(np.log10(np.min(self.df[0].f)), np.abs(self.rr_ymin)*100*0.9), color=colors_imag[-1], fontsize=12)
else:
ax2.set_ylim(np.negative(self.rr_ymax)*100*1.5, np.abs(self.rr_ymax)*100*1.5)
ax2.annotate("$\Delta$Z'", xy=(np.log10(np.min(self.df[0].f)), np.abs(self.rr_ymax)*100*1.2), color=colors_real[-1], fontsize=12)
ax2.annotate("$\Delta$-Z''", xy=(np.log10(np.min(self.df[0].f)), np.abs(self.rr_ymax)*100*0.9), color=colors_imag[-1], fontsize=12)
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig) #saves the figure if a file name/path is given
def Fit_uelectrode(self, params, circuit, D_ox, r, theta_real_red, theta_imag_red, n, T, F, R, Q='none', weight_func='modulus', nan_policy='raise'):
'''
Fit the reductive microdisk electrode impedance response following either BV or MHC infinite kinetics
<NAME> (<EMAIL> / <EMAIL>)
'''
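# Usage sketch (hypothetical initial guesses; params is an lmfit Parameters
# object and the theta_* inputs are arrays from a microdisk simulation):
#   params = Parameters()
#   params.add('Rs', value=20, min=0)
#   params.add('Rct', value=150, min=0)
#   params.add('fs', value=1e3, min=1e-2, max=1e6)
#   params.add('n_Q', value=0.9, min=0.5, max=1)
#   params.add('E0', value=0.0)
#   params.add('alpha', value=0.5, min=0, max=1)
#   params.add('C_ox', value=1e-6, min=0)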
self.Fit = []
self.circuit_fit = []
for i in range(len(self.df)):
self.Fit.append(minimize(leastsq_errorfunc_uelectrode, params, method='leastsq', args=(self.df[i].w, self.df[i].re, self.df[i].im, circuit, weight_func, np.average(self.df[i].E_avg), D_ox, r, theta_real_red, theta_imag_red, n, T, F, R), nan_policy=nan_policy, maxfev=9999990))
print(report_fit(self.Fit[i]))
if circuit == 'R-(Q(RM)),BV_red':
if "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Rs_QRM_BV_red(w=self.df[i].w, E=np.average(self.df[i].E_avg), E0=self.Fit[i].params.get('E0').value, Rs=self.Fit[i].params.get('Rs').value, fs=self.Fit[i].params.get('fs').value, n_Q=self.Fit[i].params.get('n_Q').value, Q='none', Rct=self.Fit[i].params.get('Rct').value, alpha=self.Fit[i].params.get('alpha').value, C_ox=self.Fit[i].params.get('C_ox').value, D_ox=D_ox, r=r, theta_real_red=theta_real_red, theta_imag_red=theta_imag_red, n=n, T=T, F=F, R=R))
elif "'Q'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Rs_QRM_BV_red(w=self.df[i].w, E=np.average(self.df[i].E_avg), E0=self.Fit[i].params.get('E0').value, Rs=self.Fit[i].params.get('Rs').value, fs='none', n_Q=self.Fit[i].params.get('n_Q').value, Q=self.Fit[i].params.get('Q').value, Rct=self.Fit[i].params.get('Rct').value, alpha=self.Fit[i].params.get('alpha').value, C_ox=self.Fit[i].params.get('C_ox').value, D_ox=D_ox, r=r, theta_real_red=theta_real_red, theta_imag_red=theta_imag_red, n=n, T=T, F=F, R=R))
def uelectrode(self, params, circuit, E, alpha, n, C_ox, D_red, D_ox, r, theta_real_red, theta_real_ox, theta_imag_red, theta_imag_ox, F, R, T, weight_func='modulus', nan_policy='raise'):
'''
<NAME> (<EMAIL> / <EMAIL>)
Inputs
------------
- Rs = Series resistance [ohm]
- Q = constant phase element [s/ohm]
- n_Q = exponent of Q
- Rct = Charge transfer resistance [ohm]
- C_ox = concentration of oxidized species [mol/cm3]
- circuit:
- 'R-(Q(RM))'
- 'R-RQ-(Q(RM))'
- weight_func = Weight function, Three options:
- modulus (default)
- unity
- proportional
- nan_policy = if nan values cause issues during fitting, use 'propagate'; otherwise 'raise' (default)
Returns
------------
Returns the fitted impedance spectra as well as the fitted values of the parameters given in the initial guesses. Access these as e.g. self.fit_Rs
'''
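# Note: supply only one of 'fs' or 'Q' in params; the branches below detect
# which parameterization was used (fs is the summit frequency of the RQ
# element, from which Q is derived inside the circuit function).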
self.Fit = []
self.circuit_fit = []
self.fit_Rs = []
self.fit_Q = []
self.fit_fs = []
self.fit_n_Q = []
self.fit_Rct = []
self.fit_E0 = []
self.fit_Cred = []
for i in range(len(self.df)):
self.Fit.append(minimize(leastsq_errorfunc_uelectrode, params, method='leastsq', args=(self.df[i].w, self.df[i].re, self.df[i].im, circuit, weight_func, E, alpha, n, C_ox, D_red, D_ox, r, theta_real_red, theta_real_ox, theta_imag_red, theta_imag_ox, F, R, T), nan_policy=nan_policy, maxfev=9999990))
print(report_fit(self.Fit[i]))
if circuit == 'R-(Q(RM))':
if "'fs'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Rs_QRM(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, fs=self.Fit[i].params.get('fs').value, Q='none', n_Q=self.Fit[i].params.get('n_Q').value, Rct=self.Fit[i].params.get('Rct').value, E=E, E0=self.Fit[i].params.get('E0').value, alpha=alpha, n=n, C_red=self.Fit[i].params.get('C_red').value, C_ox=C_ox, D_red=D_red, D_ox=D_ox, r=r, theta_real_red=theta_real_red, theta_real_ox=theta_real_ox, theta_imag_red=theta_imag_red, theta_imag_ox=theta_imag_ox, T=T, F=F, R=R))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_fs.append(self.Fit[i].params.get('fs').value)
self.fit_n_Q.append(self.Fit[i].params.get('n_Q').value)
self.fit_Rct.append(self.Fit[i].params.get('Rct').value)
self.fit_E0.append(self.Fit[i].params.get('E0').value)
self.fit_Cred.append(self.Fit[i].params.get('C_red').value)
elif "'Q'" in str(self.Fit[i].params.keys()):
self.circuit_fit.append(cir_Rs_QRM(w=self.df[i].w, Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, fs='none', n_Q=self.Fit[i].params.get('n_Q').value, Rct=self.Fit[i].params.get('Rct').value, E=E, E0=self.Fit[i].params.get('E0').value, alpha=alpha, n=n, C_red=self.Fit[i].params.get('C_red').value, C_ox=C_ox, D_red=D_red, D_ox=D_ox, r=r, theta_real_red=theta_real_red, theta_real_ox=theta_real_ox, theta_imag_red=theta_imag_red, theta_imag_ox=theta_imag_ox, T=T, F=F, R=R))
self.fit_Rs.append(self.Fit[i].params.get('Rs').value)
self.fit_Q.append(self.Fit[i].params.get('Q').value)
self.fit_n_Q.append(self.Fit[i].params.get('n_Q').value)
self.fit_Rct.append(self.Fit[i].params.get('Rct').value)
self.fit_E0.append(self.Fit[i].params.get('E0').value)
self.fit_Cred.append(self.Fit[i].params.get('C_red').value)
def uelectrode_sim_fit(self, params, circuit, E, alpha, n, C_ox, D_red, D_ox, r, theta_real_red, theta_real_ox, theta_imag_red, theta_imag_ox, F, R, T, weight_func='modulus', nan_policy='raise'):
'''
In development..
<NAME> (<EMAIL> / <EMAIL>)
Inputs
------------
- weight_func = Weight function, Three options:
- modulus (default)
- unity
- proportional
- nan_policy = if nan values cause issues during fitting, use 'propagate'; otherwise 'raise' (default)
- nyq_xlim/nyq_ylim: x/y-axis limits of the Nyquist plot; if not 'none', supply a [min,max] list
- legend: Display legend
Turn 'on', 'off'
- bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
- fitting: if EIS_exp_fit() has been called, set to 'on' to plot experimental data and fits
Turn 'on', 'off'
- rr: relative residuals. Gives relative residuals of fit from experimental data.
Turn 'on', 'off'
Returns
------------
The fitted impedance spectra as well as the fitted values of the parameters given in the initial guesses. Access these as e.g. self.fit_Rs
'''
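# Usage mirrors uelectrode(), but this method fits a single simulated spectrum
# (self.w, self.re, self.im) instead of looping over the experimental data in self.df.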
self.Fit = minimize(leastsq_errorfunc_uelectrode, params, method='leastsq', args=(self.w, self.re, self.im, circuit, weight_func, E, alpha, n, C_ox, D_red, D_ox, r, theta_real_red, theta_real_ox, theta_imag_red, theta_imag_ox, F, R, T), nan_policy=nan_policy, maxfev=9999990)
print(report_fit(self.Fit))
def plot_Cdl_E(self, interface, BET_Area, m_electrode):
'''
Normalizing Q to C_eff or Cdl using either norm_nonFara_Q_C() or norm_Fara_Q_C()
Refs:
- <NAME>, A. L. G. van den Eeden, M. Sluyters-Rehbach, and J. H. Sluyters, J. Electroanal. Chem. Interfacial Electrochem., 176, 275 (1984)
- <NAME>, Electrochimica Acta, 55, 6218 (2010)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
---------
interface = faradaic / nonfaradaic
BET_Area = BET surface area of the electrode material [cm2/mg]
m_electrode = mass of electrode [mg]
Returns
---------
C_eff/C_dl = normalized double-layer capacitance measured from impedance [uF/cm2] (normalized by norm_nonFara_Q_C() or norm_Fara_Q_C())
'''
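# Unit check for the normalization performed below (assuming Q_norm is in F):
#   C_norm [uF/cm2] = Q_norm [F] / (m_electrode [mg] * BET_Area [cm2/mg]) * 10**6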
fig = figure(dpi=120, facecolor='w', edgecolor='w')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(111)
self.Q_norm = []
self.E = []
if interface == 'nonfaradaic':
self.Q_norm = []
for i in range(len(self.df)):
#self.Q_norm.append(norm_nonFara_Q_C(Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value, L=self.Fit[i].params.get('L').value) )
self.Q_norm.append(norm_nonFara_Q_C(Rs=self.Fit[i].params.get('Rs').value, Q=self.Fit[i].params.get('Q').value, n=self.Fit[i].params.get('n').value) )
self.E.append(np.average(self.df[i].E_avg))
elif interface == 'faradaic':
self.Q_norm = []
for j in range(len(self.df)):
self.Q_norm.append(norm_Fara_Q_C(Rs=self.Fit[j].params.get('Rs').value, Rct=self.Fit[j].params.get('R').value, n=self.Fit[j].params.get('n').value, fs=self.Fit[j].params.get('fs').value, L=self.Fit[j].params.get('L').value))
self.E.append(np.average(self.df[j].E_avg))
self.C_norm = (np.array(self.Q_norm)/(m_electrode*BET_Area))*10**6 #'uF/cm2'
ax.plot(self.E, self.C_norm, 'o--', label='C$_{dl}$')
ax.set_xlabel('Voltage [V]')
ax.set_ylabel('C$_{dl}$ [$\mu$F/cm$^2$]')
class EIS_sim:
'''
Simulates and plots electrochemical impedance spectroscopy data based on built-in equivalent circuit models
<NAME> (<EMAIL> || <EMAIL>)
Following circuits are implemented:
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-Q
- R-RQ-Q
- R-(Q(RW))
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
Inputs
--------
- nyq_xlim/nyq_ylim:
x/y-axis limits of the Nyquist plot; if not 'none', supply a [min,max] list
- bode: Plots following Bode plots
- 'off'
- 'on' = re, im vs. log(freq)
- 'log' = log(re, im) vs. log(freq)
- 're' = re vs. log(freq)
- 'log_re' = log(re) vs. log(freq)
- 'im' = im vs. log(freq)
- 'log_im' = log(im) vs. log(freq)
'''
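# Usage sketch (hypothetical parameter values; cir_RsRQ is one of the built-in
# circuit functions defined elsewhere in this module):
#   f_range = np.logspace(6, -2, 100)
#   Z = cir_RsRQ(w=2*np.pi*f_range, Rs=10, R=100, fs=1e3, n=0.8, Q='none')
#   sim = EIS_sim(circuit=Z, frange=f_range, bode='on')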
def __init__(self, circuit, frange, bode='off', nyq_xlim='none', nyq_ylim='none', legend='on', savefig='none'):
self.f = frange
self.w = 2*np.pi*frange
self.re = circuit.real
self.im = -circuit.imag
if bode=='off':
fig = figure(dpi=120, facecolor='w', edgecolor='w')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(111, aspect='equal')
elif bode in ('on', 'log', 're', 'log_re', 'im', 'log_im'):
fig = figure(figsize=(6, 4.5), dpi=120, facecolor='w', edgecolor='w')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(211, aspect='equal')
ax1 = fig.add_subplot(212)
colors = sns.color_palette("colorblind", n_colors=1)
colors_real = sns.color_palette("Blues", n_colors=1)
colors_imag = sns.color_palette("Oranges", n_colors=1)
### Nyquist Plot
ax.plot(self.re, self.im, color=colors[0], marker='o', ms=4, lw=2, ls='-', label='Sim')
### Bode Plot
if bode=='on':
ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label="-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode=='im':
ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label="-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode=='log_im':
ax1.plot(np.log10(self.f), np.log10(self.im), color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label="-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.plot(np.log10(self.f), np.log10(self.im), color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label="-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Figure specifics
if legend == 'on':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig) #saves the figure if a file name/path is given
def EIS_sim_fit(self, params, circuit, weight_func='modulus', nan_policy='raise', bode='on', nyq_xlim='none', nyq_ylim='none', legend='on', savefig='none'):
'''
Fits a simulated impedance spectrum with a selected circuit. This function is mainly used to test the fitting functions before they are applied to experimental data
<NAME> (<EMAIL> / <EMAIL>)
Inputs
------------
- circuit: Equivalent circuit models
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-Q
- R-RQ-Q
- R-(Q(RW))
- C-RC-C
- Q-RQ-Q
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func = Weight function, Three options:
- modulus (default)
- unity
- proportional
- nyq_xlim/nyq_ylim: x/y-axis limits of the Nyquist plot; if not 'none', supply a [min,max] list
- legend: Display legend
Turn 'on', 'off'
- bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
Returns
------------
The fitted impedance spectra as well as the fitted values of the parameters given in the initial guesses. Access these as e.g. self.fit_Rs
'''
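# Usage sketch (hypothetical initial guesses; fits the simulated spectrum held
# by this EIS_sim instance with an R-RQ circuit parameterized by the summit
# frequency fs):
#   params = Parameters()
#   params.add('Rs', value=12, min=0.1, max=1000)
#   params.add('R', value=90, min=0.1, max=1000)
#   params.add('n', value=0.9, min=0.5, max=1)
#   params.add('fs', value=5e2, min=1e-2, max=1e6)
#   sim.EIS_sim_fit(params, circuit='R-RQ', bode='on')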
self.Fit = minimize(leastsq_errorfunc, params, method='leastsq', args=(self.w, self.re, self.im, circuit, weight_func), maxfev=9999990, nan_policy=nan_policy)
print(report_fit(self.Fit))
if circuit == 'C':
self.circuit_fit = elem_C(w=self.w, C=self.Fit.params.get('C').value)
self.fit_C = []
self.fit_C.append(self.Fit.params.get('C').value)
elif circuit == 'Q':
self.circuit_fit = elem_Q(w=self.w, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
elif circuit == 'R-C':
self.circuit_fit = cir_RsC(w=self.w, Rs=self.Fit.params.get('Rs').value, C=self.Fit.params.get('C').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_C = []
self.fit_C.append(self.Fit.params.get('C').value)
elif circuit == 'R-Q':
self.circuit_fit = cir_RsQ(w=self.w, Rs=self.Fit.params.get('Rs').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
elif circuit == 'RC':
if "'C'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RC(w=self.w, C=self.Fit.params.get('C').value, R=self.Fit.params.get('R').value, fs='none')
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_C = []
self.fit_C.append(self.Fit.params.get('C').value)
elif "'fs'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RC(w=self.w, C='none', R=self.Fit.params.get('R').value, fs=self.Fit.params.get('fs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
elif circuit == 'RQ':
if "'fs'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RQ(w=self.w, R=self.Fit.params.get('R').value, Q='none', n=self.Fit.params.get('n').value, fs=self.Fit.params.get('fs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
elif "'Q'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RQ(w=self.w, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, fs='none')
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
elif circuit == 'R-RQ':
if "'fs'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q='none', n=self.Fit.params.get('n').value, fs=self.Fit.params.get('fs').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
elif "'Q'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, fs='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
elif circuit == 'R-RQ-RQ':
if "'fs'" in str(self.Fit.params.keys()) and "'fs2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQRQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q='none', n=self.Fit.params.get('n').value, fs=self.Fit.params.get('fs').value, R2=self.Fit.params.get('R2').value, Q2='none', n2=self.Fit.params.get('n2').value, fs2=self.Fit.params.get('fs2').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
self.fit_R2 =[]
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_fs2 = []
self.fit_fs2.append(self.Fit.params.get('fs2').value)
elif "'Q'" in str(self.Fit.params.keys()) and "'fs2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQRQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, fs='none', R2=self.Fit.params.get('R2').value, Q2='none', n2=self.Fit.params.get('n2').value, fs2=self.Fit.params.get('fs2').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_fs2 = []
self.fit_fs2.append(self.Fit.params.get('fs2').value)
elif "'fs'" in str(self.Fit.params.keys()) and "'Q2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQRQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q='none', n=self.Fit.params.get('n').value, fs=self.Fit.params.get('fs').value, R2=self.Fit.params.get('R2').value, Q2=self.Fit.params.get('Q2').value, n2=self.Fit.params.get('n2').value, fs2='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
elif "'Q'" in str(self.Fit.params.keys()) and "'Q2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQRQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, fs='none', R2=self.Fit.params.get('R2').value, Q2=self.Fit.params.get('Q2').value, n2=self.Fit.params.get('n2').value, fs2='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
elif circuit == 'R-RC-C':
self.circuit_fit = cir_RsRCC(w=self.w, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, C1=self.Fit.params.get('C1').value, C=self.Fit.params.get('C').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_C1 = []
self.fit_C1.append(self.Fit.params.get('C1').value)
self.fit_C = []
self.fit_C.append(self.Fit.params.get('C').value)
elif circuit == 'R-RC-Q':
self.circuit_fit = cir_RsRCQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, C1=self.Fit.params.get('C1').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 =[]
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_C1 =[]
self.fit_C1.append(self.Fit.params.get('C1').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
elif circuit == 'R-RQ-Q':
if "'fs1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQQ(w=self.w, Rs=self.Fit.params.get('Rs').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, R1=self.Fit.params.get('R1').value, Q1='none', n1=self.Fit.params.get('n1').value, fs1=self.Fit.params.get('fs1').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
if "'Q1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQQ(w=self.w, Rs=self.Fit.params.get('Rs').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, R1=self.Fit.params.get('R1').value, Q1=self.Fit.params.get('Q1').value, n1=self.Fit.params.get('n1').value, fs1='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
elif circuit == 'R-RQ-C':
if "'fs1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQC(w=self.w, Rs=self.Fit.params.get('Rs').value, C=self.Fit.params.get('C').value, R1=self.Fit.params.get('R1').value, Q1='none', n1=self.Fit.params.get('n1').value, fs1=self.Fit.params.get('fs1').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_C = []
self.fit_C.append(self.Fit.params.get('C').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
elif "'Q1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQC(w=self.w, Rs=self.Fit.params.get('Rs').value, C=self.Fit.params.get('C').value, R1=self.Fit.params.get('R1').value, Q1=self.Fit.params.get('Q1').value, n1=self.Fit.params.get('n1').value, fs1='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_C = []
self.fit_C.append(self.Fit.params.get('C').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
elif circuit == 'R-(Q(RW))':
if "'Q'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_Randles_simplified(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, fs='none', n=self.Fit.params.get('n').value, sigma=self.Fit.params.get('sigma').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_sigma = []
self.fit_sigma.append(self.Fit.params.get('sigma').value)
elif "'fs'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_Randles_simplified(w=self.w, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q='none', fs=self.Fit.params.get('fs').value, n=self.Fit.params.get('n').value, sigma=self.Fit.params.get('sigma').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_sigma = []
self.fit_sigma.append(self.Fit.params.get('sigma').value)
elif circuit == 'R-TLsQ':
self.circuit_fit = cir_RsTLsQ(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif circuit == 'R-RQ-TLsQ':
if "'fs1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLsQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, fs1=self.Fit.params.get('fs1').value, n1=self.Fit.params.get('n1').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, Q1='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif "'Q1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLsQ(w=self.w, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, fs1='none', n1=self.Fit.params.get('n1').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, Q1=self.Fit.params.get('Q1').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif circuit == 'R-TLs':
if "'fs'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsTLs(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, R=self.Fit.params.get('R').value, Q='none', n=self.Fit.params.get('n').value, fs=self.Fit.params.get('fs').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif "'Q'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsTLs(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, fs='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif circuit == 'R-RQ-TLs':
if "'fs1'" in str(self.Fit.params.keys()) and "'fs2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLs(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, R1=self.Fit.params.get('R1').value, n1=self.Fit.params.get('n1').value, fs1=self.Fit.params.get('fs1').value, R2=self.Fit.params.get('R2').value, n2=self.Fit.params.get('n2').value, fs2=self.Fit.params.get('fs2').value, Q1='none', Q2='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_fs2 = []
self.fit_fs2.append(self.Fit.params.get('fs2').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif "'Q1'" in str(self.Fit.params.keys()) and "'fs2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLs(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, R1=self.Fit.params.get('R1').value, n1=self.Fit.params.get('n1').value, fs1='none', R2=self.Fit.params.get('R2').value, n2=self.Fit.params.get('n2').value, fs2=self.Fit.params.get('fs2').value, Q1=self.Fit.params.get('Q1').value, Q2='none')
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_fs2 = []
self.fit_fs2.append(self.Fit.params.get('fs2').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif "'fs1'" in str(self.Fit.params.keys()) and "'Q2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLs(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, R1=self.Fit.params.get('R1').value, n1=self.Fit.params.get('n1').value, fs1=self.Fit.params.get('fs1').value, R2=self.Fit.params.get('R2').value, n2=self.Fit.params.get('n2').value, fs2='none', Q1='none', Q2=self.Fit.params.get('Q2').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif "'Q1'" in str(self.Fit.params.keys()) and "'Q2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLs(w=self.w, Rs=self.Fit.params.get('Rs').value, L=self.Fit.params.get('L').value, Ri=self.Fit.params.get('Ri').value, R1=self.Fit.params.get('R1').value, n1=self.Fit.params.get('n1').value, fs1='none', R2=self.Fit.params.get('R2').value, n2=self.Fit.params.get('n2').value, fs2='none', Q1=self.Fit.params.get('Q1').value, Q2=self.Fit.params.get('Q2').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
elif circuit == 'R-TLQ':
self.circuit_fit = cir_RsTLQ(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif circuit == 'R-RQ-TLQ':
if "'fs1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLQ(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value, R1=self.Fit.params.get('R1').value, n1=self.Fit.params.get('n1').value, fs1=self.Fit.params.get('fs1').value, Q1='none')
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
elif "'Q1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTLQ(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value, R1=self.Fit.params.get('R1').value, n1=self.Fit.params.get('n1').value, fs1='none', Q1=self.Fit.params.get('Q1').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
elif circuit == 'R-TL':
if "'fs'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsTL(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, fs=self.Fit.params.get('fs').value, n=self.Fit.params.get('n').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value, Q='none')
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_fs = []
self.fit_fs.append(self.Fit.params.get('fs').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif "'Q'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsTL(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, fs='none', n=self.Fit.params.get('n').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value, Q=self.Fit.params.get('Q').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif circuit == 'R-RQ-TL':
if "'Q1'" in str(self.Fit.params.keys()) and "'Q2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTL(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, fs1='none', Q1=self.Fit.params.get('Q1').value, n1=self.Fit.params.get('n1').value, R2=self.Fit.params.get('R2').value, fs2='none', Q2=self.Fit.params.get('Q2').value, n2=self.Fit.params.get('n2').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif "'fs1'" in str(self.Fit.params.keys()) and "'fs2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTL(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, fs1=self.Fit.params.get('fs1').value, Q1='none', n1=self.Fit.params.get('n1').value, R2=self.Fit.params.get('R2').value, fs2=self.Fit.params.get('fs2').value, Q2='none', n2=self.Fit.params.get('n2').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_fs2 = []
self.fit_fs2.append(self.Fit.params.get('fs2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif "'Q1'" in str(self.Fit.params.keys()) and "'fs2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTL(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, fs1='none', Q1=self.Fit.params.get('Q1').value, n1=self.Fit.params.get('n1').value, R2=self.Fit.params.get('R2').value, fs2=self.Fit.params.get('fs2').value, Q2='none', n2=self.Fit.params.get('n2').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_fs2 = []
self.fit_fs2.append(self.Fit.params.get('fs2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif "'fs1'" in str(self.Fit.params.keys()) and "'Q2'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTL(w=self.w, L=self.Fit.params.get('L').value, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, fs1=self.Fit.params.get('fs1').value, Q1='none', n1=self.Fit.params.get('n1').value, R2=self.Fit.params.get('R2').value, fs2='none', Q2=self.Fit.params.get('Q2').value, n2=self.Fit.params.get('n2').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif circuit == 'R-TL1Dsolid':
self.circuit_fit = cir_RsTL_1Dsolid(w=self.w, L=self.Fit.params.get('L').value, D=self.Fit.params.get('D').value, radius=self.Fit.params.get('radius').value, Rs=self.Fit.params.get('Rs').value, R=self.Fit.params.get('R').value, Q=self.Fit.params.get('Q').value, n=self.Fit.params.get('n').value, R_w=self.Fit.params.get('R_w').value, n_w=self.Fit.params.get('n_w').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_radius = []
self.fit_radius.append(self.Fit.params.get('radius').value)
self.fit_D = []
self.fit_D.append(self.Fit.params.get('D').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R = []
self.fit_R.append(self.Fit.params.get('R').value)
self.fit_Q = []
self.fit_Q.append(self.Fit.params.get('Q').value)
self.fit_n = []
self.fit_n.append(self.Fit.params.get('n').value)
self.fit_R_w = []
self.fit_R_w.append(self.Fit.params.get('R_w').value)
self.fit_n_w = []
self.fit_n_w.append(self.Fit.params.get('n_w').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif circuit == 'R-RQ-TL1Dsolid':
if "'fs1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTL_1Dsolid(w=self.w, L=self.Fit.params.get('L').value, D=self.Fit.params.get('D').value, radius=self.Fit.params.get('radius').value, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, Q1='none', fs1=self.Fit.params.get('fs1').value, n1=self.Fit.params.get('n1').value, R2=self.Fit.params.get('R2').value, Q2=self.Fit.params.get('Q2').value, n2=self.Fit.params.get('n2').value, R_w=self.Fit.params.get('R_w').value, n_w=self.Fit.params.get('n_w').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_radius = []
self.fit_radius.append(self.Fit.params.get('radius').value)
self.fit_D = []
self.fit_D.append(self.Fit.params.get('D').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_fs1 = []
self.fit_fs1.append(self.Fit.params.get('fs1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_R_w = []
self.fit_R_w.append(self.Fit.params.get('R_w').value)
self.fit_n_w = []
self.fit_n_w.append(self.Fit.params.get('n_w').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif "'Q1'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RsRQTL_1Dsolid(w=self.w, L=self.Fit.params.get('L').value, D=self.Fit.params.get('D').value, radius=self.Fit.params.get('radius').value, Rs=self.Fit.params.get('Rs').value, R1=self.Fit.params.get('R1').value, Q1=self.Fit.params.get('Q1').value, fs1='none', n1=self.Fit.params.get('n1').value, R2=self.Fit.params.get('R2').value, Q2=self.Fit.params.get('Q2').value, n2=self.Fit.params.get('n2').value, R_w=self.Fit.params.get('R_w').value, n_w=self.Fit.params.get('n_w').value, Rel=self.Fit.params.get('Rel').value, Ri=self.Fit.params.get('Ri').value)
self.fit_L = []
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_radius = []
self.fit_radius.append(self.Fit.params.get('radius').value)
self.fit_D = []
self.fit_D.append(self.Fit.params.get('D').value)
self.fit_Rs = []
self.fit_Rs.append(self.Fit.params.get('Rs').value)
self.fit_R1 = []
self.fit_R1.append(self.Fit.params.get('R1').value)
self.fit_Q1 = []
self.fit_Q1.append(self.Fit.params.get('Q1').value)
self.fit_n1 = []
self.fit_n1.append(self.Fit.params.get('n1').value)
self.fit_R2 = []
self.fit_R2.append(self.Fit.params.get('R2').value)
self.fit_Q2 = []
self.fit_Q2.append(self.Fit.params.get('Q2').value)
self.fit_n2 = []
self.fit_n2.append(self.Fit.params.get('n2').value)
self.fit_R_w = []
self.fit_R_w.append(self.Fit.params.get('R_w').value)
self.fit_n_w = []
self.fit_n_w.append(self.Fit.params.get('n_w').value)
self.fit_Rel = []
self.fit_Rel.append(self.Fit.params.get('Rel').value)
self.fit_Ri = []
self.fit_Ri.append(self.Fit.params.get('Ri').value)
elif circuit == 'C-RC-C':
if "'fsb'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_C_RC_C(w=self.w, Ce=self.Fit.params.get('Ce').value, Cb='none', Rb=self.Fit.params.get('Rb').value, fsb=self.Fit.params.get('fsb').value)
self.fit_Ce = []
self.fit_Ce.append(self.Fit.params.get('Ce').value)
self.fit_Rb = []
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_fsb = []
self.fit_fsb.append(self.Fit.params.get('fsb').value)
elif "'Cb'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_C_RC_C(w=self.w, Ce=self.Fit.params.get('Ce').value, Cb=self.Fit.params.get('Cb').value, Rb=self.Fit.params.get('Rb').value, fsb='none')
self.fit_Ce = []
self.fit_Ce.append(self.Fit.params.get('Ce').value)
self.fit_Rb = []
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_Cb = []
self.fit_Cb.append(self.Fit.params.get('Cb').value)
elif circuit == 'Q-RQ-Q':
if "'fsb'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_Q_RQ_Q(w=self.w, Qe=self.Fit.params.get('Qe').value, ne=self.Fit.params.get('ne').value, Qb='none', Rb=self.Fit.params.get('Rb').value, fsb=self.Fit.params.get('fsb').value, nb=self.Fit.params.get('nb').value)
self.fit_Qe = []
self.fit_Qe.append(self.Fit.params.get('Qe').value)
self.fit_ne = []
self.fit_ne.append(self.Fit.params.get('ne').value)
self.fit_Rb = []
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_fsb = []
self.fit_fsb.append(self.Fit.params.get('fsb').value)
self.fit_nb = []
self.fit_nb.append(self.Fit.params.get('nb').value)
elif "'Qb'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_Q_RQ_Q(w=self.w, Qe=self.Fit.params.get('Qe').value, ne=self.Fit.params.get('ne').value, Qb=self.Fit.params.get('Qb').value, Rb=self.Fit.params.get('Rb').value, fsb='none', nb=self.Fit.params.get('nb').value)
self.fit_Qe = []
self.fit_Qe.append(self.Fit.params.get('Qe').value)
self.fit_ne = []
self.fit_ne.append(self.Fit.params.get('ne').value)
self.fit_Rb = []
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_Qb = []
self.fit_Qb.append(self.Fit.params.get('Qb').value)
self.fit_nb = []
self.fit_nb.append(self.Fit.params.get('nb').value)
elif circuit == 'RC-RC-ZD':
self.fit_L = []
self.fit_D_s = []
self.fit_u1 = []
self.fit_u2 = []
self.fit_Cb = []
self.fit_Rb = []
self.fit_fsb = []
self.fit_Ce = []
self.fit_Re = []
self.fit_fse = []
if "'fsb'" in str(self.Fit.params.keys()) and "'fse'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RCRCZD(w=self.w, L=self.Fit.params.get('L').value, D_s=self.Fit.params.get('D_s').value, u1=self.Fit.params.get('u1').value, u2=self.Fit.params.get('u2').value, Cb='none', Rb=self.Fit.params.get('Rb').value, fsb=self.Fit.params.get('fsb').value, Ce='none', Re=self.Fit.params.get('Re').value, fse=self.Fit.params.get('fse').value)
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_D_s.append(self.Fit.params.get('D_s').value)
self.fit_u1.append(self.Fit.params.get('u1').value)
self.fit_u2.append(self.Fit.params.get('u2').value)
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_Re.append(self.Fit.params.get('Re').value)
self.fit_fsb.append(self.Fit.params.get('fsb').value)
self.fit_fse.append(self.Fit.params.get('fse').value)
elif "'Cb'" in str(self.Fit.params.keys()) and "'Ce'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RCRCZD(w=self.w, L=self.Fit.params.get('L').value, D_s=self.Fit.params.get('D_s').value, u1=self.Fit.params.get('u1').value, u2=self.Fit.params.get('u2').value, Cb=self.Fit.params.get('Cb').value, Rb=self.Fit.params.get('Rb').value, fsb='none', Ce=self.Fit.params.get('Ce').value, Re=self.Fit.params.get('Re').value, fse='none')
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_D_s.append(self.Fit.params.get('D_s').value)
self.fit_u1.append(self.Fit.params.get('u1').value)
self.fit_u2.append(self.Fit.params.get('u2').value)
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_Re.append(self.Fit.params.get('Re').value)
self.fit_Cb.append(self.Fit.params.get('Cb').value)
self.fit_Ce.append(self.Fit.params.get('Ce').value)
elif "'Cb'" in str(self.Fit.params.keys()) and "'fse'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RCRCZD(w=self.w, L=self.Fit.params.get('L').value, D_s=self.Fit.params.get('D_s').value, u1=self.Fit.params.get('u1').value, u2=self.Fit.params.get('u2').value, Cb=self.Fit.params.get('Cb').value, Rb=self.Fit.params.get('Rb').value, fsb='none', Ce='none', Re=self.Fit.params.get('Re').value, fse=self.Fit.params.get('fse').value)
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_D_s.append(self.Fit.params.get('D_s').value)
self.fit_u1.append(self.Fit.params.get('u1').value)
self.fit_u2.append(self.Fit.params.get('u2').value)
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_Re.append(self.Fit.params.get('Re').value)
self.fit_Cb.append(self.Fit.params.get('Cb').value)
self.fit_fse.append(self.Fit.params.get('fse').value)
elif "'fsb'" in str(self.Fit.params.keys()) and "'Ce'" in str(self.Fit.params.keys()):
self.circuit_fit = cir_RCRCZD(w=self.w, L=self.Fit.params.get('L').value, D_s=self.Fit.params.get('D_s').value, u1=self.Fit.params.get('u1').value, u2=self.Fit.params.get('u2').value, Cb='none', Rb=self.Fit.params.get('Rb').value, fsb=self.Fit.params.get('fsb').value, Ce=self.Fit.params.get('Ce').value, Re=self.Fit.params.get('Re').value, fse='none')
self.fit_L.append(self.Fit.params.get('L').value)
self.fit_D_s.append(self.Fit.params.get('D_s').value)
self.fit_u1.append(self.Fit.params.get('u1').value)
self.fit_u2.append(self.Fit.params.get('u2').value)
self.fit_Rb.append(self.Fit.params.get('Rb').value)
self.fit_Re.append(self.Fit.params.get('Re').value)
self.fit_fsb.append(self.Fit.params.get('fsb').value)
self.fit_Ce.append(self.Fit.params.get('Ce').value)
else:
print('Circuit is not properly defined; see the details described in the circuit definitions')
fig = figure(figsize=(6, 4.5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(211, aspect='equal')
ax1 = fig.add_subplot(212)
colors = sns.color_palette("colorblind", n_colors=1)
colors_real = sns.color_palette("Blues", n_colors=1)
colors_imag = sns.color_palette("Oranges", n_colors=1)
### Nyquist Plot
ax.plot(self.re, self.im, color=colors[0], marker='o', ms=4, lw=2, ls='-', label='Sim')
ax.plot(self.circuit_fit.real, -self.circuit_fit.imag, lw=0, marker='o', ms=8, mec='r', mew=1, mfc='none', label='Fit')
### Bode Plot
if bode=='on':
ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.plot(np.log10(self.f), self.im, color=colors_imag[0], marker='s', ms=3, lw=2.25, ls='-', label="-Z''")
ax1.plot(np.log10(self.f), self.circuit_fit.real, lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='Fit')
ax1.plot(np.log10(self.f), -self.circuit_fit.imag, lw=0, marker='s', ms=8, mec='r', mew=1, mfc='none')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel(r"Z', -Z'' [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
ax1.plot(np.log10(self.f), self.re, color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.plot(np.log10(self.f), self.circuit_fit.real, lw=0, marker='D', ms=8, mec='r', mew=1, mfc='none', label='Fit')
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel(r"Z' [$\Omega$]")
if legend == 'on':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
ax1.plot(np.log10(self.f), np.log10(self.re), color=colors_real[0], marker='D', ms=3, lw=2.25, ls='-', label="Z'")
ax1.plot(np.log10(self.f),
|
np.log10(self.circuit_fit.real)
|
numpy.log10
|
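A minimal sketch of the bookkeeping that the long if/elif chain above performs by hand, assuming the Fit object is an lmfit result (Parameters.valuesdict() is the only API used); the helper name is hypothetical and the circuit functions would still need explicit dispatch:

# hypothetical replacement for the repeated "self.fit_X = []; self.fit_X.append(...)" pattern
def collect_fit_params(self, fit):
    for name, value in fit.params.valuesdict().items():
        setattr(self, 'fit_' + name, [value])  # e.g. self.fit_Rs == [Rs_value]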
import numpy as np
import pandas as pd
import os
import time
import logging
from fcntl import flock, LOCK_EX, LOCK_SH, LOCK_UN, LOCK_NB
from timebox.utils.datetime_utils import compress_time_delta_array, get_unit_data
from timebox.utils.numpy_utils import *
from timebox.utils.binary import determine_required_bytes_unsigned_integer, read_unsigned_int
from timebox.utils.pandas_utils import parse_pandas_dtype
from timebox.constants import *
from timebox.timebox_tag import TimeBoxTag, NUM_BYTES_PER_DEFINITION_WITHOUT_IDENTIFIER
from timebox.exceptions import *
MAX_WRITE_BLOCK_WAIT_SECONDS = 60
MAX_READ_BLOCK_WAIT_SECONDS = 30
class TimeBox:
def __init__(self, file_path=None):
self.file_path = file_path
self._timebox_version = 1
self._tag_names_are_strings = False
self._date_differentials_stored = True
self._num_points = 0
self._tags = {} # like { int|string tag_identifier : TimeBoxTag }
self._start_date = None
self._seconds_between_points = 0
self._bytes_per_date_differential = 0
self._date_differential_units = 0
self._date_differentials = None # numpy array
self._dates = None # numpy array of datetime64[s]
self._MAX_WRITE_BLOCK_WAIT_SECONDS = MAX_WRITE_BLOCK_WAIT_SECONDS
self._MAX_READ_BLOCK_WAIT_SECONDS = MAX_READ_BLOCK_WAIT_SECONDS
return
@classmethod
def save_pandas(cls, df: pd.DataFrame, file_path: str):
"""
Expects that the passed df has an index of type Timestamp,
or of strings that can be converted to Timestamp. All dtypes in the pandas
data frame must be in the float/int/u-int family.
:param df: pandas DataFrame
:param file_path: file path to save pandas DataFrame
:return: TimeBox object
"""
tb = TimeBox.from_pandas(df)
tb.file_path = file_path
try:
tb.write()
except DateUnitsError:
raise InvalidPandasIndexError('There was an error reading the date-time index on data frame')
return tb
@classmethod
def from_pandas(cls, df: pd.DataFrame):
"""
Expects that the passed df has an index of type Timestamp,
or of strings that can be converted to Timestamp. All dtypes in the pandas
data frame must be in the float/int/u-int family.
:param df: pandas DataFrame
:return: TimeBox object
"""
# make sure the pandas data frame is sorted on date
logging.debug('Before sorting: {}'.format(df.head()))
df = df.sort_index()
logging.debug('After sorting: {}'.format(df.head()))
tb = TimeBox()
tb._tag_names_are_strings = True
# ensure index is there and can be converted to numpy array of datetime64s
logging.debug('Datetime index dtype before and after:\n{}'.format(df.index.dtype))
tb._dates = df.index.values.astype(np.datetime64)
logging.debug('after: {}'.format(tb._dates.dtype))
tb._start_date = np.amin(tb._dates.astype(np.dtype('datetime64[s]')))
logging.debug('Min date: {}'.format(tb._start_date))
tb._date_differentials_stored = True
tb._num_points = tb._dates.size
# get column names and info
for c in df.columns:
type_info = parse_pandas_dtype(df[c].dtype)
tb._tags[c] = TimeBoxTag(c, type_info[0], type_info[1])
tb._tags[c].data = df[c].values
return tb
def to_pandas(self) -> pd.DataFrame:
"""
Populates a pandas data frame and returns it.
:return: Pandas DataFrame
"""
if len([t for t in self._tags if self._tags[t].data is None]) > 0:  # some tag data is missing, so read from disk
self.read()
data = [(t, self._tags[t].data) for t in self._tags]
data.append(('DateTimes', self._dates))
df = pd.DataFrame(dict(data))  # DataFrame.from_items was removed from modern pandas
return df.set_index('DateTimes')
def read(self):
"""
This function reads the entire file contents into memory.
Later it can be improved to only read certain tags/dates
:return: dictionary of results like {tag_identifier: numpy.array}
"""
with self._get_fcntl_lock('r') as handle:
try:
# read in the data
nb = self._read_file_info(handle)
logging.debug('Read num bytes in file info: {}'.format(nb))
if self._date_differentials_stored:
self._read_date_deltas(handle)
self._read_tag_data(handle)
finally:
# release shared lock
flock(handle, LOCK_UN)
return
def write(self):
"""
writes the file out to file_name.
requires an exclusive LOCK_EX fcntl lock.
blocks until it can get a lock
:return: void
"""
# put a file in the same directory to block new shared requests
# this prevents a popular file from blocking forever
# note, this is a blocking function as it waits for other write events to finish
file_is_new = not os.path.exists(self.file_path)
with self._get_fcntl_lock('w') as handle:
try:
# prepare datetime data
if self._date_differentials_stored:
self._calculate_date_differentials()
self._compress_date_differentials()
logging.debug('Writing file info')
num_bytes_in_file_info = self._write_file_info(handle)
logging.debug('Num bytes in file info: {}'.format(num_bytes_in_file_info))
if self._date_differentials_stored:
self._write_date_deltas(handle)
self._write_tag_data(handle)
except (InvalidPandasDataTypeError, InvalidPandasIndexError, DateDataError, DateUnitsError,
DateUnitsGranularityError, CompressionError, CompressionModeInvalidError) as e:
if file_is_new:
os.remove(self.file_path)
raise e
finally:
flock(handle, LOCK_UN) # release lock
block_file_name = self._blocking_file_name()
if os.path.exists(block_file_name):
os.remove(block_file_name)
return
def _update_required_bytes_for_tag_identifier(self):
"""
Looks at the tag list and determines what the max bytes required is
:return: void, updates class internals
"""
if self._tag_names_are_strings:
max_length = max([len(k) for k in self._tags])
self._num_bytes_for_tag_identifier = max_length * 4
else:
self._num_bytes_for_tag_identifier = determine_required_bytes_unsigned_integer(
max([k for k in self._tags])
)
return
def _unpack_options(self, from_int: int):
"""
Reads the options from the 1-byte options bit
:param from_int: int holding options
:return: void, populates class internals
"""
# starting with the right-most bits and working left
tag_name_result = (from_int >> TimeBoxOptionPositions.TAG_NAME_BIT_POSITION.value) & 1
self._tag_names_are_strings = True if tag_name_result else False
date_diff_result = (from_int >> TimeBoxOptionPositions.DATE_DIFFERENTIALS_STORED_POSITION.value) & 1
self._date_differentials_stored = True if date_diff_result else False
return
def _encode_options(self) -> int:
"""
Stores the bit-options in a 16-bit integer
:return: int, no more than 16 bits
"""
# note, this needs to be in the opposite order as _unpack_options
options = 0
options |= 1 if self._date_differentials_stored else 0
options <<= 1
options |= 1 if self._tag_names_are_strings else 0
return options
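# A hypothetical round-trip sketch for the two option helpers above, assuming
# TAG_NAME_BIT_POSITION == 0 and DATE_DIFFERENTIALS_STORED_POSITION == 1:
#   tb = TimeBox(); tb._tag_names_are_strings = True; tb._date_differentials_stored = True
#   encoded = tb._encode_options()            # -> 0b11 == 3
#   tb2 = TimeBox(); tb2._unpack_options(encoded)
#   assert tb2._tag_names_are_strings and tb2._date_differentials_stored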
def _read_file_info(self, file_handle) -> int:
"""
Reads the file info from a file_handle. Populates file internals
:param file_handle: file handle object in 'rb' mode that is seeked to the correct position (0)
:return: int, seek bytes increased since file_handle was received
"""
self._timebox_version = read_unsigned_int(file_handle.read(1))
self._unpack_options(int(read_unsigned_int(file_handle.read(2))))
num_tags = read_unsigned_int(file_handle.read(1))
self._num_points = read_unsigned_int(file_handle.read(4))
self._num_bytes_for_tag_identifier = read_unsigned_int(file_handle.read(1))
bytes_seek = 1 + 2 + 1 + 4 + 1
# first 2 bytes are info on the tag
bytes_for_tag_def = num_tags * (self._num_bytes_for_tag_identifier+NUM_BYTES_PER_DEFINITION_WITHOUT_IDENTIFIER)
self._tags = TimeBoxTag.tag_definitions_from_bytes(
file_handle.read(bytes_for_tag_def),
self._num_bytes_for_tag_identifier,
self._tag_names_are_strings
)
bytes_seek += bytes_for_tag_def
self._start_date = np.fromfile(file_handle, dtype='datetime64[s]', count=1)[0]
bytes_seek += 8
if self._date_differentials_stored:
self._seconds_between_points = 0
self._bytes_per_date_differential = read_unsigned_int(file_handle.read(1))
stored_value_for_date_diff_units = read_unsigned_int(file_handle.read(2))
self._date_differential_units = get_date_utils_constant_from_stored_units_int(
stored_value_for_date_diff_units
)
bytes_seek += 3
else:
self._seconds_between_points = read_unsigned_int(file_handle.read(4))
self._bytes_per_date_differential = 0
self._date_differential_units = 0
bytes_seek += 4
return bytes_seek
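# Byte layout of the header implied by the reads above:
#   1 version | 2 options | 1 num_tags | 4 num_points | 1 bytes-per-tag-identifier
#   | tag definitions | 8 start date (datetime64[s])
#   | 1 bytes-per-date-differential + 2 date-diff units  (if differentials stored)
#   | 4 seconds-between-points                           (otherwise)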
def _write_file_info(self, file_handle) -> int:
"""
Writes out the file info to the file handle
:param file_handle: file handle object in 'wb' mode. pre-seeked to correct position (0)
:return: int, seek bytes advanced in this method
"""
np.array([np.uint8(self._timebox_version)], dtype=np.uint8).tofile(file_handle)
np.array([np.uint16(self._encode_options())], dtype=np.uint16).tofile(file_handle)
np.array([np.uint8(len(self._tags))], dtype=np.uint8).tofile(file_handle)
np.array([np.uint32(self._num_points)], dtype=np.uint32).tofile(file_handle)
self._update_required_bytes_for_tag_identifier()
np.array([np.uint8(self._num_bytes_for_tag_identifier)], dtype=np.uint8).tofile(file_handle)
bytes_seek = 1 + 2 + 1 + 4 + 1
sorted_tags = sorted([t for t in self._tags])
tags_to_bytes_result = TimeBoxTag.tag_list_to_bytes(
[self._tags[t] for t in sorted_tags],
self._num_bytes_for_tag_identifier,
self._tag_names_are_strings
)
file_handle.write(tags_to_bytes_result.byte_code)
bytes_seek += tags_to_bytes_result.num_bytes
np.array([self._start_date], dtype='datetime64[s]').tofile(file_handle)  # np.datetime64 takes a unit, not a dtype kwarg; set the dtype on the array instead
bytes_seek += 8
if self._date_differentials_stored:
np.array([np.uint8(self._bytes_per_date_differential)], dtype=np.uint8).tofile(file_handle)
int_to_store_date_diff_units = get_int_for_date_units_from_date_utils_constant(
self._date_differential_units
)
np.array([
|
np.uint16(int_to_store_date_diff_units)
|
numpy.uint16
|
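A hedged usage sketch of the pandas round-trip documented above; the column name, dates, and file path are invented for illustration:

# hypothetical round trip through a TimeBox file
import pandas as pd
df = pd.DataFrame({'speed': [61.0, 59.5]},
                  index=pd.to_datetime(['2018-06-01 00:00', '2018-06-01 01:00']))
TimeBox.save_pandas(df, '/tmp/example.timebox')                  # write
df_back = TimeBox(file_path='/tmp/example.timebox').to_pandas()  # read back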
###############################################################################
# apogee.spec.plot: various way to plot APOGEE spectra
###############################################################################
from functools import wraps
import numpy
from galpy.util import bovy_plot
from matplotlib import pyplot
import matplotlib.ticker as ticker
from matplotlib.ticker import NullFormatter
from matplotlib.backends.backend_pdf import PdfPages
import apogee.spec.window as apwindow
import apogee.tools.read as apread
from apogee.tools import air2vac, atomic_number,apStarWavegrid
_LAMBDASUB= 15000
_STARTENDSKIP= 30
# Good, clean Lines, mainly from Smith et al. (2013)
line_labels= {}
line_labels['fe']= r'$\mathrm{Fe\kern 0.1em I}$'
line_labels['mg']= r'$\mathrm{Mg\kern 0.1em I}$'
line_labels['al']= r'$\mathrm{Al\kern 0.1em I}$'
line_labels['si']= r'$\mathrm{Si\kern 0.1em I}$'
line_labels['k']= r'$\mathrm{K\kern 0.1em I}$'
line_labels['ca']= r'$\mathrm{Ca\kern 0.1em I}$'
line_labels['ti']= r'$\mathrm{Ti\kern 0.1em I}$'
line_labels['cr']= r'$\mathrm{Cr\kern 0.1em I}$'
line_labels['ni']= r'$\mathrm{Ni\kern 0.1em I}$'
line_labels['na']= r'$\mathrm{Na\kern 0.1em I}$'
line_labels['mn']= r'$\mathrm{Mn\kern 0.1em I}$'
line_labels['s']= r'$\mathrm{S\kern 0.1em I}$'
line_labels['v']= r'$\mathrm{V\kern 0.1em I}$'
line_labels['cob']= r'$\mathrm{Co\kern 0.1em I}$'
line_labels['cu']= r'$\mathrm{Cu\kern 0.1em I}$'
line_labels['oh']= r'$\mathrm{OH}$'
line_labels['co']= r'$^{12}\!\mathrm{CO}$'
line_labels['cn']= r'$\mathrm{CN}$'
line_labels['13co']= r'$^{13}\!\mathrm{CO}$'
line_labels['hbrpi']= r'$\mathrm{Br-}\pi$'
line_labels['hbrla']= r'$\mathrm{Br-}\lambda$'
line_labels['hbr']= r'$\mathrm{H[Br]}$'
line_labels['dib']= r'$\mathrm{DIB}$'
# From Table 2 in Smith et al. (2013)
_FEI_lines= [air2vac(l) for l in [15194.492,15207.526,15395.718,15490.339,
15648.510,15964.867,16040.657,16153.247,
16165.032]]
_FEI_lines.append(16697.635) # one more from Shetrone
# From Table 5
_MGI_lines= [air2vac(l) for l in [15740.716,15748.9,15765.8,15879.5,
15886.2,15954.477]]
_ALI_lines= [air2vac(l) for l in [16718.957,16750.564286,16763.359]]
_SII_lines= [air2vac(l) for l in [15361.161,15376.831,15833.602,15960.063,
16060.009,16094.787,16215.670,16680.770,
16828.159]]
_KI_lines= [air2vac(l) for l in [15163.067,15168.376]]
_CAI_lines= [air2vac(l) for l in [16136.823,16150.763,16155.236,16157.364]]
_TII_lines= [air2vac(l) for l in [15543.756,15602.842,15698.979,15715.573,
16635.161]]
_VI_lines= [air2vac(15925.)]
_CRI_lines= [air2vac(l) for l in [15680.063,15860.214]]
_MNI_lines= [air2vac(l) for l in [15159.,15217.85,15262.4]]
_COI_lines= [air2vac(16757.7)]
_NII_lines= [air2vac(l) for l in [15605.680,16584.439,16589.295,
16673.711,16815.471,16818.760]]
_CUI_lines= [air2vac(16005.7)]
# From <NAME>
_NAI_lines= [air2vac(16388.85)]
# From <NAME>
_SI_lines= [15406.540,15426.490,15474.043,15482.712]
# From Table 4 in Smith et al. (2013), with a few tweaks
_OH_lines= [air2vac(l) for l in [15279.5,15391.,15505.5,15570.5]]
_CO_lines= [air2vac(l) for l in [15582.,15780.5,15988.,16189.5]]
_CN_lines= [air2vac(l) for l in [15260.,15321.,15397.,15332.,15410.8,
15447.,15466.,15472.,15482.]]
_13CO_lines= [air2vac(l) for l in [16122.5,16743.5]]
#The hydrogen bracket series
_HBRPI_lines= [15196.005]
_HBRLA_lines= [15704.960]
_HBR_lines= [15004.970,15043.157,15086.906,15137.367,15264.717,
15345.992,15443.148,15560.708,15884.888,16113.721,
16411.681,16811.117]
def specPlotInputDecorator(func):
"""Decorator to parse input to spectral plotting"""
@wraps(func)
def input_wrapper(*args,**kwargs):
if len(args) >= 2 and isinstance(args[0],(list,numpy.ndarray)) \
and isinstance(args[1],(list,numpy.ndarray)):
# wavelength, spectrum
return func(args[0],args[1],*args[2:],**kwargs)
elif len(args) >= 1 and isinstance(args[0],(list,numpy.ndarray)):
# spectrum on standard re-sampled wavelength grid
lam=apStarWavegrid()
if len(args[0]) == 7214: # Input is on ASPCAP grid
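# The three slices below appear to map the 7214-pixel ASPCAP grid onto the
# blue/green/red detector chips of the apStar grid, leaving inter-chip gaps at zero.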
spec= numpy.zeros(len(lam))
spec[322:3242]= args[0][:2920]
spec[3648:6048]= args[0][2920:5320]
spec[6412:8306]= args[0][5320:]
else:
spec= args[0]
return func(lam,spec,*args[1:],**kwargs)
elif isinstance(args[0],(int,numpy.short,str)) \
and isinstance(args[1],str):
# location ID and APOGEE ID (loc ID can be string for 1m sample)
if kwargs.get('apStar',False):
spec, hdr= apread.apStar(args[0],args[1],header=True,
ext=kwargs.pop('ext',1))
spec= spec[numpy.amin([kwargs.pop('apStarIndx',1),
len(spec)-1])]
else: #aspcapStar
spec, hdr= apread.aspcapStar(args[0],args[1],header=True,
ext=kwargs.pop('ext',1))
lam= 10.**numpy.arange(hdr['CRVAL1'],
hdr['CRVAL1']+len(spec)*hdr['CDELT1'],
hdr['CDELT1'])
return func(lam,spec,*args[2:],**kwargs)
return input_wrapper
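# Hypothetical invocations accepted by the decorator above (the IDs are made up):
#   waveregions(lam, spec)                   # (a) wavelength array + spectrum
#   waveregions(spec)                        # (b) spectrum on the standard grid
#   waveregions(4240, '2M00000000+0000000')  # (c) location ID + APOGEE ID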
@specPlotInputDecorator
def waveregions(*args,**kwargs):
"""
NAME:
waveregions
PURPOSE:
plot selected regions of the spectrum in one row
INPUT:
Either:
(a) wavelength, spectrum (\AA,spectrum units)
(b) spectrum (assumed on standard APOGEE re-sampled wavelength grid)
(c) location ID, APOGEE ID (default loads aspcapStar, loads extension ext(=1); apStar=True loads apStar spectrum)
KEYWORDS:
File loading:
ext= (1) extension to load
apStar= (False) if True, load the apStar spectrum
apStarIndx= (1) index in the apStar spectrum to load
Chunks position:
startlams, endlams= start and end wavelength in \AA of the various chunks (takes precedence over startindxs, endindxs)
startindxs, endindxs= start and end index in the wavelength array of the various chunks
Plotting-specific keywords
labelLines= (True) label some lines
noMolecLines= (False) don't label the molecules
cleanZero= (True) replace <= zero entries with NaN
labelID= A string ID that will be placed in the top-left corner
labelTeff, labellogg, labelmetals, labelafe= parameter labels that will be placed in the top-right corner
noxlabel= (False) if True, don't label the x axis
pyplot.plot args and kwargs
OUTPUT:
plot to output
The final axes allow one to put additional labels on the plot, e.g., for adding the APOGEE ID:
bovy_plot.bovy_text(r'$\mathrm{%s}$' % '2M02420597+0837017',top_left=True)
Note that an ID (e.g., the apogee ID) and Teff, logg, metallicity, and alpha-enhancement labels can be added using the keywords label* above
HISTORY:
2015-01-18 - Written (based on older code) - Bovy (IAS)
"""
# Grab non-pyplot.plot kwargs
apStar= kwargs.pop('apStar',False)
labelLines= kwargs.pop('labelLines',not 'overplot' in kwargs)
noMolecLines= kwargs.pop('noMolecLines',False)
cleanZero= kwargs.pop('cleanZero',True)
noxticks= kwargs.pop('_noxticks',False)
noxlabel= kwargs.pop('noxlabel',False)
noskipdiags= kwargs.pop('_noskipdiags',False)
labelwav= kwargs.pop('_labelwav',False)
plotw= kwargs.pop('_plotw',None)
markLines= kwargs.pop('markLines',False)
markwav= kwargs.pop('_markwav',None)
# Labels
labelID= kwargs.pop('labelID',None)
labelTeff= kwargs.pop('labelTeff',None)
labellogg= kwargs.pop('labellogg',None)
labelmetals= kwargs.pop('labelmetals',None)
labelafe= kwargs.pop('labelafe',None)
# Clean bad lines
if cleanZero:
args[1][args[1] <= 0.]= numpy.nan
# Chunk parameters
if 'startlams' in kwargs:
# Turn startlams into a startindxs and similar for endlams
startlams= kwargs.pop('startlams')
endlams= kwargs.pop('endlams')
startindxs= []
endindxs= []
for ii in range(len(startlams)):
startindxs.append(numpy.argmin(numpy.fabs(startlams[ii]-args[0])))
endindxs.append(numpy.argmin(numpy.fabs(endlams[ii]-args[0])))
else:
startindxs= kwargs.pop('startindxs',
[322,1794,2707,3850,4740,5820,7185])
endindxs= kwargs.pop('endindxs',
[590,1940,2857,4025,5070,5955,7400])
nregions= len(startindxs)
# Calculate the width of the plot
dx= numpy.array([args[0][numpy.amin([len(args[0])-1,endindxs[ii]])]\
-args[0][numpy.amax([0,startindxs[ii]-1])] \
for ii in range(nregions)],
dtype='float')
# Adjust 0 (and -1) to start (end) a little further
startendskip= kwargs.pop('_startendskip',_STARTENDSKIP)
dx[0]= args[0][numpy.amin([len(args[0])-1,endindxs[0]])]\
-args[0][numpy.amax([0,startindxs[0]-startendskip])]
dx[-1]= args[0][numpy.amin([len(args[0])-1,endindxs[-1]+startendskip])]\
-args[0][numpy.amax([0,startindxs[-1]-1])]
if nregions == 1: #special case
dx= [args[0][numpy.amin([len(args[0])-1,endindxs[0]+startendskip])]\
-args[0][numpy.amax([0,startindxs[0]-startendskip])]]
# Determine a good step for the tickmarks
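# (log10(total width / 10) is binned so the step is 1, 2, or 5 times a power of ten)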
tickStepTmp= numpy.log10(numpy.sum(dx)/10.) % 1
if tickStepTmp > numpy.log10(1.5) and tickStepTmp < numpy.log10(3.5):
tickStep= 2.*10.**int(numpy.log10(numpy.sum(dx)/10.))
elif tickStepTmp > numpy.log10(3.5) and tickStepTmp < numpy.log10(7.5):
tickStep= 5.*10.**int(numpy.log10(numpy.sum(dx)/10.))
else:
tickStep= 10.**int(numpy.log10(numpy.sum(dx)/10.))
dx/= numpy.sum(dx)
if noxticks:
totdx= 0.825
else:
totdx= 0.85
skipdx= kwargs.pop('skipdx',0.015)
dx*= (totdx-(nregions-1)*skipdx)
# Setup plot
overplot= kwargs.pop('overplot',False)
if not overplot:
bovy_plot.bovy_print(fig_width=kwargs.pop('fig_width',8.4),
fig_height=kwargs.pop('fig_height',2.5),
axes_labelsize=16,text_fontsize=14,
legend_fontsize=12,
xtick_labelsize=12,ytick_labelsize=12)
pyplot.figure()
if overplot:
yrange= numpy.array(pyplot.gca().get_ylim())
kwargs.pop('yrange',None) # pop if there
elif apStar:
yrange= kwargs.pop('yrange',[0.,1.1*numpy.nanmax(args[1])])
else:
yrange= kwargs.pop('yrange',[0.2,1.2])
# Deal with the label
if apStar:
ylabel= kwargs.pop('ylabel',r'$f_\lambda(\lambda)\,(10^{-17}\,\mathrm{erg\, s}^{-1}\,\mathrm{cm}^{-2}\,\AA^{-1})$')
else:
ylabel= kwargs.pop('ylabel',r'$f/f_c(\lambda)$')
kwargs['zorder']= kwargs.get('zorder',10)
for ii in range(nregions):
# Setup the axes
if ii == 0:
left, bottom, width, height= 0.1+(0.85-totdx)*2., 0.125, dx[ii],0.8
else:
left, bottom, width, height= 0.1+(0.85-totdx)*2.+numpy.cumsum(dx)[ii-1]+skipdx*ii,\
0.125, dx[ii], 0.8
thisax= pyplot.axes([left,bottom,width,height])
fig= pyplot.gcf()
fig.sca(thisax)
startindx, endindx= startindxs[ii], endindxs[ii]
if ii == 0 and nregions == 1:
xrange=[args[0][numpy.amax([0,startindx-startendskip])]-_LAMBDASUB,
args[0][numpy.amin([len(args[0])-1,endindx+startendskip])]-_LAMBDASUB]
elif ii == 0:
xrange=[args[0][numpy.amax([0,startindx-startendskip])]-_LAMBDASUB,
args[0][numpy.amin([len(args[0])-1,endindx])]-_LAMBDASUB]
elif ii == (nregions-1):
xrange=[args[0][numpy.amax([0,startindx-1])]-_LAMBDASUB,
args[0][numpy.amin([len(args[0])-1,endindx+startendskip])]-_LAMBDASUB]
else:
xrange=[args[0][numpy.amax([0,startindx-1])]-_LAMBDASUB,
args[0][numpy.amin([len(args[0])-1,endindx])]-_LAMBDASUB]
thisax.plot(args[0][startindx:endindx]-_LAMBDASUB,
args[1][startindx:endindx],
*args[2:],**kwargs)
if not plotw is None:
thisax.plot(args[0][startindx:endindx]-_LAMBDASUB,
plotw[startindx:endindx],
'-',lw=2.,color='0.65',zorder=1)
thisax.set_xlim(xrange[0],xrange[1])
thisax.set_ylim(yrange[0],yrange[1])
if noxticks:
nullfmtx= NullFormatter() # no labels, assume 1\AA
thisax.xaxis.set_major_formatter(nullfmtx)
thisax.xaxis.set_major_locator(ticker.MultipleLocator(2.))
else:
thisax.xaxis.set_major_locator(ticker.MultipleLocator(tickStep))
bovy_plot._add_ticks(xticks=not noxticks)
if ii > 0:
nullfmt = NullFormatter() # no labels
thisax.yaxis.set_major_formatter(nullfmt)
elif not overplot:
pyplot.ylabel(ylabel)
# Remove spines between different wavelength regions
if ii == 0 and not nregions == 1:
thisax.spines['right'].set_visible(False)
thisax.tick_params(right=False,which='both')
elif ii == (nregions-1) and not nregions == 1:
thisax.spines['left'].set_visible(False)
thisax.tick_params(labelleft=False)
thisax.tick_params(left=False,which='both')
elif not nregions == 1:
thisax.spines['left'].set_visible(False)
thisax.spines['right'].set_visible(False)
thisax.tick_params(labelleft=False)
thisax.tick_params(left=False,which='both')
thisax.tick_params(right=False,which='both')
# Plot cut-out markers
cutOutkwargs = dict(transform=thisax.transAxes,color='k',
clip_on=False)
if not noskipdiags:
d = .015 # how big to make the diagonal lines in axes coordinates
slope= 1./(dx[ii]+0.2*skipdx)/3.
if ii == 0 and not nregions == 1:
thisax.plot((1-slope*d,1+slope*d),(-d,+d), **cutOutkwargs)
thisax.plot((1-slope*d,1+slope*d),(1-d,1+d), **cutOutkwargs)
elif ii == (nregions-1) and not nregions == 1:
thisax.plot((-slope*d,+slope*d),(-d,+d), **cutOutkwargs)
thisax.plot((-slope*d,+slope*d),(1-d,1+d), **cutOutkwargs)
elif not nregions == 1:
thisax.plot((1-slope*d,1+slope*d),(-d,+d), **cutOutkwargs)
thisax.plot((1-slope*d,1+slope*d),(1-d,1+d), **cutOutkwargs)
thisax.plot((-slope*d,+slope*d),(-d,+d), **cutOutkwargs)
thisax.plot((-slope*d,+slope*d),(1-d,1+d), **cutOutkwargs)
else: #plot gray bands
cutOutkwargs['color']= '0.5'
thisax.fill_between((1.,1.+skipdx),(0.,0.),(1.,1.),**cutOutkwargs)
# Label the lines
if labelLines:
_label_all_lines(args[0][startindx],args[0][endindx],
thisax,args[0],args[1],noMolecLines)
# Mark the lines
if markLines:
_mark_lines(markwav,args[0][startindx],args[0][endindx],
thisax,args[0],args[1])
# Label the largest round wavelength in angstrom for windows
if labelwav:
bovy_plot.bovy_text(2*numpy.floor((xrange[1]-(nregions > 15))/2.),
yrange[0]+0.05*(yrange[1]-yrange[0]),
r'$\lambda\kern 0.1em%i,%03i$' % (15+int(numpy.floor(xrange[1]/1000.)),
int(2.*numpy.floor((xrange[1]-(nregions > 15))/2.) % 1000.)),
horizontalalignment='center',
verticalalignment='bottom',
rotation='vertical',fontsize=10.)
# Add the x-axis label
if not nregions == 1:
thisax= pyplot.axes([0.1+(0.85-totdx)*2.,0.125,totdx,0.8])
pyplot.gcf().sca(thisax)
thisax.set_ylim(yrange[0],yrange[1])
thisax.spines['left'].set_visible(False)
thisax.spines['right'].set_visible(False)
thisax.spines['bottom'].set_visible(False)
thisax.spines['top'].set_visible(False)
thisax.tick_params(labelleft=False)
thisax.tick_params(left=False,which='both')
thisax.tick_params(right=False,which='both')
thisax.tick_params(labelbottom=False)
thisax.tick_params(bottom=False,which='both')
thisax.tick_params(top=False,which='both')
if not overplot and not noxticks and not noxlabel:
thisax.set_xlabel(r'$\lambda-%i,000\,(\AA)$' % (int(_LAMBDASUB/1000.)),
labelpad=10-(nregions == 1)*10)
elif not overplot and noxticks and not noxlabel:
thisax.set_xlabel(r'$\lambda\,(\AA)$',
labelpad=3-(nregions == 1)*3)
if not nregions == 1:
thisax.set_zorder(-1)
# Start another axis object for later labeling
thisax= pyplot.axes([0.1+(0.85-totdx)*2.,0.125,totdx,0.8])
pyplot.gcf().sca(thisax)
thisax.patch.set_facecolor('None')
thisax.set_zorder(10)
# Labels
if not labelID is None:
bovy_plot.bovy_text(r'$\mathrm{%s}$' % labelID,
top_left=True,fontsize=10)
if not labelTeff is None or not labellogg is None \
or not labelmetals is None or not labelafe is None:
nParamLabels= int(not labelTeff is None)\
+int(not labellogg is None)\
+int(not labelmetals is None)\
+int(not labelafe is None)
# Label parameters
paramStr= ''
if not labelTeff is None:
paramStr+= r'T_\mathrm{eff}= %i\,\mathrm{K}' % (int(labelTeff))
nParamLabels-= 1
if nParamLabels > 0:
paramStr+= ',\ '
if not labellogg is None:
paramStr+= r'\log g= %.1f' % labellogg
nParamLabels-= 1
if nParamLabels > 0:
paramStr+= ',\ '
if not labelmetals is None:
paramStr+= r'[\mathrm{M/H}]= %.2f' % labelmetals
nParamLabels-= 1
if nParamLabels > 0:
paramStr+= ',\ '
if not labelafe is None:
paramStr+= r'[\alpha/\mathrm{M}]= %.2f' % labelafe
nParamLabels-= 1
if nParamLabels > 0:
paramStr+= ',\ '
bovy_plot.bovy_text(r'$%s$' % paramStr,top_right=True,fontsize=10)
return None
@specPlotInputDecorator
def detector(*args,**kwargs):
"""
NAME:
detector
PURPOSE:
plot the spectrum from one of the detectors
INPUT:
Either:
(a) wavelength, spectrum (\AA,spectrum units)
(b) spectrum (assumed on standard APOGEE re-sampled wavelength grid)
(c) location ID, APOGEE ID (default loads aspcapStar, loads extension ext(=1); apStar=True loads apStar spectrum)
+'blue', 'green', 'red' to pick the detector
KEYWORDS:
apogee.spec.plot.waveregions keywords
OUTPUT:
plot to output
HISTORY:
2015-01-19 - Written - Bovy (IAS)
"""
plotArgsStart= 3
if len(args) > 2 and args[2].lower() == 'green':
startindxs= [3505]
endindxs= [6150]
elif len(args) > 2 and args[2].lower() == 'red':
startindxs= [6282]
endindxs= [8404]
elif len(args) > 2 and args[2].lower() == 'blue':
startindxs= [188]
endindxs= [3322]
else: #default: blue
startindxs= [188]
endindxs= [3322]
plotArgsStart= 2
return waveregions(args[0],args[1],startindxs=startindxs,endindxs=endindxs,
*args[plotArgsStart:],**kwargs)
@specPlotInputDecorator
def windows(*args,**kwargs):
"""
NAME:
windows
PURPOSE:
plot the spectral windows for a given element
INPUT:
Either:
(a) wavelength, spectrum (\AA,spectrum units)
(b) spectrum (assumed on standard APOGEE re-sampled wavelength grid)
(c) location ID, APOGEE ID (default loads aspcapStar, loads extension ext(=1); apStar=True loads apStar spectrum)
+element string (e.g., 'Al'); Adding 1 and 2 splits the windows into two
KEYWORDS:
plot_weights= (False) if True, also plot the weights for the windows (assumes that the spectrum is on the apStarWavegrid)
markLines= mark the location of 'lines' (see apogee.spec.window.lines)
apogee.spec.plot.waveregions keywords
OUTPUT:
plot to output
The final axes allow one to put additional labels on the plot, e.g., for adding the APOGEE ID:
bovy_plot.bovy_text(r'$\mathrm{%s}$' % '2M02420597+0837017',top_left=True)
Note that an ID (e.g., the apogee ID) and Teff, logg, metallicity, and alpha-enhancement labels can be added using the keywords label* above
HISTORY:
2015-01-26 - Written (based on older code) - Bovy (IAS)
"""
pad= kwargs.pop('pad',3)
try:
si,ei= apwindow.waveregions(args[2],pad=pad,asIndex=True)
except IOError:
try:
si, ei= apwindow.waveregions(args[2][:-1],pad=pad,asIndex=True)
except IOError:
raise IOError("Windows for element %s could not be loaded, please specify an existing APOGEE element" % ((args[2].lower().capitalize())))
if args[2][-1] == '1':
si= si[:len(si)//2]
ei= ei[:len(ei)//2]
else:
si= si[len(si)//2:]
ei= ei[len(ei)//2:]
# Remove the number from the element
newargs= (args[0],args[1],args[2][:-1])
for ii in range(len(args)-3):
newargs= newargs+(args[ii+3],)
args= newargs
# Also get the number and total width of all of the windows
dlam= apwindow.total_dlambda(args[2],pad=pad)
numw= apwindow.num(args[2])
# Set spacing between windows
if numw > 20:
kwargs['skipdx']= 0.003
kwargs['_noskipdiags']= True
elif numw > 15:
kwargs['skipdx']= 0.01
# Set initial space to zero
kwargs['_startendskip']= 0
# Set initial figure width
if not kwargs.get('overplot',False) and not 'fig_width' in kwargs:
if dlam > 150.:
kwargs['fig_width']= 8.4
else:
kwargs['fig_width']= 4.2
# Don't tick x
kwargs['_noxticks']= True
# Label the largest wavelength in angstrom
kwargs['_labelwav']= True
# Don't label the lines unless explicitly asked for
kwargs['labelLines']= kwargs.get('labelLines',False)
# Plot the weights as well
if kwargs.pop('plot_weights',False):
kwargs['_plotw']= apwindow.read(args[2],apStarWavegrid=True)
if kwargs.get('apStar',False):
kwargs['yrange']= kwargs.get('yrange',
[0.,1.1*
|
numpy.nanmax(args[1])
|
numpy.nanmax
|
import numpy as np
import os
from highlevel_planning_py.tools.util import rotate_orient, ObjectInfo
from highlevel_planning_py.sim.scene_base import SceneBase
from highlevel_planning_py.sim.cupboard import get_cupboard_info
from scipy.spatial.transform import Rotation as R
class ScenePlanning1(SceneBase):
def __init__(self, world, base_dir, restored_objects=None):
SceneBase.__init__(self, world, base_dir, restored_objects)
if restored_objects is None:
self.objects["table"] = ObjectInfo(
urdf_name_="table/table.urdf",
urdf_path_=os.path.join(base_dir, "table/table.urdf"),
init_pos_=np.array([3.0, 0.0, 0.0]),
init_orient_=np.array([0.0, 0.0, 0.0, 1.0]),
)
self.objects["cube1"] = ObjectInfo(
urdf_path_="cube_small.urdf",
init_pos_=
|
np.array([2.5, 0.0, 0.7])
|
numpy.array
|
# from __future__ import print_function
import numpy as np
import pickle as pkl
import csv
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
from scipy import spatial
import os
import math
from collections import defaultdict
def load_station_list(filename):
node_list = []
with open(filename, "r") as f:
lines = f.readlines()
line = lines[0]  # the comments
print("{}".format(lines[0]), end='')
line = lines[1]  # the comma-separated station ids
node_list = [int(x) for x in line.strip().split(",")]
# print(node_list)
return node_list
def load_station_district(filename):
# load station
print("load_station_district")
station_with_district = defaultdict(list)
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count=0
for row in csv_reader:
# if line_count >= 1:
# break
node_id = int(row[1])
district = int(row[2])
assert district==7, "ERROR not district 7"
def load_meta_data(filename):
node_with_attribute = defaultdict(dict)
node_list = []
latitude_list = []
longitude_list = []
freeway_list = []
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter='\t')
line_count = 0
for row in csv_reader:
assert len(row)==18, "{}, {}".format(line_count, row)
if line_count == 0:
line_count += 1
print("first line: {}".format(row))
continue # the first line is title
node_id = int(row[0])
try:
freeway_id = int(row[1])
latitude = float(row[8])
longitude = float(row[9])
direction = row[2]
abs_pm = float(row[7])
node_type = row[11]
node_name = row[13]
except ValueError as e:
line_count += 1
continue
if node_type == "FF":
name_list = node_name.strip().split()
legal_str = {'E', 'W', 'N', "S"}
if len(name_list)==5 and str.isdigit(name_list[1]) and str.isdigit(name_list[4]) and name_list[0][0] in legal_str and name_list[3][0] in legal_str:
pass
else:
node_type = "FFFalse"
node_with_attribute[node_id]['freeway_id'] = freeway_id
node_with_attribute[node_id]['latitude'] = latitude
node_with_attribute[node_id]['longitude']= longitude
node_with_attribute[node_id]['direction'] = direction
node_with_attribute[node_id]['abs_pm'] = abs_pm
node_with_attribute[node_id]['type'] = node_type
node_with_attribute[node_id]['name'] = node_name
# freeway_list.append(freeway_id)
# node_list.append(node_id)
# latitude_list.append(latitude)
# longitude_list.append(longitude)
line_count += 1
return node_with_attribute
def construct_graph(node_list, node_with_attribute):
print("node_list length: {}".format(len(node_list)))
print(len(node_with_attribute))
## construct the nodes
freeway_with_station = defaultdict(dict)
node_list_new = []
node_with_pm = defaultdict(float)
for node_id in node_list:
print(node_id)
if node_id not in node_with_attribute:
continue
node_list_new.append(node_id)
freeway_id = node_with_attribute[node_id]['freeway_id']
direction = node_with_attribute[node_id]['direction']
if direction in freeway_with_station[freeway_id]:
freeway_with_station[freeway_id][direction].append(node_id)
else:
freeway_with_station[freeway_id][direction] = []
freeway_with_station[freeway_id][direction].append(node_id)
abs_pm = node_with_attribute[node_id]['abs_pm']
node_with_pm[node_id] = abs_pm
node_name_to_id = defaultdict(int)
node_id_to_name = defaultdict(int)
for index, value in enumerate(node_list_new):
node_name_to_id[value] = index
node_id_to_name[index] = value
print("len(node_list_new): {}".format(len(node_list_new)))
G = nx.Graph()
## the nodes on the same road
for freeway_id in freeway_with_station:
for direction in freeway_with_station[freeway_id]:
node_list = freeway_with_station[freeway_id][direction]
pm_list = []
for node_id in node_list:
pm = node_with_pm[node_id]
pm_list.append(pm)
index_list = sorted(range(len(pm_list)), key=lambda k: pm_list[k])
node_list = np.array(node_list)
node_list = node_list[index_list]
for i in range(len(node_list) - 1):
node1 = node_list[i]
node2 = node_list[i+1]
node1 = node_name_to_id[node1]
node2 = node_name_to_id[node2]
G.add_edge(node1, node2)
## the edge at crossing
for node_id in node_list_new:
latitude = node_with_attribute[node_id]['latitude']
longitude = node_with_attribute[node_id]['longitude']
G.nodes[node_name_to_id[node_id]]['pos'] = (longitude, latitude)  # G.node was removed in networkx 2.4
freeway_with_stationPos = defaultdict(dict)
kdTree = defaultdict(dict)
for freeway_id in freeway_with_station:
for direction in freeway_with_station[freeway_id]:
freeway_with_stationPos[freeway_id][direction] = []
node_list = freeway_with_station[freeway_id][direction]
for node_id in node_list:
latitude = node_with_attribute[node_id]['latitude']
longitude = node_with_attribute[node_id]['longitude']
freeway_with_stationPos[freeway_id][direction].append([latitude, longitude])
kdTree[freeway_id][direction] = spatial.KDTree(freeway_with_stationPos[freeway_id][direction] )
print("HERE")
for node_id in node_list_new:
node_type = node_with_attribute[node_id]['type']
if node_type == "FF":
node_name = node_with_attribute[node_id]['name']
node_name_list = node_name.strip().split()
if len(node_name_list)==5:
print(node_name_list)
first_freeway = int(node_name_list[1])
second_freeway = int(node_name_list[4])
if first_freeway in freeway_with_stationPos and second_freeway in freeway_with_stationPos:
dir_1 = node_name_list[0][0]
dir_2 = node_name_list[3][0]
if dir_1 in freeway_with_stationPos[first_freeway] and dir_2 in freeway_with_stationPos[second_freeway]:
latitude = node_with_attribute[node_id]['latitude']
longitude = node_with_attribute[node_id]['longitude']
pts = [latitude, longitude]
if node_with_attribute[node_id]['freeway_id'] == first_freeway:
dist, ind = kdTree[second_freeway][dir_2].query(pts)
node_2_id = freeway_with_station[second_freeway][dir_2][ind]
# print("node_1_id: {}, ind:{}, dist:{}".format(node_id, ind, dist) )
if dist < 0.01:
print("node_1_id: {}, ind:{}, dist:{}".format(node_id, ind, dist) )
G.add_edge(node_name_to_id[node_id], node_name_to_id[node_2_id])
else:
# print(kdTree[first_freeway][dir_1])
dist, ind = kdTree[first_freeway][dir_1].query(pts)
node_1_id = freeway_with_station[first_freeway][dir_1][ind]
# print("node_2_id: {}, ind:{}, dist: {}".format(node_id, ind, dist))
if dist < 0.01:
print("node_2_id: {}, ind:{}, dist: {}".format(node_id, ind, dist))
G.add_edge(node_name_to_id[node_1_id], node_name_to_id[node_id])
return G, node_id_to_name, node_name_to_id
def generate_feature(node_list, node_name_to_id, station_profile):
# t = '06/01/2018 00:00:00'
# day = t.split()[0]
# day = day.split('/')
# print(day)
begin_time = np.datetime64('2018-06-01 00:00:00')
end_time = np.datetime64('2018-08-29 23:00:00')
num_time_steps = (end_time - begin_time) / np.timedelta64(1,'h') + 1
num_time_steps = int(num_time_steps)
print("num_time_steps: {}".format(num_time_steps))
num_nodes = len(node_list)
num_features = 2 # speed and
input_feature = np.zeros((num_time_steps, num_nodes, num_features))
node_feature_omits = np.zeros((num_nodes, num_features)) ## helper
node_feature_total = np.zeros((num_nodes, num_features))
with open(station_profile) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count % 10000 == 0:
print("process: {}".format(line_count / 3490560.0))
node_name = int(row[1])
if node_name not in node_name_to_id:
line_count += 1
continue
day = row[0].split()[0].split("/")
day_new = day[2] + '-' + day[0] + '-' + day[1]
time = day_new + " " + row[0].split()[1]
this_time = np.datetime64(time)
if this_time >= begin_time and this_time <= end_time:
assert len(row) >= 12, "line_count: {}, row: {}".format(line_count, row)
time_id = (this_time - begin_time) / np.timedelta64(1, "h")
# http://pems.dot.ca.gov/?dnode=Clearinghouse&type=station_hour&district_id=7&submit=Submit
# https://stackoverflow.com/questions/9573244/most-elegant-way-to-check-if-the-string-is-empty-in-python
time_id = int(time_id)
# node_name = int(row[1])
samples = row[7]
observed = row[8]
total_flow = row[9]
ave_occupancy = row[10]
ave_speed = row[11]
if node_name not in node_name_to_id:
line_count += 1
continue
node_id = node_name_to_id[node_name]
# input_feature[time_id, node_id, 0]: average speed
if ave_speed != '':
input_feature[time_id, node_id, 0] = float(ave_speed)
else:
node_feature_omits[node_id, 0] += 1
node_feature_total[node_id, 0] += 1
# input_feature[time_id, node_id, 1]: average occupancy
if ave_occupancy != '':
input_feature[time_id, node_id, 1] = float(ave_occupancy)
else:
node_feature_omits[node_id, 1] += 1
node_feature_total[node_id, 1] += 1
line_count += 1
pkl.dump(input_feature, open("input_feature.pkl", 'wb'), protocol=2)
def generate_label(node_list, node_name_to_id, station_profile, path_list, time_offset = np.timedelta64(24, 'h')):
print("path_list length: {}".format(len(path_list)))
print(path_list)
begin_time = np.datetime64('2018-06-01 00:00:00')
end_time = np.datetime64('2018-08-29 23:00:00')
num_time_steps = (end_time - begin_time) / np.timedelta64(1,'h') + 1
num_time_steps = int(num_time_steps)
num_nodes = len(node_list)
num_paths = len(path_list)
node_label = np.zeros((num_time_steps, num_nodes))
path_label = np.zeros((num_time_steps, num_paths))
map_node_to_path = defaultdict(list) #map node id to path id
for path_id in range(len(path_list)):
for v in path_list[path_id]:
map_node_to_path[v].append(path_id)
with open(station_profile) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count % 10000 == 0:
print("process: {}".format(line_count / 3490560.0))
node_name = int(row[1])
if node_name not in node_name_to_id:
line_count += 1
continue
# print("row: {}, [12]: {}".format(row, row[12]))
node_id = node_name_to_id[node_name]
day = row[0] .split()[0].split('/')
day_new = day[2] + '-' + day[0] + '-' + day[1]
time = day_new + " " + row[0].split()[1]
this_time = np.datetime64(time)
if this_time >= begin_time + time_offset and this_time <= end_time + time_offset:
time_id = (this_time - time_offset - begin_time) / np.timedelta64(1, "h")
time_id = math.floor(time_id)
if row[12] != '':
delay35 = float(row[12])
if delay35 > 0:
node_label[time_id, node_id] += 1
line_count += 1
# path label
for time_id in range(num_time_steps):
for path_id in range(num_paths):
node_list = path_list[path_id]
num_nodes = len(node_list)
# require three consecutive congested stations to flag the path
for node1_id in range(num_nodes - 1 - 1):
node1 = node_list[node1_id]
node2 = node_list[node1_id + 1]
node3 = node_list[node1_id + 2]
if node_label[time_id, node1] > 0 and node_label[time_id, node2] > 0 and node_label[time_id, node3] > 0:
path_label[time_id, path_id] = 1
break
pkl.dump(path_label, open("path_label.pkl", 'wb'), protocol=2)
def sample_path(G, path_num=200):
assert nx.is_connected(G), "not connected"
node_list = list(G.nodes())  # materialize for np.random.choice
path_list = []
loop = path_num  # use the path_num argument instead of a hard-coded count
count = 0
path_len_threshold = 50
while count < loop:
print(count)
[node1, node2] = np.random.choice(node_list, 2, replace=False)
shortest_path = nx.shortest_path(G, source=node1, target=node2)
if len(shortest_path) < path_len_threshold:
print("len: {}".format(len(shortest_path)))
path_list.append(shortest_path)
count += 1
else:
continue
path_dict = {}
for i in range(len(path_list)):
path_dict[i] = path_list[i]
# pkl.dump(path_list, open("path_list.pkl", "wb"), protocol=2)
pkl.dump(path_dict, open("path_dict.pkl", "wb"), protocol=2)
return path_list, path_dict
if __name__ == '__main__':
|
np.random.seed(2)
|
numpy.random.seed
|
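A note on the time_id arithmetic in the prompt above: dividing a datetime64 difference by np.timedelta64(1, 'h') yields the offset in (fractional) hours, which is then truncated to an integer bin index. A minimal, self-contained sketch of the pattern, with illustrative timestamps rather than the PeMS data:
import numpy as np

begin_time = np.datetime64('2018-06-01 00:00:00')
this_time = np.datetime64('2018-06-02 05:00:00')

# timedelta64 / timedelta64 division returns a float number of hours
time_id = (this_time - begin_time) / np.timedelta64(1, 'h')
time_id = int(time_id)
assert time_id == 29  # 24 h + 5 h after the start of the window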
import numpy as np
from utils import zitler_dominates, normalize_score
def get_pareto_front(cv_results, metrics):
cv_names = [ "mean_test_" + metric.name for metric in metrics ]
cv_metrics = [ cv_results[ name ] for name in cv_names ]
pareto_front = []
# Now, search for the pareto front
# 1) No solution in the front is strictly better than any other
# 2) Solutions that are strictly worse are removed
# We do this process for each explored hyper-parameter
for i in range(len( cv_metrics[0] )):
included = True # We start assuming the current parameter can be included
# Now, check for each of the pareto-front
# Whether it is overshadowed by any other parameter
for fp in pareto_front:
overshadowed = True # Assume it is, until we find a case it isn't
# Check for each metric
for m_object, metric in zip(metrics, cv_metrics):
# Guards against NaN values
if True in
|
np.isnan(metric)
|
numpy.isnan
|
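As a hedged illustration of the dominance test the prompt above is building: a candidate joins the front only if no existing member is at least as good on every metric and strictly better on one, and NaN scores are screened with np.isnan first. The helper names below are illustrative, not the zitler_dominates imported from utils:
import numpy as np

def dominates(a, b):
    # a dominates b if a is >= b on every metric and > b on at least one
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return bool(np.all(a >= b) and np.any(a > b))

def pareto_front(points):
    front = []
    for p in points:
        if np.any(np.isnan(p)):  # skip candidates with undefined scores
            continue
        if any(dominates(q, p) for q in front):
            continue  # p is overshadowed by an existing member
        front = [q for q in front if not dominates(p, q)] + [p]
    return front

print(pareto_front([[1, 2], [2, 1], [0, 0], [np.nan, 3]]))
# -> [[1, 2], [2, 1]]: [0, 0] is dominated and the NaN row is skipped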
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
2d quad8 element.
"""
__all__ = [
'Quad8'
]
import numpy as np
from numpy import sqrt
from .element import Element
from .tools import compute_B_matrix, scatter_matrix
# try to import Fortran routines
use_fortran = False
try:
import amfe.f90_element
use_fortran = True
except Exception:
print('Python was not able to load the fast fortran element routines.')
class Quad8(Element):
"""
Plane Quadrangle with quadratic shape functions and 8 nodes. 4 nodes are
at every corner, 4 nodes on every face.
"""
name = 'Quad8'
def __init__(self, *args, **kwargs):
"""
Definition of material properties and thickness as they are 2D-Elements.
"""
super().__init__(*args, **kwargs)
self.K = np.zeros((16,16))
self.f = np.zeros(16)
self.M_small = np.zeros((8,8))
self.M = np.zeros((16,16))
self.S = np.zeros((8,6))
self.E = np.zeros((8,6))
# Quadrature like ANSYS or ABAQUS:
g = np.sqrt(3/5)
w = 5/9
w0 = 8/9
self.gauss_points = ((-g, -g, w*w), ( g, -g, w*w ), ( g, g, w*w),
(-g, g, w*w), ( 0, -g, w0*w ), ( g, 0, w*w0),
( 0, g, w0*w), (-g, 0, w*w0), ( 0, 0, w0*w0))
# a little bit dirty but correct. Comes from sympy file.
self.extrapolation_points = np.array(
[[ 5*sqrt(15)/18 + 10/9, 5/18, -5*sqrt(15)/18 + 10/9,
5/18, -5/9 - sqrt(15)/9, -5/9 + sqrt(15)/9,
-5/9 + sqrt(15)/9, -5/9 - sqrt(15)/9, 4/9],
[5/18, 5*sqrt(15)/18 + 10/9, 5/18, -5*sqrt(15)/18 + 10/9,
-5/9 - sqrt(15)/9, -5/9 - sqrt(15)/9, -5/9 + sqrt(15)/9,
-5/9 + sqrt(15)/9, 4/9],
[-5*sqrt(15)/18 + 10/9, 5/18, 5*
|
sqrt(15)
|
numpy.sqrt
|
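The 3x3 rule in the Quad8 prompt above can be sanity-checked independently: with g = sqrt(3/5) and 1D weights 5/9 and 8/9, the nine tensor-product weights must sum to 4 (the area of the reference square), and the rule integrates polynomials up to degree 5 per direction exactly. A small sketch:
import numpy as np

g, w, w0 = np.sqrt(3/5), 5/9, 8/9
rule_1d = [(-g, w), (0.0, w0), (g, w)]

# tensor product of the 1D rule gives the nine 2D points and weights
gauss_points = [(x, y, wx * wy) for x, wx in rule_1d for y, wy in rule_1d]

assert np.isclose(sum(wgt for _, _, wgt in gauss_points), 4.0)

# exact for x^2 * y^2 on [-1, 1]^2, whose analytic integral is 4/9
val = sum(wgt * x**2 * y**2 for x, y, wgt in gauss_points)
assert np.isclose(val, 4/9)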
#!/usr/bin/env python
"""
Test specter throughput file format
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import unittest
from pkg_resources import resource_filename
from ..throughput import load_throughput
class TestThroughput(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.thru = load_throughput(resource_filename('specter.test', 't/throughput.fits'))
cls.w = np.arange(5000, 9000, 1)
def setUp(self):
self.flux = np.random.uniform(1,2, size=self.w.shape) * 1e-17
self.photflux = np.random.uniform(0,1, size=self.w.shape)
def test_area(self):
self.assertTrue(self.thru.area > 0)
def test_exptime(self):
self.assertTrue(self.thru.exptime > 0)
def test_fiberdia(self):
self.assertTrue(self.thru.fiberdia > 0)
def test_atmthru(self):
t = self.thru.atmospheric_throughput(self.w)
self.assertTrue(np.all( (0.0<=t) & (t<= 1.0)))
def test_atmext(self):
ext = self.thru.extinction(self.w)
self.assertTrue(np.all( ext > 0 ))
def test_fiberin(self):
t = self.thru.fiberinput_throughput(self.w)
self.assertTrue(np.all( (0.0<=t) & (t<= 1.0)))
#- Use a different wavelength grid
t = self.thru.fiberinput_throughput(self.w[0::2])
self.assertTrue(np.all( (0.0<=t) & (t<= 1.0)))
self.assertTrue(len(t) == len(self.w)//2)
#- Should even work with no wavelength grid
t = self.thru.fiberinput_throughput()
self.assertTrue(np.all( (0.0<=t) & (t<= 1.0)))
def test_fiberin_objtype(self):
tstar = self.thru.fiberinput_throughput(self.w, objtype='STAR')
telg = self.thru.fiberinput_throughput(self.w, objtype='ELG')
tsky = self.thru.fiberinput_throughput(self.w, objtype='SKY')
self.assertTrue(np.all( (0.0<=tstar) & (tstar<= 1.0)))
self.assertTrue(np.all( (0.0<=telg) & (telg<= 1.0)))
self.assertTrue(np.allclose(tsky, 1.0))
self.assertTrue(np.all(tsky >= tstar))
self.assertTrue(np.all(tstar >= telg))
def test_hardware(self):
t = self.thru.hardware_throughput(self.w)
self.assertTrue(np.all( (0.0<=t) & (t<= 1.0)))
def test_calibthru(self):
t1 = self.thru(self.w, objtype='CALIB', airmass=1.0)
t2 = self.thru(self.w, objtype='CALIB', airmass=2.0)
self.assertTrue( np.all(t1 == t2) )
self.assertTrue( len(t1) == len(t2) )
self.assertTrue( len(t1) == len(self.w) )
self.assertTrue( np.any(t2>0.0) )
def test_skythru(self):
t1 = self.thru(self.w, objtype='SKY', airmass=1.0)
t2 = self.thru(self.w, objtype='SKY', airmass=2.0)
self.assertTrue( np.all( (t1 > t2) | (t2 == 0.0)) )
self.assertTrue( len(t1) == len(t2) )
self.assertTrue( len(t1) == len(self.w) )
self.assertTrue( np.any(t2>0.0) )
def test_starthru(self):
t1 = self.thru(self.w, objtype='STAR', airmass=1.0)
t2 = self.thru(self.w, objtype='STAR', airmass=2.0)
self.assertTrue( np.all( (t1 > t2) | (t2 == 0.0)) )
self.assertTrue( len(t1) == len(t2) )
self.assertTrue( len(t1) == len(self.w) )
self.assertTrue( np.any(t2>0.0) )
def test_thru(self):
t1 = self.thru(self.w, objtype='CALIB', airmass=1.1)
t2 = self.thru(self.w, objtype='SKY', airmass=1.1)
t3 = self.thru(self.w, objtype='STAR', airmass=1.1)
self.assertTrue( np.all( (t1>t2) | (t1==0.0) ) )
self.assertTrue( np.all( (t2>t3) | (t2==0.0) ) )
self.assertTrue( np.any(t3>0.0) )
def test_fluxunits(self):
units = [
"erg/s/cm^2",
"erg/s/cm^2/A",
"erg/s/cm^2/A/arcsec^2",
"erg/s/cm^2/arcsec^2",
]
for u in units:
p = self.thru.photons(self.w, self.flux, units=u, objtype='STAR')
self.assertTrue( np.any(p>0) and np.all(p>=0) )
def test_scaledunits(self):
scale = 1e-16
units = [
"erg/s/cm^2",
"erg/s/cm^2/A",
"erg/s/cm^2/A/arcsec^2",
"erg/s/cm^2/arcsec^2",
]
for u in units:
scaled_units = str(scale) + " " + u
p0 = self.thru.photons(self.w, self.flux, units=u, objtype='STAR')
p1 = self.thru.photons(self.w, self.flux/scale, units=scaled_units, objtype='STAR')
ii = (p0 != 0.0)
dp = np.abs( (p0-p1)[ii]/p0[ii] )
self.assertTrue( np.max(dp) < 1e-14 ) #- allow for roundoff error
def test_photunits(self):
units = [
"photon",
"photon/A",
### "photon/A/arcsec^2",
]
for u in units:
p = self.thru.photons(self.w, self.photflux, units=u, objtype='STAR')
self.assertTrue( np.any(p>0) and np.all(p>=0) )
def test_calibphot(self):
p1 = self.thru.photons(self.w, self.flux, objtype='CALIB', airmass=1.0)
p2 = self.thru.photons(self.w, self.flux, objtype='CALIB', airmass=2.0)
self.assertTrue( np.any(p1>0) and np.all(p1==p2) )
def test_skyphot(self):
p1 = self.thru.photons(self.w, self.flux, objtype='SKY', airmass=1.0)
p2 = self.thru.photons(self.w, self.flux, objtype='SKY', airmass=2.0)
self.assertTrue( np.any(p1>0) )
self.assertTrue( np.all( (p1>p2) | (p2==0) ) )
def test_objphot(self):
p1 = self.thru.photons(self.w, self.flux, objtype='STAR', airmass=1.0)
p2 = self.thru.photons(self.w, self.flux, objtype='STAR', airmass=2.0)
self.assertTrue( np.any(p1>0) )
self.assertTrue( np.all( (p1>p2) | (p2==0) ) )
def test_multiphot(self):
p1 = self.thru.photons(self.w, self.flux, objtype='CALIB', airmass=1.0)
p2 = self.thru.photons(self.w, self.flux, objtype='SKY', airmass=1.0)
p3 = self.thru.photons(self.w, self.flux, objtype='STAR', airmass=1.0)
self.assertTrue( np.all( (p1>p2) | (p1==0.0) ) )
self.assertTrue( np.all( (p2>p3) | (p2==0.0) ) )
self.assertTrue( np.any(p3>0.0) )
def test_apply_throughput(self):
f1 = self.thru.apply_throughput(self.w, self.flux, objtype='CALIB')
f2 = self.thru.apply_throughput(self.w, self.flux, objtype='SKY')
f3 = self.thru.apply_throughput(self.w, self.flux, objtype='STAR')
self.assertTrue( np.all( (f1>f2) | (f1==0.0) ) )
self.assertTrue( np.all( (f2>f3) | (f2==0.0) ) )
self.assertTrue( np.any(f3>0.0) )
self.assertTrue( np.all(f1 <= self.flux) )
self.assertTrue(
|
np.all(f2 <= self.flux)
|
numpy.all
|
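test_scaledunits above compares two photon computations through a relative error so that only roundoff-level disagreement is tolerated, masking out zero entries before dividing. The pattern in isolation, with made-up numbers:
import numpy as np

p0 = np.array([0.0, 1.0, 2.0, 4.0])
p1 = p0 * (1.0 + 1e-15)  # identical up to roundoff-sized noise

ii = (p0 != 0.0)  # avoid dividing by zero entries
dp = np.abs((p0 - p1)[ii] / p0[ii])
assert np.max(dp) < 1e-14  # allow for roundoff error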
import h5py
import numpy as np
import constants as ct
import shape as sh
import potential as pt
from scipy.optimize import least_squares, minimize
from scipy.spatial import cKDTree
from astropy.cosmology import FlatLambdaCDM
# Dictionary of file output names
fname = {
"L205n2500TNG": "TNG300_L1",
"L205n1250TNG": "TNG300_L2",
"L205n625TNG": "TNG300_L3",
"L205n2500TNG_DM": "TNG300DM_L1",
"L205n1250TNG_DM": "TNG300DM_L2",
"L205n625TNG_DM": "TNG300DM_L3",
"L75n1820TNG": "TNG100_L1",
"L75n910TNG": "TNG100_L2",
"L75n455TNG": "TNG100_L3",
"L75n1820TNG_DM": "TNG100DM_L1",
"L75n910TNG_DM": "TNG100DM_L2",
"L75n455TNG_DM": "TNG100DM_L3",
}
# Dictionary of gravitational softening values used -- [DM or STAR, GAS / h]
# NOTE: The GAS values have been converted to h-free units; the values were not
# consistent in the TNG tables (for some reason!)
# NOTE: Values here are in kpc
soften = {
"L205n2500TNG": [0.15, 0.369],
"L205n1250TNG": [2.95, 0.738],
"L205n625TNG": [5.90, 1.476],
"L75n1820TNG": [0.74, 0.185],
"L75n910TNG": [0.15, 0.369],
"L75n455TNG": [2.95, 0.738],
}
"""
This class determines which simulation is associated with a
given halo, stores it, and then computes various quantities.
There are a couple of external routines, but most functionality
is consolidated here.
Takes the volume class as input, which has read a snapshot.
"""
class halo:
def __init__(self, volume, Nbins=25):
"""
Take cosmology from the volume class instance
Arguments:
-volume : An instance of the entire_snapshot_read class
-Nbins : Number of bins in the radial profile [INTEGER]
"""
# Boxsize, cosmology, simulation to cgs unit conversions
self.boxsize = volume.BoxSize
self.axp = volume.axp
self.hubp = volume.hub
self.redshift = volume.redshift
self.OmegaB = volume.omega_b
self.OmegaM = volume.omega_m
self.OmegaL = volume.omega_l
self.Ulength = volume.Ulength
self.Umass = volume.Umass
self.Uvelc = volume.Uvelc
# Set tags for output
self.path = volume.path
self.simtag = self.path.split("/")[-2]
self.fname = fname[self.simtag]
self.snap = volume.snap
# Create radial bins
self.Nbins = Nbins
self.set_up_radial_profile()
return
def halo_data_store(self, mpi, subfind_table, volume, Extent=5.0, R200scale=False):
"""
Find all particles within given sphere for every halo of interest
then send particles to desired task and store
Arguments:
-mpi : An instance of the mpi class
-subfind_table : An instance of the build_table class
-volume : An instance of the entire_snapshot_read class
-Extent : Halocentric radial extent to extract particles to [FLOAT]
-R200scale : BOOLEAN, if TRUE rescale the extent by halo's R200 value
"""
if not mpi.Rank:
print(" > Distributing particles", flush=True)
# Set Extent of cut sphere
self.Extent = Extent
self.halo_data = {}
self.Nhalos = len(subfind_table.tags)
dims = np.array([self.boxsize, self.boxsize, self.boxsize])
# Loop over haloes of interest
Ntask_per_node = int(np.rint(mpi.NProcs / mpi.NNodes))
offset = 0
for j in range(0, self.Nhalos, 1):
# Scale extraction range
if R200scale:
Extent = self.Extent * subfind_table.R200[j]
else:
Extent = self.Extent * ct.Mpc_cm
# Select task to send particle data to
destination = (j % mpi.NNodes) * Ntask_per_node + (offset % Ntask_per_node)
if destination >= mpi.NProcs:
destination -= mpi.NProcs
if j > 0 and j % mpi.NNodes == mpi.NNodes - 1:
offset += 1
if not mpi.Rank:
print(" -{0:04d} {1:03d}".format(j, destination), flush=True)
if destination == mpi.Rank:
htag = subfind_table.tags[j]
self.halo_data[htag] = {}
# Find contributing cells/particles -- centering on halo
vkey = sorted(volume.__dict__.keys())
if "pos" in vkey:
Grad = volume.pos - subfind_table.CoP[j]
Grad = np.where(Grad > 0.5 * dims, Grad - dims, Grad)
Grad = np.where(Grad < -0.5 * dims, Grad + dims, Grad)
Grad = np.sqrt((Grad ** 2.0).sum(axis=-1))
gdx = np.where(Grad <= Extent)[0]
del Grad
if "DMpos" in vkey:
DMrad = volume.DMpos - subfind_table.CoP[j]
DMrad = np.where(DMrad > 0.5 * dims, DMrad - dims, DMrad)
DMrad = np.where(DMrad < -0.5 * dims, DMrad + dims, DMrad)
DMrad = np.sqrt((DMrad ** 2.0).sum(axis=-1))
ddx = np.where(DMrad <= Extent)[0]
del DMrad
if "STpos" in vkey:
STrad = volume.STpos - subfind_table.CoP[j]
STrad = np.where(STrad > 0.5 * dims, STrad - dims, STrad)
STrad = np.where(STrad < -0.5 * dims, STrad + dims, STrad)
STrad = np.sqrt((STrad ** 2.0).sum(axis=-1))
sdx = np.where(STrad <= Extent)[0]
del STrad
del vkey
# Gather particles/cells on destination task
# NOTE: No idea why I need to recast the positions arrays for this to work
# but after two days of testing I got bored and this fixes it.
keys = sorted(list(volume.__dict__.keys()))
# --- GAS
if "pos" in keys:
array = mpi.gatherv_single(
np.zeros(volume.pos[gdx].shape) + volume.pos[gdx], root=destination
)
if mpi.Rank == destination:
pos = array - subfind_table.CoP[j]
pos = np.where(pos > 0.5 * dims, pos - dims, pos)
pos = np.where(pos < -0.5 * dims, pos + dims, pos)
self.halo_data[subfind_table.tags[j]]["Pos"] = pos
del pos
if "rho" in keys:
array = mpi.gatherv_single(volume.rho[gdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Rho"] = array
if "ne_nh" in keys:
array = mpi.gatherv_single(volume.ne_nh[gdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Ne_Nh"] = array
if "zmet" in keys:
array = mpi.gatherv_single(volume.zmet[gdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Zmet"] = array
if "mass" in keys:
array = mpi.gatherv_single(volume.mass[gdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Mass"] = array
if "sub" in keys:
array = mpi.gatherv_single(volume.sub[gdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Sub"] = array
if "temp" in keys:
array = mpi.gatherv_single(volume.temp[gdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Temp"] = array
if "velc" in keys:
array = mpi.gatherv_single(
np.zeros(volume.velc[gdx].shape) + volume.velc[gdx],
root=destination,
)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["Velc"] = array
# --- DM
if "DMpos" in keys:
array = mpi.gatherv_single(
np.zeros(volume.DMpos[ddx].shape) + volume.DMpos[ddx],
root=destination,
)
if mpi.Rank == destination:
pos = array - subfind_table.CoP[j]
pos = np.where(pos > 0.5 * dims, pos - dims, pos)
pos = np.where(pos < -0.5 * dims, pos + dims, pos)
self.halo_data[subfind_table.tags[j]]["DMPos"] = pos
del pos
if "DMmass" in keys:
array = mpi.gatherv_single(volume.DMmass[ddx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["DMMass"] = array
if "DMsub" in keys:
array = mpi.gatherv_single(volume.DMsub[ddx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["DMSub"] = array
if "DMvelc" in keys:
array = mpi.gatherv_single(
np.zeros(volume.DMvelc[ddx].shape) + volume.DMvelc[ddx],
root=destination,
)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["DMVelc"] = array
# --- STARS
if "STpos" in keys:
array = mpi.gatherv_single(
np.zeros(volume.STpos[sdx].shape, dtype=np.float)
+ volume.STpos[sdx],
root=destination,
)
if mpi.Rank == destination:
pos = array - subfind_table.CoP[j]
pos = np.where(pos > 0.5 * dims, pos - dims, pos)
pos = np.where(pos < -0.5 * dims, pos + dims, pos)
self.halo_data[subfind_table.tags[j]]["STPos"] = pos
del pos
if "STmass" in keys:
array = mpi.gatherv_single(volume.STmass[sdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["STMass"] = array
if "STsub" in keys:
array = mpi.gatherv_single(volume.STsub[sdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["STSub"] = array
if "STvelc" in keys:
array = mpi.gatherv_single(
np.zeros((len(sdx), 3), dtype=np.float) + volume.STvelc[sdx],
root=destination,
)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["STVelc"] = array
if "STzmet" in keys:
array = mpi.gatherv_single(volume.STzmet[sdx], root=destination)
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["STZmet"] = array
# Add key SUBFIND quantities
if mpi.Rank == destination:
self.halo_data[subfind_table.tags[j]]["tag"] = subfind_table.tags[j]
self.halo_data[subfind_table.tags[j]]["hub"] = subfind_table.hub
self.halo_data[subfind_table.tags[j]]["axp"] = subfind_table.axp
self.halo_data[subfind_table.tags[j]][
"redshift"
] = subfind_table.redshift
self.halo_data[subfind_table.tags[j]][
"rho_crit"
] = subfind_table.rho_crit
self.halo_data[subfind_table.tags[j]]["CoP"] = subfind_table.CoP[j]
self.halo_data[subfind_table.tags[j]]["M200"] = subfind_table.M200[j]
self.halo_data[subfind_table.tags[j]]["M500"] = subfind_table.M500[j]
self.halo_data[subfind_table.tags[j]]["Mvir"] = subfind_table.Mvir[j]
self.halo_data[subfind_table.tags[j]]["R200"] = subfind_table.R200[j]
self.halo_data[subfind_table.tags[j]]["R500"] = subfind_table.R500[j]
self.halo_data[subfind_table.tags[j]]["Rvir"] = subfind_table.Rvir[j]
self.halo_data[subfind_table.tags[j]]["Vbulk"] = subfind_table.Vbulk[j]
self.halo_data[subfind_table.tags[j]][
"LenType"
] = subfind_table.GrLenType[j]
self.halo_data[subfind_table.tags[j]][
"OffType"
] = subfind_table.OffType[j]
self.halo_data[subfind_table.tags[j]]["Nsubs"] = subfind_table.Nsubs[j]
self.halo_data[subfind_table.tags[j]][
"SubLenType"
] = subfind_table.SubLenType[j]
mpi.comm.Barrier()
return
def set_up_halo(self, mpi, tag):
"""
Set up the required arrays for this halo as other classes expect
Arguments:
-mpi : An instance of the mpi class
-tag : STRING labelling this halo for property storage
"""
# Basic halo properties
self.tag = tag
self.hub = self.halo_data[tag]["hub"]
self.axp = self.halo_data[tag]["axp"]
self.redshift = self.halo_data[tag]["redshift"]
self.rho_crit = self.halo_data[tag]["rho_crit"]
self.CoP = self.halo_data[tag]["CoP"]
self.M200 = self.halo_data[tag]["M200"]
self.M500 = self.halo_data[tag]["M500"]
self.Mvir = self.halo_data[tag]["Mvir"]
self.R200 = self.halo_data[tag]["R200"]
self.R500 = self.halo_data[tag]["R500"]
self.Rvir = self.halo_data[tag]["Rvir"]
self.Vbulk = self.halo_data[tag]["Vbulk"]
self.LenType = self.halo_data[tag]["LenType"]
self.OffType = self.halo_data[tag]["OffType"]
self.Nsubs = self.halo_data[tag]["Nsubs"]
self.SubLenType = self.halo_data[tag]["SubLenType"]
# Datasets
keys = sorted(list(self.halo_data[tag].keys()))
if "Mass" in keys:
self.mass = self.halo_data[tag]["Mass"]
if "Ne_Nh" in keys:
self.ne_nh = self.halo_data[tag]["Ne_Nh"]
if "Pos" in keys:
self.pos = self.halo_data[tag]["Pos"]
if "Rho" in keys:
self.rho = self.halo_data[tag]["Rho"]
if "Sub" in keys:
self.sub = self.halo_data[tag]["Sub"]
if "Temp" in keys:
self.temp = self.halo_data[tag]["Temp"]
if "Velc" in keys:
self.velc = self.halo_data[tag]["Velc"]
if "Zmet" in keys:
self.zmet = self.halo_data[tag]["Zmet"]
if "DMMass" in keys:
self.DMmass = self.halo_data[tag]["DMMass"]
if "DMPos" in keys:
self.DMpos = self.halo_data[tag]["DMPos"]
if "DMSub" in keys:
self.DMsub = self.halo_data[tag]["DMSub"]
if "DMVelc" in keys:
self.DMvelc = self.halo_data[tag]["DMVelc"]
if "STMass" in keys:
self.STmass = self.halo_data[tag]["STMass"]
if "STPos" in keys:
self.STpos = self.halo_data[tag]["STPos"]
if "STSub" in keys:
self.STsub = self.halo_data[tag]["STSub"]
if "STVelc" in keys:
self.STvelc = self.halo_data[tag]["STVelc"]
if "STZmet" in keys:
self.STzmet = self.halo_data[tag]["STZmet"]
del keys, self.halo_data[tag]
# Compute radii
if "pos" in self.__dict__.keys():
self.rad = np.sqrt((self.pos ** 2.0).sum(axis=-1)) / self.R200
if "DMpos" in self.__dict__.keys():
self.DMrad = np.sqrt((self.DMpos ** 2.0).sum(axis=-1)) / self.R200
if "STpos" in self.__dict__.keys():
self.STrad = np.sqrt((self.STpos ** 2.0).sum(axis=-1)) / self.R200
return
def compute_gravitational_potentials(self, mpi):
"""
Compute the gravitational potential of all particles of a given halo
Arguments:
-mpi : An instance of the mpi class
"""
if not mpi.Rank:
print(" > Computing gravitational potential", flush=True)
# First build a KD tree for all particles
DMoff = len(self.mass)
SToff = DMoff + len(self.DMmass)
pos = np.concatenate((self.pos, self.DMpos, self.STpos), axis=0)
mass = np.concatenate((self.mass, self.DMmass, self.STmass), axis=0)
soft = np.concatenate(
(
np.zeros(len(self.pos), dtype=np.float) + soften[self.simtag][1],
np.zeros(len(self.DMpos), dtype=np.float) + soften[self.simtag][0],
np.zeros(len(self.STpos), dtype=np.float) + soften[self.simtag][0],
),
axis=0,
)
# Check all particle positions are unique -- if not, slightly shift the overlapping points
pos = _check_points_unique(pos)
if not mpi.Rank:
print(" -Building tree...", flush=True)
tree = pt.construct_tree(pos, mass, soft)
# Now compute potential
if not mpi.Rank:
print(" -Computing potential...", flush=True)
pot = pt.compute_potential_via_tree(pos, tree)
del pos, mass, soft, tree
# Set potentials for particles
self.gpot = pot[:DMoff]
self.DMgpot = pot[DMoff:SToff]
self.STgpot = pot[SToff:]
del DMoff, SToff
return
def compute_shape(self, mpi, aperture="500", ptype="GAS", remove_subs=True):
"""
Compute shape within aperture
Arguments:
-mpi : An instance of the mpi class
-aperture : STRING defining the radial aperture of interest
-ptype : STRING defining the particle type (e.g. dark matter) of interest
-remove_subs : BOOLEAN, if TRUE remove particles bound to substructures
"""
if not mpi.Rank:
print(
" > Computing {0} shape - aperture: {1}".format(ptype, aperture),
flush=True,
)
# Compute aperture
if aperture == "500":
ap = self.R500 / ct.Mpc_cm
elif aperture == "200":
ap = self.R200 / ct.Mpc_cm
elif aperture == "Vir":
ap = self.Rvir / ct.Mpc_cm
else:
print(
"ERROR:\n --> {0} aperture not implemented!\nEXITING...".format(
aperture
)
)
quit()
# Check particles type, select those with aperture
if ptype == "GAS":
pos = np.copy(self.pos) / ct.Mpc_cm
mass = np.copy(self.mass) / ct.Msun_g
elif ptype == "DM":
pos = np.copy(self.DMpos) / ct.Mpc_cm
mass = np.copy(self.DMmass) / ct.Msun_g
elif ptype == "STAR":
pos = np.copy(self.STpos) / ct.Mpc_cm
mass = np.copy(self.STmass) / ct.Msun_g
else:
print(
"ERROR:\n --> {0} particle type not implemented!\nEXITING...".format(
ptype
)
)
quit()
# Remove those in substructures -- if required
if remove_subs:
if ptype == "GAS":
sdx = np.where(self.sub == 0)[0]
elif ptype == "DM":
sdx = np.where(self.DMsub == 0)[0]
elif ptype == "STAR":
sdx = np.where(self.STsub == 0)[0]
else:
sdx = np.arange(len(mass))
# Actual shape calculation -- check for empty aperture
if len(sdx) <= 0:
q = 0.0
s = 0.0
Ivectors = np.zeros((3, 3), dtype=np.float)
else:
try:
q, s, Ivectors = sh.iterative_cumulative_shape_measure(
pos[sdx], mass[sdx], rmax=ap
)
except:
q = 0.0
s = 0.0
Ivectors = np.zeros((3, 3), dtype=np.float)
# Store and return
if ptype == "GAS":
if aperture == "500":
self.s_gas_500 = s
self.q_gas_500 = q
self.Iv_gas_500 = Ivectors
elif aperture == "200":
self.s_gas_200 = s
self.q_gas_200 = q
self.Iv_gas_200 = Ivectors
elif aperture == "Vir":
self.s_gas_vir = s
self.q_gas_vir = q
self.Iv_gas_vir = Ivectors
elif ptype == "DM":
if aperture == "500":
self.s_dm_500 = s
self.q_dm_500 = q
self.Iv_dm_500 = Ivectors
elif aperture == "200":
self.s_dm_200 = s
self.q_dm_200 = q
self.Iv_dm_200 = Ivectors
elif aperture == "Vir":
self.s_dm_vir = s
self.q_dm_vir = q
self.Iv_dm_vir = Ivectors
elif ptype == "STAR":
if aperture == "500":
self.s_st_500 = s
self.q_st_500 = q
self.Iv_st_500 = Ivectors
elif aperture == "200":
self.s_st_200 = s
self.q_st_200 = q
self.Iv_st_200 = Ivectors
elif aperture == "Vir":
self.s_st_vir = s
self.q_st_vir = q
self.Iv_st_vir = Ivectors
del s, q, Ivectors
return
def compute_shape_profile(self, mpi, ptype="GAS", remove_subs=True):
"""
Iteratively compute the shape profile of the gas cells
Arguments:
-mpi : An instance of the mpi class
-ptype : STRING defining the particle type (e.g. dark matter) of interest
-remove_subs : BOOLEAN, if TRUE remove particles bound to substructures
"""
if not mpi.Rank:
print(
" > Computing {0} iterative inertial tensor".format(ptype), flush=True
)
# Bin relevant particle type
if ptype in ["GAS", "GAS_HOT", "GAS_TEMP", "GAS_PRES"]:
pos = np.copy(self.pos) / ct.Mpc_cm
mass = np.copy(self.mass) / ct.Msun_g # [Msun]
if ptype in ["GAS_HOT", "GAS_TEMP", "GAS_PRES"]:
hdx = np.where(
(self.temp > 1.0e6) & (self.rho * 0.752 / ct.mp_g < 0.1)
)[0]
pos = pos[hdx]
if ptype == "GAS_HOT":
mass = mass[hdx] # [Msun]
elif ptype == "GAS_TEMP":
mass = np.copy(self.temp)[hdx] # [K]
elif ptype == "GAS_PRES":
mass = (
self.rho[hdx]
/ (ct.mu * ct.mp_g)
* self.temp[hdx]
* ct.kB_erg_K
/ ct.kev_2_erg
) # [keV/cm^3]
elif ptype == "DM":
pos = np.copy(self.DMpos) / ct.Mpc_cm
mass = np.copy(self.DMmass) / ct.Msun_g
elif ptype == "STAR":
pos = np.copy(self.STpos) / ct.Mpc_cm
mass = np.copy(self.STmass) / ct.Msun_g
else:
print(
"ERROR:\n --> {0} particle type not implemented!\nEXITING...".format(
ptype
)
)
quit()
# Remove those in substructures -- if required
if remove_subs:
if ptype in ["GAS", "GAS_HOT", "GAS_TEMP", "GAS_PRES"]:
sdx = np.where(self.sub == 0)[0]
elif ptype == "DM":
sdx = np.where(self.DMsub == 0)[0]
elif ptype == "STAR":
sdx = np.where(self.STsub == 0)[0]
else:
sdx = np.arange(len(mass))
# Actual shape profile measurement -- check for no particles
if len(sdx) <= 0:
q = np.zeros(self.Nbins, dtype=np.float)
s = np.zeros(self.Nbins, dtype=np.float)
Ivectors = np.zeros((self.Nbins, 3, 3), dtype=np.float)
else:
try:
q, s, Ivectors = sh.iterative_radial_shape_profile(
pos, mass, self.R200 / ct.Mpc_cm
)
except:
q = np.zeros(self.Nbins, dtype=np.float)
s = np.zeros(self.Nbins, dtype=np.float)
Ivectors = np.zeros((self.Nbins, 3, 3), dtype=np.float)
# Store and return
if ptype == "GAS":
self.s_gas = s
self.q_gas = q
self.Iv_gas = Ivectors
elif ptype == "GAS_HOT":
self.s_ghot = s
self.q_ghot = q
self.Iv_ghot = Ivectors
elif ptype == "GAS_TEMP":
self.s_temp = s
self.q_temp = q
self.Iv_temp = Ivectors
elif ptype == "GAS_PRES":
self.s_pres = s
self.q_pres = q
self.Iv_pres = Ivectors
elif ptype == "DM":
self.s_dm = s
self.q_dm = q
self.Iv_dm = Ivectors
elif ptype == "STAR":
self.s_star = s
self.q_star = q
self.Iv_star = Ivectors
del s, q, Ivectors
return
def compute_mass_profiles(self, mpi, Nb=25):
"""
Compute mass profiles and mass-weighted gas temperature profile
Arguments:
-mpi : An instance of the mpi class
-Nb : Number of bins in the radial profile [INTEGER]
"""
if not mpi.Rank:
print(" > Computing mass profiles", flush=True)
if "rad" in self.__dict__.keys():
self.GASpro = np.histogram(self.rad, bins=self.bins, weights=self.mass)[0]
if "temp" in self.__dict__.keys():
hdx = np.where(
(self.temp > 1.0e6) & (self.rho * 0.752 / ct.mp_g < 0.1)
)[0]
self.GHOTpro = np.histogram(
self.rad[hdx], bins=self.bins, weights=self.mass[hdx]
)[0]
if "DMrad" in self.__dict__.keys():
self.DMpro = np.histogram(self.DMrad, bins=self.bins, weights=self.DMmass)[
0
]
if "STrad" in self.__dict__.keys():
self.STpro = np.histogram(self.STrad, bins=self.bins, weights=self.STmass)[
0
]
# Temperature profile
if "rad" in self.__dict__.keys():
self.TEMPpro = np.histogram(
self.rad, bins=self.bins, weights=self.mass * self.temp
)[0]
idx = np.where(self.GASpro != 0.0)[0]
self.TEMPpro[idx] /= self.GASpro[idx]
del idx
# Total mass profile for NFW fit
self.TOTALpro = np.zeros(Nb, dtype=np.float)
if "rad" in self.__dict__.keys():
self.TOTALpro += self.GASpro
if "DMrad" in self.__dict__.keys():
self.TOTALpro += self.DMpro
if "STrad" in self.__dict__.keys():
self.TOTALpro += self.STpro
if self.TOTALpro.sum() <= 0.0:
del self.TOTALpro
return
def compute_velocities_and_non_thermal_pressure(self, mpi, Nb=25):
"""
Compute the non-thermal pressure profile
Arguments:
-mpi : An instance of the mpi class
-Nb : Number of bins in the radial profile [INTEGER]
"""
if not mpi.Rank:
print(" > Computing non-thermal pressure profile", flush=True)
# Remove bulk, add Hubble flow -- !!! TNG COSMOLOGY HARD WIRED !!!
H_z = (
np.sqrt(0.3089 * (1.0 + self.redshift) ** 3.0 + 0.6911)
* 100.0
* self.hub
* ct.km_cm
)
if "rad" in self.__dict__.keys():
ghub = (sh.vnorm_rp(self.pos).T * H_z * self.rad * self.R200 / ct.Mpc_cm).T
Gvel = self.velc - self.Vbulk + ghub
vmag = np.sqrt(((Gvel - ghub) ** 2.0).sum(axis=-1))
# del ghub
if "DMrad" in self.__dict__.keys():
dhub = (
sh.vnorm_rp(self.DMpos).T * H_z * self.DMrad * self.R200 / ct.Mpc_cm
).T
Dvel = self.DMvelc - self.Vbulk + dhub
del dhub
if "STrad" in self.__dict__.keys():
shub = (
sh.vnorm_rp(self.STpos).T * H_z * self.STrad * self.R200 / ct.Mpc_cm
).T
Svel = self.STvelc - self.Vbulk + shub
del shub
del H_z
# --- Compute r, theta, phi velocities
# GAS
if "rad" in self.__dict__.keys():
vr_g = (self.pos * Gvel).sum(axis=-1) / np.sqrt(
(self.pos ** 2.0).sum(axis=-1)
)
vt_g = (
Gvel[:, 0] * self.pos[:, 1] - self.pos[:, 0] * Gvel[:, 1]
) / np.sqrt((self.pos[:, 0:2] ** 2.0).sum(axis=-1))
vp_g = (
self.pos[:, 2]
* (self.pos[:, 0] * Gvel[:, 0] + self.pos[:, 1] * Gvel[:, 1])
- Gvel[:, 2] * (self.pos[:, 0:2] ** 2.0).sum(axis=-1)
) / (
np.sqrt((self.pos ** 2.0).sum(axis=-1))
* np.sqrt((self.pos[:, 0:2] ** 2.0).sum(axis=-1))
)
# DM
if "DMrad" in self.__dict__.keys():
vr_d = (self.DMpos * Dvel).sum(axis=-1) / np.sqrt(
(self.DMpos ** 2.0).sum(axis=-1)
)
vt_d = (
Dvel[:, 0] * self.DMpos[:, 1] - self.DMpos[:, 0] * Dvel[:, 1]
) / np.sqrt((self.DMpos[:, 0:2] ** 2.0).sum(axis=-1))
vp_d = (
self.DMpos[:, 2]
* (self.DMpos[:, 0] * Dvel[:, 0] + self.DMpos[:, 1] * Dvel[:, 1])
- Dvel[:, 2] * (self.DMpos[:, 0:2] ** 2.0).sum(axis=-1)
) / (
np.sqrt((self.DMpos ** 2.0).sum(axis=-1))
* np.sqrt((self.DMpos[:, 0:2] ** 2.0).sum(axis=-1))
)
# STARS
if "STrad" in self.__dict__.keys():
vr_s = (self.STpos * Svel).sum(axis=-1) / np.sqrt(
(self.STpos ** 2.0).sum(axis=-1)
)
vt_s = (
Svel[:, 0] * self.STpos[:, 1] - self.STpos[:, 0] * Svel[:, 1]
) / np.sqrt((self.STpos[:, 0:2] ** 2.0).sum(axis=-1))
vp_s = (
self.STpos[:, 2]
* (self.STpos[:, 0] * Svel[:, 0] + self.STpos[:, 1] * Svel[:, 1])
- Svel[:, 2] * (self.STpos[:, 0:2] ** 2.0).sum(axis=-1)
) / (
np.sqrt((self.STpos ** 2.0).sum(axis=-1))
* np.sqrt((self.STpos[:, 0:2] ** 2.0).sum(axis=-1))
)
# --- Compute mass-weighted velocity profiles
# GAS
if "rad" in self.__dict__.keys():
mass = np.histogram(self.rad, bins=self.bins, weights=self.mass)[0]
self.vr_gas = np.histogram(
self.rad, bins=self.bins, weights=self.mass * vr_g
)[0]
self.vt_gas = np.histogram(
self.rad, bins=self.bins, weights=self.mass * vt_g
)[0]
self.vp_gas = np.histogram(
self.rad, bins=self.bins, weights=self.mass * vp_g
)[0]
idx = np.where(mass > 0.0)[0]
self.vr_gas[idx] /= mass[idx] * ct.km_cm # [km/s]
self.vt_gas[idx] /= mass[idx] * ct.km_cm # [km/s]
self.vp_gas[idx] /= mass[idx] * ct.km_cm # [km/s]
del mass, idx
# DM
if "DMrad" in self.__dict__.keys():
mass = np.histogram(self.DMrad, bins=self.bins, weights=self.DMmass)[0]
self.vr_dm = np.histogram(
self.DMrad, bins=self.bins, weights=self.DMmass * vr_d
)[0]
self.vt_dm = np.histogram(
self.DMrad, bins=self.bins, weights=self.DMmass * vt_d
)[0]
self.vp_dm = np.histogram(
self.DMrad, bins=self.bins, weights=self.DMmass * vp_d
)[0]
idx = np.where(mass > 0.0)[0]
self.vr_dm[idx] /= mass[idx] * ct.km_cm # [km/s]
self.vt_dm[idx] /= mass[idx] * ct.km_cm # [km/s]
self.vp_dm[idx] /= mass[idx] * ct.km_cm # [km/s]
del mass, idx
# STARS
if "STrad" in self.__dict__.keys():
mass = np.histogram(self.STrad, bins=self.bins, weights=self.STmass)[0]
self.vr_star = np.histogram(
self.STrad, bins=self.bins, weights=self.STmass * vr_s
)[0]
self.vt_star = np.histogram(
self.STrad, bins=self.bins, weights=self.STmass * vt_s
)[0]
self.vp_star = np.histogram(
self.STrad, bins=self.bins, weights=self.STmass * vp_s
)[0]
idx = np.where(mass > 0.0)[0]
self.vr_star[idx] /= mass[idx] * ct.km_cm # [km/s]
self.vt_star[idx] /= mass[idx] * ct.km_cm # [km/s]
self.vp_star[idx] /= mass[idx] * ct.km_cm # [km/s]
del mass, idx
# --- Velocity dispersion
# GAS
if "rad" in self.__dict__.keys():
self.sigr_gas = np.zeros(Nb, dtype=np.float)
self.sigt_gas = np.zeros(Nb, dtype=np.float)
self.sigp_gas = np.zeros(Nb, dtype=np.float)
gdx = np.digitize(self.rad, self.bins) - 1
for j in np.unique(gdx):
if j >= 0 and j < Nb:
self.sigr_gas[j] = (
np.sqrt(
np.average(
(vr_g[gdx == j] - self.vr_gas[j]) ** 2.0,
weights=self.mass[gdx == j],
)
)
/ ct.km_cm
)
self.sigt_gas[j] = (
np.sqrt(
np.average(
(vt_g[gdx == j] - self.vt_gas[j]) ** 2.0,
weights=self.mass[gdx == j],
)
)
/ ct.km_cm
)
self.sigp_gas[j] = (
np.sqrt(
np.average(
(vp_g[gdx == j] - self.vp_gas[j]) ** 2.0,
weights=self.mass[gdx == j],
)
)
/ ct.km_cm
)
del gdx
# DM
if "DMrad" in self.__dict__.keys():
self.sigr_dm = np.zeros(Nb, dtype=np.float)
self.sigt_dm = np.zeros(Nb, dtype=np.float)
self.sigp_dm = np.zeros(Nb, dtype=np.float)
ddx = np.digitize(self.DMrad, self.bins) - 1
for j in np.unique(ddx):
if j >= 0 and j < Nb:
self.sigr_dm[j] = (
np.sqrt(
np.average(
(vr_d[ddx == j] - self.vr_dm[j]) ** 2.0,
weights=self.DMmass[ddx == j],
)
)
/ ct.km_cm
)
self.sigt_dm[j] = (
np.sqrt(
np.average(
(vt_d[ddx == j] - self.vt_dm[j]) ** 2.0,
weights=self.DMmass[ddx == j],
)
)
/ ct.km_cm
)
self.sigp_dm[j] = (
np.sqrt(
np.average(
(vp_d[ddx == j] - self.vp_dm[j]) ** 2.0,
weights=self.DMmass[ddx == j],
)
)
/ ct.km_cm
)
del ddx
# STARS
if "STrad" in self.__dict__.keys():
self.sigr_star = np.zeros(Nb, dtype=np.float)
self.sigt_star = np.zeros(Nb, dtype=np.float)
self.sigp_star = np.zeros(Nb, dtype=np.float)
sdx = np.digitize(self.STrad, self.bins) - 1
for j in np.unique(sdx):
if j >= 0 and j < Nb:
self.sigr_star[j] = (
np.sqrt(
np.average(
(vr_s[sdx == j] - self.vr_star[j]) ** 2.0,
weights=self.STmass[sdx == j],
)
)
/ ct.km_cm
)
self.sigt_star[j] = (
np.sqrt(
np.average(
(vt_s[sdx == j] - self.vt_star[j]) ** 2.0,
weights=self.STmass[sdx == j],
)
)
/ ct.km_cm
)
self.sigp_star[j] = (
np.sqrt(
np.average(
(vp_s[sdx == j] - self.vp_star[j]) ** 2.0,
weights=self.STmass[sdx == j],
)
)
/ ct.km_cm
)
del sdx
# Non-thermal pressure
self.vols = (
(4.0 / 3.0)
* np.pi
* ((self.bins[1:] * self.R200) ** 3.0 - (self.bins[:-1] * self.R200) ** 3.0)
)
if "rad" in self.__dict__.keys():
rho = self.GASpro / self.vols
vmag2 = (
self.sigr_gas ** 2.0 + self.sigt_gas ** 2.0 + self.sigp_gas ** 2.0
) * 1.0e10
self.Pkin = rho * vmag2
del rho, vmag2
return
def compute_thermo_profiles(self, mpi, Nb=25):
"""
Compute various thermodynamic profiles
Arguments:
-mpi : An instance of the mpi class
-Nb : Number of bins in the radial profile [INTEGER]
"""
if not mpi.Rank:
print(" > Computing thermodynamic profiles", flush=True)
# Find hot, non-star forming (density cut) gas
idx = np.where((self.temp > 1.0e6) & (self.rho * 0.752 / ct.mp_g < 0.1))[0]
# Hot gas density profile
mass_hot = np.histogram(self.rad[idx], bins=self.bins, weights=self.mass[idx])[
0
]
hdx = np.where(mass_hot > 0.0)[0]
self.Rho_hot = (mass_hot / self.vols) * (
ct.Mpc_cm ** 3.0 / ct.Msun_g
) # [Msun / Mpc^3]
# Spectroscopic-like temperature profile
self.Tsl = np.zeros(len(self.cens), dtype=np.float64)
wgts1 = np.histogram(
self.rad[idx],
bins=self.bins,
weights=((self.rho[idx] * (ct.Mpc_cm ** 3.0) / ct.Msun_g) ** 2.0)
/ (self.temp[idx] ** 0.5),
)[0]
wgts2 = np.histogram(
self.rad[idx],
bins=self.bins,
weights=((self.rho[idx] * (ct.Mpc_cm ** 3.0) / ct.Msun_g) ** 2.0)
/ (self.temp[idx] ** 1.5),
)[0]
self.Tsl[hdx] = (ct.kB_erg_K / ct.kev_2_erg) * (
wgts1[hdx] / wgts2[hdx]
) # [keV]
# Clumping density profiles
mass = np.histogram(self.rad, bins=self.bins, weights=self.mass)[0]
mdx = np.where(mass > 0.0)[0]
self.rho_sq = np.histogram(
self.rad, bins=self.bins, weights=self.mass * self.rho * self.rho
)[0]
self.rho_sq[mdx] /= mass[mdx] * (
ct.Msun_g ** 2.0 / ct.Mpc_cm ** 6.0
) # [Msun^2 / Mpc^6]
self.rho_sq_hot = np.histogram(
self.rad[idx],
bins=self.bins,
weights=self.mass[idx] * self.rho[idx] * self.rho[idx],
)[0]
self.rho_sq_hot[hdx] /= mass_hot[hdx] * (
ct.Msun_g ** 2.0 / ct.Mpc_cm ** 6.0
) # [Msun^2 / Mpc^6]
# Clumping pressure profiles
pres = ct.kB_erg_K * self.temp * self.rho / (ct.mu * ct.mp_g)
self.pres_sq = np.histogram(
self.rad, bins=self.bins, weights=self.mass * pres * pres
)[0]
self.pres_sq[mdx] /= mass[mdx] # [erg^2 / cm^6]
self.pres_sq_hot = np.histogram(
self.rad[idx],
bins=self.bins,
weights=self.mass[idx] * pres[idx] * pres[idx],
)[0]
self.pres_sq_hot[hdx] /= mass_hot[hdx] # [erg^2 / cm^6]
del pres
# Emission measure profile (hot gas only)
ne = (self.ne_nh[idx] * 0.76 * self.rho[idx] / ct.mp_g) ** 2.0
sp_wgt = np.histogram(
self.rad[idx],
bins=self.bins,
weights=(self.rho[idx] * (ct.Mpc_cm ** 3.0 / ct.Msun_g) ** 2.0)
* (self.temp[idx] ** 0.5),
)[0]
self.emm = np.histogram(
self.rad[idx],
bins=self.bins,
weights=(self.rho[idx] * (ct.Mpc_cm ** 3.0 / ct.Msun_g) ** 2.0)
* (self.temp[idx] ** 0.5)
* ne,
)[0]
edx = np.where(sp_wgt > 0.0)[0]
self.emm[edx] /= sp_wgt[edx] # [cm^-6]
del edx, sp_wgt, ne
return
def compute_metallicity_profiles(self, mpi, Nb=25):
"""
Compute gaseous and stellar metallicity profiles
Arguments:
-mpi : An instance of the mpi class
-Nb : Number of bins in the radial profile [INTEGER]
"""
if not mpi.Rank:
print(" > Computing metallicity profiles", flush=True)
# Find hot, non-star forming (density cut) gas
idx = np.where((self.temp > 1.0e6) & (self.rho * 0.752 / ct.mp_g < 0.1))[0]
# Gas metallicity profile
sp_wgt = np.histogram(
self.rad[idx],
bins=self.bins,
weights=(self.rho[idx] * (ct.Mpc_cm ** 3.0 / ct.Msun_g) ** 2.0)
* (self.temp[idx] ** 0.5),
)[0]
self.Zgas = np.histogram(
self.rad[idx],
bins=self.bins,
weights=(self.rho[idx] * (ct.Mpc_cm ** 3.0 / ct.Msun_g) ** 2.0)
* (self.temp[idx] ** 0.5)
* self.zmet[idx],
)[0]
edx = np.where(sp_wgt > 0.0)[0]
self.Zgas[edx] /= sp_wgt[edx]
del sp_wgt, edx
# Stellar metallicity profile
smass = np.histogram(self.STrad, bins=self.bins, weights=self.STmass)[0]
self.Zstar = np.histogram(
self.STrad, bins=self.bins, weights=self.STmass * self.STzmet
)[0]
idx = np.where(smass > 0.0)[0]
self.Zstar[idx] /= smass[idx]
del smass, idx
return
def compute_observable_properties(self, mpi, Emin=0.5, Emax=2.0, Enorm=65.0):
"""
Compute the X-ray luminosity and SZ signal - try to do everything in place
Arguments:
-mpi : An instance of the mpi class
-Emin : Minimum value in the X-ray band [FLOAT]
-Emax : Maximum value in the X-ray band [FLOAT]
-Enorm : Emission measure normalization value [FLOAT]
"""
if not mpi.Rank:
print(" > Computing observable properties", flush=True)
self.Enorm = Enorm
# We only use "hot" particles that are non-star-forming
hdx = np.where((self.temp >= 1.0e6) & (self.rho * 0.752 / ct.mp_g < 0.1))[0]
# --- Compton-y signal
Ysz = (
(ct.sigT / (ct.me_keV * ct.kev_2_erg))
* ct.kB_erg_K
* self.temp[hdx]
* (self.mass[hdx] * 0.752 * self.ne_nh[hdx] / ct.mp_g)
/ ct.Mpc_cm ** 2.0
)
# Ysz profile
self.Ysz_pro = np.histogram(self.rad[hdx], bins=self.bins, weights=Ysz)[0]
# Ysz aperture values
self.Ysz_500 = Ysz[self.rad[hdx] <= self.R500 / self.R200].sum()
self.Ysz_200 = Ysz[self.rad[hdx] <= 1.0].sum()
self.Ysz_5r500 = Ysz[self.rad[hdx] <= 5.0 * self.R500 / self.R200].sum()
del Ysz
# --- X-ray luminosity
self._read_APEC_table()
itemp = _locate(self.APEC_temperatures, np.log10(self.temp[hdx]))
ne2dV = (self.mass[hdx] / self.rho[hdx]) * (
self.ne_nh[hdx] * 0.76 * self.rho[hdx] / ct.mp_g
) ** 2.0
Xspec = np.zeros((len(itemp), len(self.APEC_energies)), dtype=np.float)
Xspec += self.APEC_H[itemp]
Xspec += self.APEC_He[itemp]
Xspec += (self.APEC_C[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_N[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_O[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_Ne[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_Mg[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_Si[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_S[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_Ca[itemp].T * self.zmet[hdx]).T
Xspec += (self.APEC_Fe[itemp].T * self.zmet[hdx]).T
Xspec = (Xspec.T * ne2dV).T
del itemp
# We want a soft-band luminosity
edx = np.where((self.APEC_energies >= Emin) & (self.APEC_energies <= Emax))[0]
Lx_sft = Xspec[:, edx].sum(axis=-1)
# Soft-band Lx profile
self.Lx_pro = np.histogram(self.rad[hdx], bins=self.bins, weights=Lx_sft)[0]
# Lx aperture values
self.Lx_500 = Lx_sft[self.rad[hdx] <= self.R500 / self.R200].sum()
self.Lx_500ce = Lx_sft[
(self.rad[hdx] > 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
].sum()
self.Lx_200 = Lx_sft[self.rad[hdx] <= 1.0].sum()
del edx, Lx_sft
# --- X-ray temperature
self._calculate_photon_conversion_factor()
idx = np.digitize(self.rad[hdx], self.bins) - 1
self.Tx_pro = np.zeros(self.Nbins, dtype=np.float)
for j in np.unique(idx):
# X-ray spectrum in annulus
Xspec_ann = Xspec[idx == j].sum(axis=0) * self.photon_conv
# Initial fit guesses - temperature, density, metallicity
T = np.log10(
np.sum((self.mass[hdx] * self.temp[hdx])[idx == j])
/ np.sum((self.mass[hdx])[idx == j])
)
D = (
np.log10(
np.sum((self.mass[hdx] * ne2dV)[idx == j])
/ np.sum((self.mass[hdx])[idx == j])
)
- self.Enorm
)
Z = np.sum((self.mass[hdx] * self.zmet[hdx])[idx == j]) / np.sum(
(self.mass[hdx])[idx == j]
)
# Fit spectrum for temperature
try:
limits = ([T - 0.5, -6.0, 1.0e-6], [T + 0.5, 6.0, 10.0])
fit = least_squares(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_LS"),
bounds=limits,
method="trf",
)
except:
limits = [(T - 0.5, T + 0.5), (-6.0, 6.0), (1.0e-6, 10.0)]
fit = minimize(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_MN"),
bounds=limits,
method="TNC",
options={"maxiter": 200},
)
self.Tx_pro[j] = (ct.kB_erg_K / ct.kev_2_erg) * 10.0 ** fit.x[0]
# --- Tx apertures
# R500
Xspec_ann = (
Xspec[self.rad[hdx] <= self.R500 / self.R200].sum(axis=0) * self.photon_conv
)
T = np.log10(
np.sum(
(self.mass[hdx] * self.temp[hdx])[
self.rad[hdx] <= self.R500 / self.R200
]
)
/ np.sum((self.mass[hdx])[self.rad[hdx] <= self.R500 / self.R200])
)
D = (
np.log10(
np.sum((self.mass[hdx] * ne2dV)[self.rad[hdx] <= self.R500 / self.R200])
/ np.sum((self.mass[hdx])[self.rad[hdx] <= self.R500 / self.R200])
)
- self.Enorm
)
Z = np.sum(
(self.mass[hdx] * self.zmet[hdx])[self.rad[hdx] <= self.R500 / self.R200]
) / np.sum((self.mass[hdx])[self.rad[hdx] <= self.R500 / self.R200])
try:
limits = ([T - 0.5, -6.0, 1.0e-6], [T + 0.5, 6.0, 10.0])
fit = least_squares(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_LS"),
bounds=limits,
method="trf",
)
except:
limits = [(T - 0.5, T + 0.5), (-6.0, 6.0), (1.0e-6, 10.0)]
fit = minimize(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_MN"),
bounds=limits,
method="TNC",
options={"maxiter": 200},
)
self.Tx_500 = (ct.kB_erg_K / ct.kev_2_erg) * 10.0 ** fit.x[0]
# R500 CE
Xspec_ann = (
Xspec[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
].sum(axis=0)
* self.photon_conv
)
T = np.log10(
np.sum(
(self.mass[hdx] * self.temp[hdx])[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
]
)
/ np.sum(
(self.mass[hdx])[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
]
)
)
D = (
np.log10(
np.sum(
(self.mass[hdx] * ne2dV)[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
]
)
/ np.sum(
(self.mass[hdx])[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
]
)
)
- self.Enorm
)
Z = np.sum(
(self.mass[hdx] * self.zmet[hdx])[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
]
) / np.sum(
(self.mass[hdx])[
(self.rad[hdx] <= 0.15 * self.R500 / self.R200)
& (self.rad[hdx] <= self.R500 / self.R200)
]
)
try:
limits = ([T - 0.5, -6.0, 1.0e-6], [T + 0.5, 6.0, 10.0])
fit = least_squares(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_LS"),
bounds=limits,
method="trf",
)
except:
limits = [(T - 0.5, T + 0.5), (-6.0, 6.0), (1.0e-6, 10.0)]
fit = minimize(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_MN"),
bounds=limits,
method="TNC",
options={"maxiter": 200},
)
self.Tx_500ce = (ct.kB_erg_K / ct.kev_2_erg) * 10.0 ** fit.x[0]
# R200
Xspec_ann = Xspec[self.rad[hdx] <= 1.0].sum(axis=0) * self.photon_conv
T = np.log10(
np.sum((self.mass[hdx] * self.temp[hdx])[self.rad[hdx] <= 1.0])
/ np.sum((self.mass[hdx])[self.rad[hdx] <= 1.0])
)
D = (
np.log10(
np.sum((self.mass[hdx] * ne2dV)[self.rad[hdx] <= 1.0])
/ np.sum((self.mass[hdx])[self.rad[hdx] <= 1.0])
)
- self.Enorm
)
Z = np.sum((self.mass[hdx] * self.zmet[hdx])[self.rad[hdx] <= 1.0]) / np.sum(
(self.mass[hdx])[self.rad[hdx] <= 1.0]
)
try:
limits = ([T - 0.5, -6.0, 1.0e-6], [T + 0.5, 6.0, 10.0])
fit = least_squares(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_LS"),
bounds=limits,
method="trf",
)
except:
limits = [(T - 0.5, T + 0.5), (-6.0, 6.0), (1.0e-6, 10.0)]
fit = minimize(
self._spectrum_model,
[T, D, Z],
args=(Xspec_ann, "FIT_MN"),
bounds=limits,
method="TNC",
options={"maxiter": 200},
)
self.Tx_200 = (ct.kB_erg_K / ct.kev_2_erg) * 10.0 ** fit.x[0]
del Xspec, ne2dV
return
def compute_centre_of_mass_offset(self, mpi):
"""
Compute the centre of mass offset inside Rvir
Arguments:
-mpi : An instance of the mpi class
"""
if not mpi.Rank:
print(" > Computing centre of mass offset", flush=True)
# Initialise
self.Xoff = 0.0
gmass = 0.0
dmass = 0.0
smass = 0.0
# Gas
if "rad" in self.__dict__.keys():
idx = np.where(self.rad * self.R200 <= self.Rvir)[0]
self.Xoff += np.sum(self.mass[idx] * self.pos[idx].T, axis=-1)
gmass = np.sum(self.mass[idx])
# DM
if "DMrad" in self.__dict__.keys():
idx =
|
np.where(self.DMrad * self.R200 <= self.Rvir)
|
numpy.where
|
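A pattern worth isolating from the halo extraction above is the minimum-image convention: componentwise separations are wrapped into [-L/2, L/2] before the norm is taken, so distances respect the periodic box. A standalone sketch with an illustrative box size:
import numpy as np

boxsize = 100.0
dims = np.array([boxsize, boxsize, boxsize])

pos = np.array([[99.0, 1.0, 50.0]])
centre = np.array([1.0, 99.0, 50.0])

d = pos - centre
d = np.where(d > 0.5 * dims, d - dims, d)   # wrap positive overshoots
d = np.where(d < -0.5 * dims, d + dims, d)  # wrap negative overshoots
r = np.sqrt((d ** 2.0).sum(axis=-1))

# across the periodic boundary the points are ~2.8 apart, not ~138
assert np.isclose(r[0], np.sqrt(8.0))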
#!/users/hoi-tim.cheung/.conda/envs/dmm/bin/python
from figaro.mixture import DPGMM
import numpy as np
import dill
import os
import sys
cdir = os.path.dirname(os.path.dirname(os.path.dirname(sys.path[0])))
np.random.seed(0)
import argparse
parser = argparse.ArgumentParser(description='Generate population and posterior samples.')
parser.add_argument('--N',type=int,help='number of events in the catalog',default=1000000)
args = parser.parse_args()
N = int(args.N)  # Number of events
filename = cdir + '/Mock_Data/lensed_posterior{:.0f}.npz'.format(N)
data = np.load(filename)
l1m1 = data['m1p1']
l1m2 = data['m2p1']
l2m1 = data['m1p2']
l2m2 = data['m2p2']
l1z = data['zp1']
l2z = data['zp2']
mmin = min(np.min(l1m1),np.min(l1m2),np.min(l2m1),np.min(l2m2)) - 1.0
mmax = max(np.max(l1m1),
|
np.max(l1m2)
|
numpy.max
|
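The mmin/mmax bookkeeping above pads the global extrema of the four mass arrays by one solar mass so the DPGMM support covers every posterior sample. The same bounds can be computed in one pass by concatenating, sketched here with synthetic draws:
import numpy as np

rng = np.random.default_rng(0)
l1m1, l1m2, l2m1, l2m2 = (rng.uniform(5.0, 80.0, 100) for _ in range(4))

all_masses = np.concatenate([l1m1, l1m2, l2m1, l2m2])
mmin = np.min(all_masses) - 1.0
mmax = np.max(all_masses) + 1.0
assert mmin < all_masses.min() and mmax > all_masses.max()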
import json
import numpy as np
from matplotlib import pyplot as plt
from pathlib import Path
mainPath = Path('/yourpathhere/')
folderPath = mainPath.joinpath('img_results_2019-12-12_23-17_mnist_experiments')
filePath = folderPath.joinpath('losses_and_nfes.json')
with open(filePath) as f:
d = json.load(f)
print(d)
dicAnode5 = d[0]
dicAnode50 = d[1]
dicAnode100 = d[2]
dicNode = d[3]
accuracyAnode5 = np.mean(np.array(dicAnode5['epoch_accuracy_history']), axis=0)
nfeAnode5 = np.mean(np.array(dicAnode5['epoch_nfe_history']), axis=0)
lossAnode5 = np.mean(np.array(dicAnode5['epoch_loss_history']), axis=0)
accuracyAnode50 = np.mean(np.array(dicAnode50['epoch_accuracy_history']), axis=0)
nfeAnode50 = np.mean(np.array(dicAnode50['epoch_nfe_history']), axis=0)
lossAnode50 = np.mean(np.array(dicAnode50['epoch_loss_history']), axis=0)
accuracyAnode100 = np.mean(np.array(dicAnode100['epoch_accuracy_history']), axis=0)
nfeAnode100 = np.mean(np.array(dicAnode100['epoch_nfe_history']), axis=0)
lossAnode100 = np.mean(np.array(dicAnode100['epoch_loss_history']), axis=0)
accuracyNode = np.mean(np.array(dicNode['epoch_accuracy_history']), axis=0)
nfeNode = np.mean(np.array(dicNode['epoch_nfe_history']), axis=0)
lossNode = np.mean(np.array(dicNode['epoch_loss_history']), axis=0)
epochs = np.arange(1, len(np.squeeze(accuracyAnode5))+1)
# recreating figures from the json files saved by the experimental runs - 5 augmented Dimensions
fig1, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3.5))
ax1.plot(epochs,
|
np.squeeze(accuracyNode)
|
numpy.squeeze
|
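Each epoch_*_history entry loaded above is a runs-by-epochs array, so np.mean(..., axis=0) collapses the repeat runs into one per-epoch curve before plotting. A toy version with fabricated histories:
import numpy as np

# three repeat runs, five epochs each (numbers are illustrative)
history = np.array([[0.60, 0.70, 0.80, 0.85, 0.90],
                    [0.55, 0.68, 0.79, 0.86, 0.91],
                    [0.58, 0.72, 0.81, 0.84, 0.89]])

mean_curve = np.mean(history, axis=0)  # average over runs, per epoch
epochs = np.arange(1, len(mean_curve) + 1)
assert mean_curve.shape == (5,) and epochs[-1] == 5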
import sys
import numpy as np
from time import clock
from masterboard_utils import CalibrationController, mbs, GotoController
class RobotHAL():
''' This class provides robot-specific access to the solo generic hardware'''
def __init__(self, interfaceName="eth0", dt=0.001):
self.isInitialized = False
self.InitRobotSpecificParameters()
assert len(self.motorToUrdf) == self.nb_motors
assert len(self.motorSign) == self.nb_motors
assert self.nb_motors % 2 == 0
# TODO assert mapping..
assert self.maximumCurrent >= 0
# decide how to search for the index, given that we are close to the real zero
searchStrategy = self.nb_motors * [CalibrationController.ALTERNATIVE]
# to be in [-2pi;+2pi]
self.encoderOffsets = np.fmod(self.encoderOffsets, 2*np.pi)
# to be in [-pi;+pi]
self.encoderOffsets[self.encoderOffsets > +np.pi] -= 2*np.pi
self.encoderOffsets[self.encoderOffsets < -np.pi] += 2*np.pi
for i in range(self.nb_motors):
if (self.encoderOffsets[i] > (np.pi/2.0)):
searchStrategy[i] = CalibrationController.POSITIVE
elif (self.encoderOffsets[i] < - (np.pi/2.0)):
searchStrategy[i] = CalibrationController.NEGATIVE
print(searchStrategy)
print(self.encoderOffsets)
self.t = 0
self.cpt = 0
self.last = 0
self.dt = dt
self.nb_motorDrivers = int(self.nb_motors/2)
self.gearRatioSigned = np.zeros(8)
self.gearRatioSigned = self.motorSign * self.gearRatio
self.jointKtSigned = self.motorKt * self.gearRatioSigned # Nm(joint)/A(motor)
self.q_mes = np.zeros(self.nb_motors)
self.v_mes = np.zeros(self.nb_motors)
self.torquesFromCurrentMeasurment = np.zeros(self.nb_motors)
self.baseAngularVelocity = np.zeros(3)
self.baseOrientation = np.array([0., 0., 0., 1.])
self.baseLinearAcceleration = np.zeros(3)
self.hardware = mbs.MasterBoardInterface(interfaceName)
self.calibCtrl = CalibrationController(self.hardware, self.nb_motors, self.dt, Kd=0.01, Kp=3.0 ,searchStrategy=searchStrategy)
self.gotoCtrl = GotoController(self.hardware, self.nb_motors, self.dt, Kd=0.01, Kp=3.0)
def InitRobotSpecificParameters(self):
'''
This function initialises all robot specific parameters
This function **must** be overloaded in a child class for different robots
'''
raise RuntimeError("This class is an abstract class. Please overload this method.")
def Init(self,calibrateEncoders=False, q_init=None):
# Initialization of the interface between the computer and the master board and the master board itself
self.hardware.Init()
self.EnableAllMotors()
self.isInitialized = True
self.InitMasterBoard() # Initialization of the master board
if not self.AreAllDriversConnected():
self.hardware.Stop()
raise RuntimeError("Not all declared motor drivers are connected.") # TODO replace RuntimeError by custom exception
if q_init is not None:
# Define initial configuration after calibration
assert (max(abs(q_init))<2*np.pi)
# Convert to motor angle:
motor_angle_init = np.zeros(self.nb_motors)
for i in range(self.nb_motors):
motor_angle_init[i] = q_init[self.motorToUrdf[i]] * self.gearRatioSigned[i]
# Change the target of the controller
self.gotoCtrl.FinalPosition = motor_angle_init
self.T_move = 3.
if calibrateEncoders:
for i in range(self.nb_motors):
self.hardware.GetMotor(i).SetPositionOffset(self.encoderOffsets[i])
self.hardware.GetMotor(i).enable_index_offset_compensation = True
print("Running calibration...")
self.RunHommingRoutine()
print("End Of Calibration")
@staticmethod
def EulerToQuaternion(roll_pitch_yaw):
roll, pitch, yaw = roll_pitch_yaw
sr =
|
np.sin(roll/2.)
|
numpy.sin
|
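EulerToQuaternion in the prompt above is truncated at the first half-angle sine. The standard roll-pitch-yaw (ZYX) conversion it begins is sketched below as a free function; this is the textbook formula, not necessarily the exact body of the class method:
import numpy as np

def euler_to_quaternion(roll_pitch_yaw):
    roll, pitch, yaw = roll_pitch_yaw
    sr, cr = np.sin(roll / 2.0), np.cos(roll / 2.0)
    sp, cp = np.sin(pitch / 2.0), np.cos(pitch / 2.0)
    sy, cy = np.sin(yaw / 2.0), np.cos(yaw / 2.0)
    qx = sr * cp * cy - cr * sp * sy
    qy = cr * sp * cy + sr * cp * sy
    qz = cr * cp * sy - sr * sp * cy
    qw = cr * cp * cy + sr * sp * sy
    return np.array([qx, qy, qz, qw])

# zero angles give the identity rotation [0, 0, 0, 1]
assert np.allclose(euler_to_quaternion([0.0, 0.0, 0.0]), [0.0, 0.0, 0.0, 1.0])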
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
@timer
def test_cat_patches():
# Test the different ways to set patches in the catalog.
# Use the same input as test_radec()
if __name__ == '__main__':
ngal = 10000
npatch = 128
max_top = 7
else:
ngal = 1000
npatch = 8
max_top = 3
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
# cat0 is the base catalog without patches
cat0 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
assert cat0.npatch == 1
assert len(cat0.patches) == 1
assert cat0.patches[0].ntot == ngal
assert cat0.patches[0].npatch == 1
# 1. Make the patches automatically using kmeans
# Note: If npatch is a power of two, then the patch determination is completely
# deterministic, which is helpful for this test.
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch)
p2, cen = cat0.getNField(max_top=max_top).run_kmeans(npatch)
np.testing.assert_array_equal(cat1.patch, p2)
assert cat1.npatch == npatch
assert len(cat1.patches) == npatch
assert np.sum([p.ntot for p in cat1.patches]) == ngal
assert all([c.npatch == npatch for c in cat1.patches])
# 2. Optionally can use alt algorithm
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_alt=True)
p3, cen = cat0.getNField(max_top=max_top).run_kmeans(npatch, alt=True)
np.testing.assert_array_equal(cat2.patch, p3)
assert cat2.npatch == npatch
assert len(cat2.patches) == npatch
assert all([c.npatch == npatch for c in cat2.patches])
# 3. Optionally can set different init method
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='kmeans++')
# Can't test this equalling a repeat run from cat0, because kmpp has a random aspect to it.
# But at least check that it isn't equal to the other two versions.
assert not np.array_equal(cat3.patch, p2)
assert not np.array_equal(cat3.patch, p3)
assert cat3.npatch == npatch
assert all([c.npatch == npatch for c in cat3.patches])
cat3b = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='random')
assert not np.array_equal(cat3b.patch, p2)
assert not np.array_equal(cat3b.patch, p3)
assert not np.array_equal(cat3b.patch, cat3.patch)
# 4. Pass in patch array explicitly
cat4 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=p2)
np.testing.assert_array_equal(cat4.patch, p2)
assert cat4.npatch == npatch
assert all([c.npatch == npatch for c in cat4.patches])
# 5. Read patch from a column in ASCII file
file_name5 = os.path.join('output','test_cat_patches.dat')
cat4.write(file_name5)
cat5 = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3)
assert not cat5.loaded
np.testing.assert_array_equal(cat5.patch, p2)
assert cat5.loaded # Now it's loaded, since we accessed cat5.patch.
assert cat5.npatch == npatch
assert all([c.npatch == npatch for c in cat5.patches])
# Just load a single patch from an ASCII file with many patches.
for i in range(npatch):
cat = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i)
assert cat.patch == cat5.patches[i].patch == i
np.testing.assert_array_equal(cat.x,cat5.patches[i].x)
np.testing.assert_array_equal(cat.y,cat5.patches[i].y)
assert cat == cat5.patches[i]
cata = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i, last_row=ngal//2)
catb = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i, first_row=ngal//2+1)
assert cata.patch == i
np.testing.assert_array_equal(cata.x,cat5.patches[i].x[:cata.nobj])
np.testing.assert_array_equal(cata.y,cat5.patches[i].y[:cata.nobj])
np.testing.assert_array_equal(catb.x,cat5.patches[i].x[cata.nobj:])
np.testing.assert_array_equal(catb.y,cat5.patches[i].y[cata.nobj:])
# get_patches from a single patch will return a list with just itself.
assert cata.get_patches(False) == [cata]
assert catb.get_patches(True) == [catb]
# Patches start in an unloaded state (by default)
cat5b = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3)
assert not cat5b.loaded
cat5b_patches = cat5b.get_patches(low_mem=True)
assert cat5b.loaded # Needed to load to get number of patches.
cat5b._patches = None # Need this so get_patches doesn't early exit.
cat5b_patches2 = cat5b.get_patches(low_mem=True) # Repeat with loaded cat5b should be equiv.
cat5b._patches = None
cat5b_patches3 = cat5b.get_patches(low_mem=False)
cat5b._patches = None
cat5b_patches4 = cat5b.get_patches() # Default is False
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat5b_patches[i].loaded # But single patch not loaded yet.
assert not cat5b_patches2[i].loaded
assert cat5b_patches3[i].loaded # Unless we didn't ask for low memory.
assert cat5b_patches4[i].loaded
assert np.all(cat5b_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat5b_patches[i].x, cat5.x[cat5.patch == i])
# Just load a single patch from an ASCII file with many patches.
for i in range(4):
cat = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=3, patch=i)
assert cat.patch == cat5.patches[i].patch
np.testing.assert_array_equal(cat.x,cat5.patches[i].x)
np.testing.assert_array_equal(cat.y,cat5.patches[i].y)
assert cat == cat5.patches[i]
assert cat == cat5b_patches[i]
# 6. Read patch from a column in FITS file
file_name6 = os.path.join('output','test_cat_patches.fits')
cat4.write(file_name6)
cat6 = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch')
np.testing.assert_array_equal(cat6.patch, p2)
cat6b = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch_ext=1)
np.testing.assert_array_equal(cat6b.patch, p2)
assert len(cat6.patches) == npatch
assert len(cat6b.patches) == npatch
# Calling get_patches will not force loading of the file.
cat6c = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch')
assert not cat6c.loaded
cat6c_patches = cat6c.get_patches(low_mem=True)
assert cat6c.loaded
cat6c._patches = None
cat6c_patches2 = cat6c.get_patches(low_mem=True)
cat6c._patches = None
cat6c_patches3 = cat6c.get_patches(low_mem=False)
cat6c._patches = None
cat6c_patches4 = cat6c.get_patches()
for i in range(4):
assert not cat6c_patches[i].loaded
assert not cat6c_patches2[i].loaded
assert cat6c_patches3[i].loaded
assert cat6c_patches4[i].loaded
assert np.all(cat6c_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat6c_patches[i].x, cat6.x[cat6.patch == i])
cat = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
assert cat.patch == cat6.patches[i].patch
np.testing.assert_array_equal(cat.x,cat6.patches[i].x)
np.testing.assert_array_equal(cat.y,cat6.patches[i].y)
assert cat == cat6.patches[i]
assert cat == cat6c_patches[i]
cata = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec', last_row=ngal//2,
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
catb = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec', first_row=ngal//2+1,
ra_units='rad', dec_units='rad', patch_col='patch', patch=i)
assert cata.patch == i
np.testing.assert_array_equal(cata.x,cat6.patches[i].x[:cata.nobj])
np.testing.assert_array_equal(cata.y,cat6.patches[i].y[:cata.nobj])
np.testing.assert_array_equal(catb.x,cat6.patches[i].x[cata.nobj:])
np.testing.assert_array_equal(catb.y,cat6.patches[i].y[cata.nobj:])
# get_patches from a single patch will return a list with just itself.
assert cata.get_patches(False) == [cata]
assert catb.get_patches(True) == [catb]
# 7. Set a single patch number
cat7 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=3)
np.testing.assert_array_equal(cat7.patch, 3)
cat8 = treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch=3)
np.testing.assert_array_equal(cat8.patch, 3)
# low_mem=True works if not from a file, but it's not any different
cat1_patches = cat1.patches
cat1._patches = None
assert cat1.get_patches(low_mem=True) == cat1_patches
cat2_patches = cat2.patches
cat2._patches = None
assert cat2.get_patches(low_mem=True) == cat2_patches
cat9 = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad')
cat9_patches = cat9.patches
cat9._patches = None
assert cat9.get_patches(low_mem=True) == cat9_patches
# Check serialization with patch
do_pickle(cat2)
do_pickle(cat7)
# Check some invalid parameters
# npatch if given must be compatible with patch
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=3, patch=p2)
# Note: npatch larger than what is in patch is ok.
# It indicates that this is part of a larger group with more patches.
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=300, patch=p2)
# patch has to have same number of entries
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=p2[:17])
# npatch=0 is not allowed
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=0)
# bad option names
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_init='invalid')
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch,
kmeans_alt='maybe')
with assert_raises(ValueError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col='invalid')
# bad patch col
with assert_raises(ValueError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch_col=4)
# cannot give vector for patch when others are from file name
# (Should this be revisited? Allow this?)
with assert_raises(TypeError):
treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad',
patch=p2)
# bad patch ext
with assert_raises(IOError):
treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patch', patch_ext=2)
# bad patch col name for fits
with assert_raises(ValueError):
treecorr.Catalog(file_name6, ra_col='ra', dec_col='dec',
ra_units='rad', dec_units='rad', patch_col='patches')
@timer
def test_cat_centers():
# Test writing patch centers and setting patches from centers.
if __name__ == '__main__':
ngal = 100000
npatch = 128
else:
ngal = 1000
npatch = 8
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 100 # Put everything at large y, so smallish angle on sky
z = rng.normal(0,s, (ngal,) )
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat1 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', npatch=npatch)
centers = [(c.x.mean(), c.y.mean(), c.z.mean()) for c in cat1.patches]
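    # Project the mean 3D positions back onto the unit sphere, since patch centers live on the sphere.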
centers /= np.sqrt(np.sum(np.array(centers)**2,axis=1))[:,np.newaxis]
centers2 = cat1.patch_centers
print('center0 = ',centers[0])
print(' ',centers2[0])
print('center1 = ',centers[1])
print(' ',centers2[1])
print('max center difference = ',np.max(np.abs(centers2-centers)))
for p in range(npatch):
np.testing.assert_allclose(centers2[p], centers[p], atol=1.e-4)
centers3 = cat1.get_patch_centers()
for p in range(npatch):
np.testing.assert_allclose(centers3[p], centers2[p])
# Write the centers to a file
cen_file = os.path.join('output','test_cat_centers.dat')
cat1.write_patch_centers(cen_file)
# Read the centers file
centers3 = cat1.read_patch_centers(cen_file)
np.testing.assert_allclose(centers3, centers2)
# Set patches from a centers dict
cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=centers2)
np.testing.assert_array_equal(cat2.patch, cat1.patch)
np.testing.assert_array_equal(cat2.patch_centers, centers2)
# Set patches from file
cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file)
np.testing.assert_array_equal(cat3.patch, cat1.patch)
np.testing.assert_array_equal(cat3.patch_centers, centers2)
# If doing this from a config dict, patch_centers will be found in the config dict.
config = dict(ra_units='rad', dec_units='rad', patch_centers=cen_file)
cat4 = treecorr.Catalog(config=config, ra=ra, dec=dec)
np.testing.assert_array_equal(cat4.patch, cat1.patch)
np.testing.assert_array_equal(cat4.patch_centers, centers2)
# If the original catalog had manual patches set, it needs to calculate the centers
# after the fact, so things aren't perfect, but should be close.
cat5 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch=cat1.patch)
np.testing.assert_array_equal(cat5.patch, cat1.patch)
np.testing.assert_allclose(cat5.patch_centers, centers2, atol=1.e-4)
cat6 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat5.patch_centers)
print('n diff = ',np.sum(cat6.patch != cat5.patch))
assert np.sum(cat6.patch != cat5.patch) < 10
np.testing.assert_allclose(cat6.patch_centers, cat5.patch_centers)
# The patch centers from the patch sub-catalogs should match.
cen5 = [c.patch_centers[0] for c in cat5.patches]
np.testing.assert_array_equal(cen5, cat5.patch_centers)
# With weights, things can be a bit farther off of course.
w=rng.uniform(1,2,len(ra))
cat7 = treecorr.Catalog(ra=ra, dec=dec, w=w, ra_units='rad', dec_units='rad',
patch=cat1.patch)
cat8 = treecorr.Catalog(ra=ra, dec=dec, w=w, ra_units='rad', dec_units='rad',
patch_centers=cat7.patch_centers)
print('n diff = ',np.sum(cat8.patch != cat7.patch))
assert np.sum(cat8.patch != cat7.patch) < 200
np.testing.assert_allclose(cat8.patch_centers, cat7.patch_centers)
# But given the same patch centers, the weight doesn't change the assigned patches.
cat8b = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat7.patch_centers)
np.testing.assert_array_equal(cat8.patch, cat8b.patch)
np.testing.assert_array_equal(cat8.patch_centers, cat8b.patch_centers)
# Check flat
cat9 = treecorr.Catalog(x=x, y=y, npatch=npatch)
cen_file2 = os.path.join('output','test_cat_centers.txt')
cat9.write_patch_centers(cen_file2)
centers9 = cat9.read_patch_centers(cen_file2)
np.testing.assert_allclose(centers9, cat9.patch_centers)
cat10 = treecorr.Catalog(x=x, y=y, patch_centers=cen_file2)
np.testing.assert_array_equal(cat10.patch, cat9.patch)
np.testing.assert_array_equal(cat10.patch_centers, cat9.patch_centers)
cat11 = treecorr.Catalog(x=x, y=y, patch=cat9.patch)
cat12 = treecorr.Catalog(x=x, y=y, patch_centers=cat11.patch_centers)
print('n diff = ',np.sum(cat12.patch != cat11.patch))
assert np.sum(cat12.patch != cat11.patch) < 10
cat13 = treecorr.Catalog(x=x, y=y, w=w, patch=cat9.patch)
cat14 = treecorr.Catalog(x=x, y=y, w=w, patch_centers=cat13.patch_centers)
print('n diff = ',np.sum(cat14.patch != cat13.patch))
assert np.sum(cat14.patch != cat13.patch) < 200
np.testing.assert_array_equal(cat14.patch_centers, cat13.patch_centers)
# The patch centers from the patch sub-catalogs should match.
cen13 = [c.patch_centers[0] for c in cat13.patches]
np.testing.assert_array_equal(cen13, cat13.patch_centers)
# Using the full patch centers, you can also just load a single patch.
for i in range(npatch):
cat = treecorr.Catalog(x=x, y=y, w=w, patch_centers=cat13.patch_centers, patch=i)
assert cat.patch == cat14.patches[i].patch
np.testing.assert_array_equal(cat.x,cat14.patches[i].x)
np.testing.assert_array_equal(cat.y,cat14.patches[i].y)
assert cat == cat14.patches[i]
# Loading from a file with patch_centers can mean that get_patches won't trigger a load.
file_name15 = os.path.join('output','test_cat_centers_f15.dat')
cat14.write(file_name15)
cat15 = treecorr.Catalog(file_name15, x_col=1, y_col=2, w_col=3,
patch_centers=cat14.patch_centers)
assert not cat15.loaded
cat15_patches = cat15.get_patches(low_mem=True)
assert not cat15.loaded # Unlike above (in test_cat_patches) it's still unloaded.
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat15_patches[i].loaded
assert np.all(cat15_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat15_patches[i].x, cat15.x[cat15.patch == i])
cat = treecorr.Catalog(file_name15, x_col=1, y_col=2, w_col=3,
patch_centers=cat15.patch_centers, patch=i)
assert cat.patch == cat15.patches[i].patch
np.testing.assert_array_equal(cat.x,cat15_patches[i].x)
np.testing.assert_array_equal(cat.y,cat15_patches[i].y)
assert cat == cat15_patches[i]
assert cat == cat15.patches[i]
# Check fits
file_name17 = os.path.join('output','test_cat_centers.fits')
cat8.write(file_name17)
cat17 = treecorr.Catalog(file_name17, ra_col='ra', dec_col='dec', w_col='w',
ra_units='rad', dec_units='rad',
patch_centers=cat8.patch_centers)
assert not cat17.loaded
cat17_patches = cat17.get_patches(low_mem=True)
assert not cat17.loaded # Unlike above (in test_cat_patches) it's still unloaded.
for i in range(4): # Don't bother with all the patches. 4 suffices to check this.
assert not cat17_patches[i].loaded
assert np.all(cat17_patches[i].patch == i) # Triggers load of patch.
np.testing.assert_array_equal(cat17_patches[i].ra, cat17.ra[cat17.patch == i])
cat = treecorr.Catalog(file_name17, ra_col='ra', dec_col='dec', w_col='w',
ra_units='rad', dec_units='rad',
patch_centers=cat8.patch_centers, patch=i)
assert cat.patch == cat17.patches[i].patch
np.testing.assert_array_equal(cat.ra,cat17_patches[i].ra)
np.testing.assert_array_equal(cat.dec,cat17_patches[i].dec)
assert cat == cat17_patches[i]
assert cat == cat17.patches[i]
# Check for some invalid values
# npatch if given must be compatible with patch_centers
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, npatch=3)
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, npatch=13)
# Can't have both patch_centers and another patch specification
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, patch=np.ones_like(ra))
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file, patch_col=3)
# patch_centers is wrong shape
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cen_file2)
with assert_raises(ValueError):
treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch_centers=cat9.patch_centers)
with assert_raises(ValueError):
treecorr.Catalog(x=x, y=y, patch_centers=cen_file)
with assert_raises(ValueError):
treecorr.Catalog(x=x, y=y, patch_centers=cat1.patch_centers)
# Missing some patch numbers
with assert_raises(RuntimeError):
c=treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad',
patch=np.random.uniform(10,20,len(ra)))
c.get_patch_centers()
def generate_shear_field(nside, rng=None):
if rng is None:
rng = np.random.RandomState()
# Generate a random shear field with a well-defined power spectrum.
# It generates shears on a grid nside x nside, and returns, x, y, g1, g2
kvals = np.fft.fftfreq(nside) * 2*np.pi
kx,ky = np.meshgrid(kvals,kvals)
k = kx + 1j*ky
ksq = kx**2 + ky**2
# Use a power spectrum with lots of large scale power.
# The rms shape ends up around 0.2 and min/max are around +-1.
# Having a lot more large-scale than small-scale power means that sample variance is
# very important, so the shot noise estimate of the variance is particularly bad.
Pk = 1.e4 * ksq / (1. + 300.*ksq)**2
# Make complex gaussian field in k-space.
f1 = rng.normal(size=Pk.shape)
f2 = rng.normal(size=Pk.shape)
f = (f1 + 1j*f2) * np.sqrt(0.5)
# Make f Hermitian, to correspond to E-mode-only field.
# Hermitian means f(-k) = conj(f(k)).
# Note: this is approximate. It doesn't get all the k=0 and k=nside/2 correct.
# But this is good enough for xi- to be not close to zero.
ikxp = slice(1,(nside+1)//2) # kx > 0
ikxn = slice(-1,nside//2,-1) # kx < 0
ikyp = slice(1,(nside+1)//2) # ky > 0
ikyn = slice(-1,nside//2,-1) # ky < 0
f[ikyp,ikxn] = np.conj(f[ikyn,ikxp])
f[ikyn,ikxn] = np.conj(f[ikyp,ikxp])
    # Multiply by the power spectrum to shape the field. (Strictly, a realization of
    # P(k) would use sqrt(Pk); multiplying by Pk just gives a steeper spectrum, which is fine here.)
f *= Pk
# Inverse fft gives the real-space field.
kappa = nside * np.fft.ifft2(f)
# Multiply by exp(2iphi) to get gamma field, rather than kappa.
ksq[0,0] = 1. # Avoid division by zero
exp2iphi = k**2 / ksq
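    # Note: k^2/|k|^2 = (kx+iky)^2/(kx^2+ky^2) = e^{2i phi(k)}, the spin-2 phase
    # that converts the E-mode (kappa-like) field into shear.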
f *= exp2iphi
gamma = nside * np.fft.ifft2(f)
# Generate x,y values for the real-space field
x,y = np.meshgrid(np.linspace(0.,1000.,nside), np.linspace(0.,1000.,nside))
x = x.ravel()
y = y.ravel()
gamma = gamma.ravel()
kappa = np.real(kappa.ravel())
return x, y, np.real(gamma), np.imag(gamma), kappa
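# A minimal usage sketch of the generator above (the variance tests below do
# essentially this, with patches added): build a Catalog from the grid shears
# and measure xi+/xi- with GGCorrelation.
#   x, y, g1, g2, k = generate_shear_field(64)
#   cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
#   gg = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
#   gg.process(cat)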
@timer
def test_gg_jk():
# Test the variance estimate for GG correlation with jackknife (and other) error estimate.
if __name__ == '__main__':
# 1000 x 1000, so 10^6 points. With jackknifing, that gives 10^4 per region.
nside = 1000
npatch = 64
tol_factor = 1
else:
# Use ~1/10 of the objects when running unit tests
nside = 200
npatch = 16
tol_factor = 8
# The full simulation needs to run a lot of times to get a good estimate of the variance,
# but this takes a long time. So we store the results in the repo.
    # To redo the simulation, just delete the file data/test_gg_jk_{nside}.npz
file_name = 'data/test_gg_jk_{}.npz'.format(nside)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_ggs = []
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nside)
print(run,': ',np.mean(g1),np.std(g1),np.min(g1),np.max(g1))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
gg.process(cat)
all_ggs.append(gg)
mean_xip = np.mean([gg.xip for gg in all_ggs], axis=0)
var_xip = np.var([gg.xip for gg in all_ggs], axis=0)
mean_xim = np.mean([gg.xim for gg in all_ggs], axis=0)
var_xim = np.var([gg.xim for gg in all_ggs], axis=0)
mean_varxip = np.mean([gg.varxip for gg in all_ggs], axis=0)
mean_varxim = np.mean([gg.varxim for gg in all_ggs], axis=0)
np.savez(file_name,
mean_xip=mean_xip, mean_xim=mean_xim,
var_xip=var_xip, var_xim=var_xim,
mean_varxip=mean_varxip, mean_varxim=mean_varxim)
data = np.load(file_name)
mean_xip = data['mean_xip']
mean_xim = data['mean_xim']
var_xip = data['var_xip']
var_xim = data['var_xim']
mean_varxip = data['mean_varxip']
mean_varxim = data['mean_varxim']
print('mean_xip = ',mean_xip)
print('mean_xim = ',mean_xim)
print('mean_varxip = ',mean_varxip)
print('mean_varxim = ',mean_varxim)
print('var_xip = ',var_xip)
print('ratio = ',var_xip / mean_varxip)
print('var_xim = ',var_xim)
print('ratio = ',var_xim / mean_varxim)
rng = np.random.RandomState(1234)
# First run with the normal variance estimate, which is too small.
x, y, g1, g2, _ = generate_shear_field(nside, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg1 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50.)
t0 = time.time()
gg1.process(cat)
t1 = time.time()
print('Time for non-patch processing = ',t1-t0)
# Quick gratuitous coverage test:
assert '_ok' not in gg1.__dict__
assert 'lazy_property' in str(treecorr.GGCorrelation._ok)
gg1._ok
assert '_ok' in gg1.__dict__
print('weight = ',gg1.weight)
print('xip = ',gg1.xip)
print('xim = ',gg1.xim)
print('varxip = ',gg1.varxip)
print('varxim = ',gg1.varxim)
print('pullsq for xip = ',(gg1.xip-mean_xip)**2/var_xip)
print('pullsq for xim = ',(gg1.xim-mean_xim)**2/var_xim)
print('max pull for xip = ',np.sqrt(np.max((gg1.xip-mean_xip)**2/var_xip)))
print('max pull for xim = ',np.sqrt(np.max((gg1.xim-mean_xim)**2/var_xim)))
np.testing.assert_array_less((gg1.xip - mean_xip)**2/var_xip, 25) # within 5 sigma
np.testing.assert_array_less((gg1.xim - mean_xim)**2/var_xim, 25)
np.testing.assert_allclose(gg1.varxip, mean_varxip, rtol=0.03 * tol_factor)
np.testing.assert_allclose(gg1.varxim, mean_varxim, rtol=0.03 * tol_factor)
    # The naive error estimate only includes shape noise, so it is an underestimate of
    # the full variance, which includes sample variance.
np.testing.assert_array_less(mean_varxip, var_xip)
np.testing.assert_array_less(mean_varxim, var_xim)
np.testing.assert_array_less(gg1.varxip, var_xip)
np.testing.assert_array_less(gg1.varxim, var_xim)
# Now run with patches, but still with shot variance. Should be basically the same answer.
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
gg2 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='shot',
rng=rng)
t0 = time.time()
gg2.process(cat)
t1 = time.time()
print('Time for shot processing = ',t1-t0)
print('weight = ',gg2.weight)
print('xip = ',gg2.xip)
print('xim = ',gg2.xim)
print('varxip = ',gg2.varxip)
print('varxim = ',gg2.varxim)
np.testing.assert_allclose(gg2.weight, gg1.weight, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.xip, gg1.xip, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.xim, gg1.xim, rtol=3.e-2*tol_factor)
np.testing.assert_allclose(gg2.varxip, gg1.varxip, rtol=1.e-2*tol_factor)
np.testing.assert_allclose(gg2.varxim, gg1.varxim, rtol=1.e-2*tol_factor)
# Can get this as a (diagonal) covariance matrix using estimate_cov
np.testing.assert_allclose(gg2.estimate_cov('shot'),
np.diag(np.concatenate([gg2.varxip, gg2.varxim])))
# Now run with jackknife variance estimate. Should be much better.
gg3 = treecorr.GGCorrelation(bin_size=0.3, min_sep=10., max_sep=50., var_method='jackknife',
rng=rng)
t0 = time.time()
gg3.process(cat)
t1 = time.time()
print('Time for jackknife processing = ',t1-t0)
print('xip = ',gg3.xip)
print('xim = ',gg3.xim)
print('varxip = ',gg3.varxip)
print('ratio = ',gg3.varxip / var_xip)
print('varxim = ',gg3.varxim)
print('ratio = ',gg3.varxim / var_xim)
np.testing.assert_allclose(gg3.weight, gg2.weight)
np.testing.assert_allclose(gg3.xip, gg2.xip)
np.testing.assert_allclose(gg3.xim, gg2.xim)
# Not perfect, but within about 30%.
np.testing.assert_allclose(gg3.varxip, var_xip, rtol=0.3*tol_factor)
np.testing.assert_allclose(gg3.varxim, var_xim, rtol=0.3*tol_factor)
# Can get the covariance matrix using estimate_cov, which is also stored as cov attribute
t0 = time.time()
np.testing.assert_allclose(gg3.estimate_cov('jackknife'), gg3.cov)
t1 = time.time()
print('Time to calculate jackknife covariance = ',t1-t0)
# Can also get the shot covariance matrix using estimate_cov
np.testing.assert_allclose(gg3.estimate_cov('shot'),
np.diag(np.concatenate([gg2.varxip, gg2.varxim])))
# And can even get the jackknife covariance from a run that used var_method='shot'
np.testing.assert_allclose(gg2.estimate_cov('jackknife'), gg3.cov)
# Check that cross-covariance between xip and xim is significant.
n = gg3.nbins
    print('cross covariance = ',gg3.cov[:n,n:],np.sum(gg3.cov[:n,n:]**2))
# Make cross correlation matrix
c = gg3.cov[:n,n:] / (np.sqrt(gg3.varxip)[:,np.newaxis] * np.sqrt(gg3.varxim)[np.newaxis,:])
print('cross correlation = ',c)
assert np.sum(c**2) > 1.e-2 # Should be significantly non-zero
assert np.all(
|
np.abs(c)
|
numpy.abs
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
import ctypes
import numpy
import scipy.linalg
import pyscf.lib
from pyscf.lib import logger
try:
from pyscf.dft import libxc
except (ImportError, OSError):
from pyscf.dft import xcfun as libxc
libdft = pyscf.lib.load_library('libdft')
OCCDROP = 1e-12
BLKSIZE = 96
def eval_ao(mol, coords, deriv=0, relativity=0, shls_slice=None,
non0tab=None, out=None, verbose=None):
'''Evaluate AO function value on the given grids.
Args:
mol : an instance of :class:`Mole`
coords : 2D array, shape (N,3)
The coordinates of the grids.
Kwargs:
deriv : int
AO derivative order. It affects the shape of the return array.
If deriv=0, the returned AO values are stored in a (N,nao) array.
Otherwise the AO values are stored in an array of shape (M,N,nao).
Here N is the number of grids, nao is the number of AO functions,
M is the size associated to the derivative deriv.
relativity : bool
No effects.
shls_slice : 2-element list
(shl_start, shl_end).
If given, only part of AOs (shl_start <= shell_id < shl_end) are
evaluated. By default, all shells defined in mol will be evaluated.
non0tab : 2D bool array
mask array to indicate whether the AO values are zero. The mask
array can be obtained by calling :func:`make_mask`
out : ndarray
If provided, results are written into this array.
verbose : int or object of :class:`Logger`
No effects.
Returns:
2D array of shape (N,nao) for AO values if deriv = 0.
Or 3D array of shape (:,N,nao) for AO values and AO derivatives if deriv > 0.
In the 3D array, the first (N,nao) elements are the AO values,
        followed by (3,N,nao) for x,y,z components;
Then 2nd derivatives (6,N,nao) for xx, xy, xz, yy, yz, zz;
Then 3rd derivatives (10,N,nao) for xxx, xxy, xxz, xyy, xyz, xzz, yyy, yyz, yzz, zzz;
...
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
>>> coords = numpy.random.random((100,3)) # 100 random points
>>> ao_value = eval_ao(mol, coords)
>>> print(ao_value.shape)
(100, 24)
>>> ao_value = eval_ao(mol, coords, deriv=1, shls_slice=(1,4))
>>> print(ao_value.shape)
(4, 100, 7)
>>> ao_value = eval_ao(mol, coords, deriv=2, shls_slice=(1,4))
>>> print(ao_value.shape)
(10, 100, 7)
'''
if isinstance(deriv, bool):
        logger.warn(mol, '''
You see this warning because of the API updates in pyscf v1.1.
Argument "isgga" is replaced by argument "deriv", to support high order AO derivatives''')
comp = (deriv+1)*(deriv+2)*(deriv+3)//6
feval = 'GTOval_sph_deriv%d' % deriv
return mol.eval_gto(feval, coords, comp, shls_slice, non0tab, out)
def make_mask(mol, coords, relativity=0, shls_slice=None, verbose=None):
'''Mask to indicate whether a shell is zero on particular grid
Args:
mol : an instance of :class:`Mole`
coords : 2D array, shape (N,3)
The coordinates of the grids.
Kwargs:
relativity : bool
No effects.
shls_slice : 2-element list
(shl_start, shl_end).
If given, only part of AOs (shl_start <= shell_id < shl_end) are
evaluated. By default, all shells defined in mol will be evaluated.
verbose : int or object of :class:`Logger`
No effects.
Returns:
2D bool array of shape (N,nbas), where N is the number of grids, nbas
is the number of shells
'''
coords = numpy.asarray(coords, order='C')
natm = ctypes.c_int(mol._atm.shape[0])
nbas = ctypes.c_int(mol.nbas)
ngrids = len(coords)
if shls_slice is None:
shls_slice = (0, mol.nbas)
assert(shls_slice == (0, mol.nbas))
non0tab = numpy.empty(((ngrids+BLKSIZE-1)//BLKSIZE, mol.nbas),
dtype=numpy.int8)
libdft.VXCnr_ao_screen(non0tab.ctypes.data_as(ctypes.c_void_p),
coords.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(ngrids), ctypes.c_int(BLKSIZE),
mol._atm.ctypes.data_as(ctypes.c_void_p), natm,
mol._bas.ctypes.data_as(ctypes.c_void_p), nbas,
mol._env.ctypes.data_as(ctypes.c_void_p))
return non0tab
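# A small sketch (a hypothetical grids object is assumed): compute the screening
# mask once and reuse it across AO evaluations to skip blocks where all AOs vanish.
#   non0 = make_mask(mol, grids.coords)
#   ao = eval_ao(mol, grids.coords, deriv=1, non0tab=non0)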
#TODO: \nabla^2 rho and tau = 1/2 (\nabla f)^2
def eval_rho(mol, ao, dm, non0tab=None, xctype='LDA', verbose=None):
r'''Calculate the electron density for LDA functional, and the density
derivatives for GGA functional.
Args:
mol : an instance of :class:`Mole`
ao : 2D array of shape (N,nao) for LDA, 3D array of shape (4,N,nao) for GGA
            or (10,N,nao) for meta-GGA. N is the number of grids, nao is the
            number of AO functions. If xctype is GGA, ao[0] is the AO value
            and ao[1:4] are the AO gradients. If xctype is meta-GGA, ao[4:10]
            are second derivatives of the AO values.
dm : 2D array
Density matrix
Kwargs:
non0tab : 2D bool array
mask array to indicate whether the AO values are zero. The mask
array can be obtained by calling :func:`make_mask`
xctype : str
LDA/GGA/mGGA. It affects the shape of the return density.
verbose : int or object of :class:`Logger`
No effects.
Returns:
1D array of size N to store electron density if xctype = LDA; 2D array
of (4,N) to store density and "density derivatives" for x,y,z components
if xctype = GGA; (6,N) array for meta-GGA, where last two rows are
\nabla^2 rho and tau = 1/2(\nabla f)^2
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
>>> coords = numpy.random.random((100,3)) # 100 random points
>>> ao_value = eval_ao(mol, coords, deriv=0)
>>> dm = numpy.random.random((mol.nao_nr(),mol.nao_nr()))
>>> dm = dm + dm.T
    >>> rho = eval_rho(mol, ao_value, dm, xctype='LDA')
'''
assert(ao.flags.c_contiguous)
xctype = xctype.upper()
if xctype == 'LDA':
ngrids, nao = ao.shape
else:
ngrids, nao = ao[0].shape
if non0tab is None:
non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),
dtype=numpy.int8)
if xctype == 'LDA':
c0 = _dot_ao_dm(mol, ao, dm, nao, ngrids, non0tab)
rho = numpy.einsum('pi,pi->p', ao, c0)
elif xctype == 'GGA':
rho = numpy.empty((4,ngrids))
c0 = _dot_ao_dm(mol, ao[0], dm, nao, ngrids, non0tab)
rho[0] = numpy.einsum('pi,pi->p', c0, ao[0])
for i in range(1, 4):
rho[i] = numpy.einsum('pi,pi->p', c0, ao[i])
rho[i] *= 2 # *2 for +c.c. in the next two lines
#c1 = _dot_ao_dm(mol, ao[i], dm.T, nao, ngrids, non0tab)
#rho[i] += numpy.einsum('pi,pi->p', c1, ao[0])
else: # meta-GGA
# rho[4] = \nabla^2 rho, rho[5] = 1/2 |nabla f|^2
rho = numpy.empty((6,ngrids))
c0 = _dot_ao_dm(mol, ao[0], dm, nao, ngrids, non0tab)
rho[0] = numpy.einsum('pi,pi->p', ao[0], c0)
rho[5] = 0
for i in range(1, 4):
rho[i] = numpy.einsum('pi,pi->p', c0, ao[i]) * 2 # *2 for +c.c.
c1 = _dot_ao_dm(mol, ao[i], dm.T, nao, ngrids, non0tab)
rho[5] += numpy.einsum('pi,pi->p', c1, ao[i])
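        # With deriv=2, ao is ordered [value, x, y, z, xx, xy, xz, yy, yz, zz],
        # so the diagonal second derivatives sit at indices 4, 7 and 9.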
XX, YY, ZZ = 4, 7, 9
ao2 = ao[XX] + ao[YY] + ao[ZZ]
rho[4] = numpy.einsum('pi,pi->p', c0, ao2)
rho[4] += rho[5]
rho[4] *= 2
rho[5] *= .5
return rho
def eval_rho2(mol, ao, mo_coeff, mo_occ, non0tab=None, xctype='LDA',
verbose=None):
r'''Calculate the electron density for LDA functional, and the density
derivatives for GGA functional. This function has the same functionality
as :func:`eval_rho` except that the density are evaluated based on orbital
coefficients and orbital occupancy. It is more efficient than
:func:`eval_rho` in most scenario.
Args:
mol : an instance of :class:`Mole`
ao : 2D array of shape (N,nao) for LDA, 3D array of shape (4,N,nao) for GGA
            or (10,N,nao) for meta-GGA. N is the number of grids, nao is the
            number of AO functions. If xctype is GGA, ao[0] is the AO value
            and ao[1:4] are the AO gradients. If xctype is meta-GGA, ao[4:10]
            are second derivatives of the AO values.
        mo_coeff : 2D array
            Orbital coefficient matrix
        mo_occ : 1D array
            Orbital occupancy numbers
Kwargs:
non0tab : 2D bool array
mask array to indicate whether the AO values are zero. The mask
array can be obtained by calling :func:`make_mask`
xctype : str
LDA/GGA/mGGA. It affects the shape of the return density.
verbose : int or object of :class:`Logger`
No effects.
Returns:
1D array of size N to store electron density if xctype = LDA; 2D array
of (4,N) to store density and "density derivatives" for x,y,z components
if xctype = GGA; (6,N) array for meta-GGA, where last two rows are
\nabla^2 rho and tau = 1/2(\nabla f)^2
'''
assert(ao.flags.c_contiguous)
xctype = xctype.upper()
if xctype == 'LDA':
ngrids, nao = ao.shape
else:
ngrids, nao = ao[0].shape
if non0tab is None:
non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),
dtype=numpy.int8)
pos = mo_occ > OCCDROP
cpos = numpy.einsum('ij,j->ij', mo_coeff[:,pos], numpy.sqrt(mo_occ[pos]))
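    # Scaling the MO coefficients by sqrt(occupancy) lets the density be computed
    # as a simple sum of |phi_i|^2 over the scaled orbitals.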
if pos.sum() > 0:
if xctype == 'LDA':
c0 = _dot_ao_dm(mol, ao, cpos, nao, ngrids, non0tab)
rho = numpy.einsum('pi,pi->p', c0, c0)
elif xctype == 'GGA':
rho = numpy.empty((4,ngrids))
c0 = _dot_ao_dm(mol, ao[0], cpos, nao, ngrids, non0tab)
rho[0] = numpy.einsum('pi,pi->p', c0, c0)
for i in range(1, 4):
c1 = _dot_ao_dm(mol, ao[i], cpos, nao, ngrids, non0tab)
rho[i] = numpy.einsum('pi,pi->p', c0, c1) * 2 # *2 for +c.c.
else: # meta-GGA
# rho[4] = \nabla^2 rho, rho[5] = 1/2 |nabla f|^2
rho = numpy.empty((6,ngrids))
c0 = _dot_ao_dm(mol, ao[0], cpos, nao, ngrids, non0tab)
rho[0] = numpy.einsum('pi,pi->p', c0, c0)
rho[5] = 0
for i in range(1, 4):
c1 = _dot_ao_dm(mol, ao[i], cpos, nao, ngrids, non0tab)
rho[i] = numpy.einsum('pi,pi->p', c0, c1) * 2 # *2 for +c.c.
rho[5] += numpy.einsum('pi,pi->p', c1, c1)
XX, YY, ZZ = 4, 7, 9
ao2 = ao[XX] + ao[YY] + ao[ZZ]
c1 = _dot_ao_dm(mol, ao2, cpos, nao, ngrids, non0tab)
rho[4] = numpy.einsum('pi,pi->p', c0, c1)
rho[4] += rho[5]
rho[4] *= 2
rho[5] *= .5
else:
if xctype == 'LDA':
rho = numpy.zeros(ngrids)
elif xctype == 'GGA':
rho = numpy.zeros((4,ngrids))
else:
rho = numpy.zeros((6,ngrids))
neg = mo_occ < -OCCDROP
if neg.sum() > 0:
cneg = numpy.einsum('ij,j->ij', mo_coeff[:,neg], numpy.sqrt(-mo_occ[neg]))
if xctype == 'LDA':
c0 = _dot_ao_dm(mol, ao, cneg, nao, ngrids, non0tab)
rho -= numpy.einsum('pi,pi->p', c0, c0)
elif xctype == 'GGA':
c0 = _dot_ao_dm(mol, ao[0], cneg, nao, ngrids, non0tab)
rho[0] -= numpy.einsum('pi,pi->p', c0, c0)
for i in range(1, 4):
c1 = _dot_ao_dm(mol, ao[i], cneg, nao, ngrids, non0tab)
rho[i] -= numpy.einsum('pi,pi->p', c0, c1) * 2 # *2 for +c.c.
else:
c0 = _dot_ao_dm(mol, ao[0], cneg, nao, ngrids, non0tab)
rho[0] -= numpy.einsum('pi,pi->p', c0, c0)
rho5 = 0
for i in range(1, 4):
c1 = _dot_ao_dm(mol, ao[i], cneg, nao, ngrids, non0tab)
rho[i] -= numpy.einsum('pi,pi->p', c0, c1) * 2 # *2 for +c.c.
                rho5 += numpy.einsum('pi,pi->p', c1, c1)  # accumulate |nabla phi|^2; subtracted below
XX, YY, ZZ = 4, 7, 9
ao2 = ao[XX] + ao[YY] + ao[ZZ]
c1 = _dot_ao_dm(mol, ao2, cneg, nao, ngrids, non0tab)
rho[4] -= numpy.einsum('pi,pi->p', c0, c1) * 2
rho[4] -= rho5 * 2
rho[5] -= rho5 * .5
return rho
def eval_mat(mol, ao, weight, rho, vxc,
non0tab=None, xctype='LDA', spin=0, verbose=None):
r'''Calculate XC potential matrix.
Args:
mol : an instance of :class:`Mole`
ao : ([4/10,] ngrids, nao) ndarray
2D array of shape (N,nao) for LDA,
3D array of shape (4,N,nao) for GGA
or (10,N,nao) for meta-GGA.
N is the number of grids, nao is the number of AO functions.
            If xctype is GGA, ao[0] is the AO value and ao[1:4] are the real space
            gradients. If xctype is meta-GGA, ao[4:10] are second derivatives
of ao values.
weight : 1D array
Integral weights on grids.
rho : ([4/6,] ngrids) ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
vxc : ([4,] ngrids) ndarray
XC potential value on each grid = (vrho, vsigma, vlapl, vtau)
vsigma is GGA potential value on each grid.
If the kwarg spin is not 0, a list [vsigma_uu,vsigma_ud] is required.
Kwargs:
xctype : str
LDA/GGA/mGGA. It affects the shape of `ao` and `rho`
non0tab : 2D bool array
mask array to indicate whether the AO values are zero. The mask
array can be obtained by calling :func:`make_mask`
spin : int
If not 0, the matrix is contracted with the spin non-degenerated
UKS formula
Returns:
XC potential matrix in 2D array of shape (nao,nao) where nao is the
number of AO functions.
'''
assert(ao.flags.c_contiguous)
xctype = xctype.upper()
if xctype == 'LDA':
ngrids, nao = ao.shape
else:
ngrids, nao = ao[0].shape
if non0tab is None:
non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),
dtype=numpy.int8)
if xctype == 'LDA':
if not isinstance(vxc, numpy.ndarray) or vxc.ndim == 2:
vrho = vxc[0]
else:
vrho = vxc
# *.5 because return mat + mat.T
#:aow = numpy.einsum('pi,p->pi', ao, .5*weight*vrho)
aow = ao * (.5*weight*vrho).reshape(-1,1)
#mat = pyscf.lib.dot(ao.T, aow)
mat = _dot_ao_ao(mol, ao, aow, nao, ngrids, non0tab)
else:
#wv = weight * vsigma * 2
#aow = numpy.einsum('pi,p->pi', ao[1], rho[1]*wv)
#aow += numpy.einsum('pi,p->pi', ao[2], rho[2]*wv)
#aow += numpy.einsum('pi,p->pi', ao[3], rho[3]*wv)
#aow += numpy.einsum('pi,p->pi', ao[0], .5*weight*vrho)
vrho, vsigma = vxc[:2]
wv = numpy.empty((4,ngrids))
if spin == 0:
assert(vsigma is not None and rho.ndim==2)
wv[0] = weight * vrho * .5
wv[1:4] = rho[1:4] * (weight * vsigma * 2)
else:
rho_a, rho_b = rho
wv[0] = weight * vrho * .5
wv[1:4] = rho_a[1:4] * (weight * vsigma[0] * 2) # sigma_uu
wv[1:4]+= rho_b[1:4] * (weight * vsigma[1]) # sigma_ud
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
#mat = pyscf.lib.dot(ao[0].T, aow)
mat = _dot_ao_ao(mol, ao[0], aow, nao, ngrids, non0tab)
# JCP, 138, 244108
# JCP, 112, 7002
if xctype == 'MGGA':
vlapl, vtau = vxc[2:]
if vlapl is None:
            vlapl = 0
aow = numpy.einsum('npi,p->npi', ao[1:4], weight * (.25*vtau+vlapl))
mat += _dot_ao_ao(mol, ao[1], aow[0], nao, ngrids, non0tab)
mat += _dot_ao_ao(mol, ao[2], aow[1], nao, ngrids, non0tab)
mat += _dot_ao_ao(mol, ao[3], aow[2], nao, ngrids, non0tab)
XX, YY, ZZ = 4, 7, 9
ao2 = ao[XX] + ao[YY] + ao[ZZ]
aow = numpy.einsum('pi,p->pi', ao2, .5 * weight * vlapl)
mat += _dot_ao_ao(mol, ao[0], aow, nao, ngrids, non0tab)
return mat + mat.T
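# A minimal sketch of eval_mat in context (assumes a Mole, a grids object and a
# density matrix dm; 'lda,vwn' is just an example functional):
#   ao = eval_ao(mol, grids.coords)                       # (N, nao) AO values
#   rho = eval_rho(mol, ao, dm, xctype='LDA')             # density on the grid
#   exc, vxc = libxc.eval_xc('lda,vwn', rho)[:2]          # XC energy density and potential
#   vmat = eval_mat(mol, ao, grids.weights, rho, vxc, xctype='LDA')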
def _dot_ao_ao(mol, ao1, ao2, nao, ngrids, non0tab):
'''return numpy.dot(ao1.T, ao2)'''
natm = ctypes.c_int(mol._atm.shape[0])
nbas = ctypes.c_int(mol.nbas)
ao1 = numpy.asarray(ao1, order='C')
ao2 = numpy.asarray(ao2, order='C')
vv = numpy.empty((nao,nao))
libdft.VXCdot_ao_ao(vv.ctypes.data_as(ctypes.c_void_p),
ao1.ctypes.data_as(ctypes.c_void_p),
ao2.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nao), ctypes.c_int(ngrids),
ctypes.c_int(BLKSIZE),
non0tab.ctypes.data_as(ctypes.c_void_p),
mol._atm.ctypes.data_as(ctypes.c_void_p), natm,
mol._bas.ctypes.data_as(ctypes.c_void_p), nbas,
mol._env.ctypes.data_as(ctypes.c_void_p))
return vv
def _dot_ao_dm(mol, ao, dm, nao, ngrids, non0tab):
'''return numpy.dot(ao, dm)'''
natm = ctypes.c_int(mol._atm.shape[0])
nbas = ctypes.c_int(mol.nbas)
vm = numpy.empty((ngrids,dm.shape[1]))
ao = numpy.asarray(ao, order='C')
dm = numpy.asarray(dm, order='C')
libdft.VXCdot_ao_dm(vm.ctypes.data_as(ctypes.c_void_p),
ao.ctypes.data_as(ctypes.c_void_p),
dm.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nao), ctypes.c_int(dm.shape[1]),
ctypes.c_int(ngrids), ctypes.c_int(BLKSIZE),
non0tab.ctypes.data_as(ctypes.c_void_p),
mol._atm.ctypes.data_as(ctypes.c_void_p), natm,
mol._bas.ctypes.data_as(ctypes.c_void_p), nbas,
mol._env.ctypes.data_as(ctypes.c_void_p))
return vm
def nr_vxc(mol, grids, xc_code, dm, spin=0, relativity=0, hermi=1,
max_memory=2000, verbose=None):
if isinstance(spin, (list, tuple, numpy.ndarray)):
# shift the old args (..., x_id, c_id, dm, spin, ..)
import warnings
xc_code = '%s, %s' % (xc_code, dm)
dm, spin = spin, relativity
with warnings.catch_warnings():
warnings.simplefilter("once")
            warnings.warn('API updates: the 4th argument c_id is deprecated '
                          'and will be removed in a future release.\n')
ni = _NumInt()
ni.non0tab = ni.make_mask(mol, grids.coords)
if spin == 0:
return nr_rks(ni, mol, grids, xc_code, dm, relativity,
hermi, max_memory, verbose)
else:
return nr_uks(ni, mol, grids, xc_code, dm, relativity,
hermi, max_memory, verbose)
def nr_rks(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Calculate RKS XC functional and potential matrix on given meshgrids
for a set of density matrices
Args:
ni : an instance of :class:`_NumInt`
mol : an instance of :class:`Mole`
grids : an instance of :class:`Grids`
grids.coords and grids.weights are needed for coordinates and weights of meshgrids.
xc_code : str
XC functional description.
See :func:`parse_xc` of pyscf/dft/libxc.py for more details.
        dms : 2D array or a list of 2D arrays
Density matrix or multiple density matrices
Kwargs:
hermi : int
Input density matrices symmetric or not
max_memory : int or float
The maximum size of cache to use (in MB).
Returns:
nelec, excsum, vmat.
nelec is the number of electrons generated by numerical integration.
excsum is the XC functional value. vmat is the XC potential matrix in
2D array of shape (nao,nao) where nao is the number of AO functions.
Examples:
>>> from pyscf import gto, dft
>>> mol = gto.M(atom='H 0 0 0; H 0 0 1.1')
>>> grids = dft.gen_grid.Grids(mol)
>>> grids.coords = numpy.random.random((100,3)) # 100 random points
>>> grids.weights = numpy.random.random(100)
>>> dm = numpy.random.random((mol.nao_nr(),mol.nao_nr()))
>>> nelec, exc, vxc = dft.numint.nr_vxc(mol, grids, 'lda,vwn', dm)
'''
if isinstance(relativity, (list, tuple, numpy.ndarray)):
import warnings
xc_code = '%s, %s' % (xc_code, dms)
dms = relativity
with warnings.catch_warnings():
warnings.simplefilter("once")
            warnings.warn('API updates: the 5th argument c_id is deprecated '
                          'and will be removed in a future release.\n')
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi)
ngrids = len(grids.weights)
if ni.non0tab is None:
non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),
dtype=numpy.int8)
else:
non0tab = ni.non0tab
nelec = numpy.zeros(nset)
excsum = numpy.zeros(nset)
vmat = numpy.zeros((nset,nao,nao))
if xctype == 'LDA':
ao_deriv = 0
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory, non0tab):
for idm in range(nset):
rho = make_rho(idm, ao, mask, 'LDA')
exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1, verbose)[:2]
vrho = vxc[0]
den = rho * weight
nelec[idm] += den.sum()
excsum[idm] += (den * exc).sum()
# *.5 because vmat + vmat.T
aow = numpy.einsum('pi,p->pi', ao, .5*weight*vrho)
vmat[idm] += _dot_ao_ao(mol, ao, aow, nao, weight.size, mask)
rho = exc = vxc = vrho = aow = None
elif xctype == 'GGA':
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory, non0tab):
ngrid = weight.size
for idm in range(nset):
rho = make_rho(idm, ao, mask, 'GGA')
exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1, verbose)[:2]
vrho, vsigma = vxc[:2]
den = rho[0] * weight
nelec[idm] += den.sum()
excsum[idm] += (den * exc).sum()
# ref eval_mat function
wv = numpy.empty((4,ngrid))
wv[0] = weight * vrho * .5
wv[1:] = rho[1:] * (weight * vsigma * 2)
aow = numpy.einsum('npi,np->pi', ao, wv)
vmat[idm] += _dot_ao_ao(mol, ao[0], aow, nao, ngrid, mask)
rho = exc = vxc = vrho = vsigma = wv = aow = None
else:
assert(all(x not in xc_code.upper() for x in ('CC06', 'CS', 'BR89', 'MK00')))
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory, non0tab):
ngrid = weight.size
for idm in range(nset):
rho = make_rho(idm, ao, mask, 'MGGA')
exc, vxc = ni.eval_xc(xc_code, rho, 0, relativity, 1, verbose)[:2]
vrho, vsigma, vlapl, vtau = vxc[:4]
den = rho[0] * weight
nelec[idm] += den.sum()
excsum[idm] += (den * exc).sum()
wv = numpy.empty((4,ngrid))
wv[0] = weight * vrho * .5
wv[1:] = rho[1:4] * (weight * vsigma * 2)
aow = numpy.einsum('npi,np->pi', ao[:4], wv)
vmat[idm] += _dot_ao_ao(mol, ao[0], aow, nao, ngrid, mask)
                # FIXME: .5 * .5 First 0.5 for v+v.T symmetrization.
                # Second 0.5 is due to the Libxc convention tau = 1/2 \nabla\phi \cdot \nabla\phi
wv = (.5 * .5 * weight * vtau).reshape(-1,1)
vmat[idm] += _dot_ao_ao(mol, ao[1], wv*ao[1], nao, ngrid, mask)
vmat[idm] += _dot_ao_ao(mol, ao[2], wv*ao[2], nao, ngrid, mask)
vmat[idm] += _dot_ao_ao(mol, ao[3], wv*ao[3], nao, ngrid, mask)
rho = exc = vxc = vrho = vsigma = wv = aow = None
for i in range(nset):
vmat[i] = vmat[i] + vmat[i].T
if nset == 1:
nelec = nelec[0]
excsum = excsum[0]
vmat = vmat.reshape(nao,nao)
return nelec, excsum, vmat
def nr_uks(ni, mol, grids, xc_code, dms, relativity=0, hermi=1,
max_memory=2000, verbose=None):
'''Calculate UKS XC functional and potential matrix on given meshgrids
for a set of density matrices
Args:
mol : an instance of :class:`Mole`
grids : an instance of :class:`Grids`
grids.coords and grids.weights are needed for coordinates and weights of meshgrids.
xc_code : str
XC functional description.
See :func:`parse_xc` of pyscf/dft/libxc.py for more details.
dms : a list of 2D arrays
A list of density matrices, stored as (alpha,alpha,...,beta,beta,...)
Kwargs:
hermi : int
Input density matrices symmetric or not
max_memory : int or float
The maximum size of cache to use (in MB).
Returns:
nelec, excsum, vmat.
nelec is the number of (alpha,beta) electrons generated by numerical integration.
excsum is the XC functional value.
vmat is the XC potential matrix for (alpha,beta) spin.
'''
if isinstance(relativity, (list, tuple, numpy.ndarray)):
import warnings
xc_code = '%s, %s' % (xc_code, dms)
dms = relativity
with warnings.catch_warnings():
warnings.simplefilter("once")
            warnings.warn('API updates: the 5th argument c_id is deprecated '
                          'and will be removed in a future release.\n')
xctype = ni._xc_type(xc_code)
ngrids = len(grids.weights)
if ni.non0tab is None:
non0tab = numpy.ones(((ngrids+BLKSIZE-1)//BLKSIZE,mol.nbas),
dtype=numpy.int8)
else:
non0tab = ni.non0tab
dms =
|
numpy.asarray(dms)
|
numpy.asarray
|
import numpy as np
# Structured arrays
persontype = np.dtype({
'names': ['name', 'age', 'chinese', 'math', 'english'],
    'formats': ['S32', 'i', 'i', 'i', 'f']  # 'S32' so full names fit; 'S1' would truncate names to one byte
})
peoples = np.array([
("ZhangFei", 32, 75, 100, 90),
("GuanYu", 24, 85, 96, 88.5),
("ZhaoYun", 28, 85, 92, 96.5),
("HuangZhong", 29, 65, 85, 100)
], dtype=persontype)
ages = peoples['age']
chinese = peoples['chinese']
math = peoples['math']
english = peoples['english']
print(np.mean(ages))
print(np.mean(chinese))
print(np.mean(math))
print(np.mean(english))
print(np.max(ages))
print(math)
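# A small extra sketch (not in the original script): rank the students by total score.
total = chinese + math + english
print(np.argsort(-total))  # indices ordered from highest to lowest total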
# ufunc operations
# Arithmetic operations
x1 = np.arange(1, 11, 2)
x2 = np.linspace(1, 9, 5)
print(np.add(x1, x2))
print(np.subtract(x1, x2))
print(np.multiply(x1, x2))
print(np.divide(x1, x2))
print(np.power(x1, x2))
print(np.remainder(x1, x2))
# Statistical functions
# Maximum amax() and minimum amin() of an array/matrix
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(np.amin(a))
print(np.amin(a, 0))
print(np.amin(a, 1))
print(np.amax(a))
print(np.amax(a, 0))
print(np.amax(a, 1))
# Range between maximum and minimum (ptp)
print(np.ptp(a))
print(np.ptp(a, 0))
print(np.ptp(a, 1))
# Percentiles
print(
|
np.percentile(a, 50)
|
numpy.percentile
|
"""
Copyright 2018 Defense Innovation Unit Experimental
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw
import skimage.filters as filters
"""
Image augmentation utilities to be used for processing the dataset. Importantly, these utilities modify
the images as well as their respective bboxes (for example, in rotation). Includes:
rotation, shifting, salt-and-pepper, gaussian blurring. Also includes a 'draw_bboxes' function
for visualizing augmented images and bboxes
"""
def rotate_image_and_boxes(img, deg, pivot, boxes):
"""
    Rotates an image and corresponding bounding boxes. Bounding box rotations are kept axis-aligned,
    so rotations that are not multiples of 90 degrees change the area of the bounding box.
Args:
img: the image to be rotated in array format
deg: an integer representing degree of rotation
pivot: the axis of rotation. By default should be the center of an image, but this can be changed.
boxes: an (N,4) array of boxes for the image
Output:
Returns the rotated image array along with correspondingly rotated bounding boxes
"""
    if deg < 0:
        deg = 360 + deg
deg = int(deg)
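    # PIL's Image.rotate() turns counter-clockwise, so 360-deg gives a clockwise rotation by deg.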
angle = 360-deg
padX = [img.shape[0] - pivot[0], pivot[0]]
padY = [img.shape[1] - pivot[1], pivot[1]]
imgP = np.pad(img, [padY, padX, [0,0]], 'constant').astype(np.uint8)
#scipy ndimage rotate takes ~.7 seconds
#imgR = ndimage.rotate(imgP, angle, reshape=False)
#PIL rotate uses ~.01 seconds
imgR = Image.fromarray(imgP).rotate(angle)
imgR =
|
np.array(imgR)
|
numpy.array
|
import sys
import numpy as np
import tables as tb
from tables.tests import common
class C:
c = (3, 4.5)
class BasicTestCase(common.TempFileMixin, common.PyTablesTestCase):
compress = 0
complib = "zlib"
shuffle = 0
bitshuffle = 0
fletcher32 = 0
flavor = "numpy"
def setUp(self):
super().setUp()
# Create an instance of an HDF5 Table
self.rootgroup = self.h5file.root
self.populateFile()
self.h5file.close()
def populateFile(self):
group = self.rootgroup
filters = tb.Filters(complevel=self.compress,
complib=self.complib,
shuffle=self.shuffle,
bitshuffle=self.bitshuffle,
fletcher32=self.fletcher32)
vlarray = self.h5file.create_vlarray(group, 'vlarray1',
atom=tb.Int32Atom(),
title="ragged array if ints",
filters=filters,
expectedrows=1000)
vlarray.flavor = self.flavor
# Fill it with 5 rows
vlarray.append([1, 2])
if self.flavor == "numpy":
vlarray.append(np.array([3, 4, 5], dtype='int32'))
vlarray.append(np.array([], dtype='int32')) # Empty entry
elif self.flavor == "python":
vlarray.append((3, 4, 5))
vlarray.append(()) # Empty entry
vlarray.append([6, 7, 8, 9])
vlarray.append([10, 11, 12, 13, 14])
def test00_attributes(self):
self.h5file = tb.open_file(self.h5fname, "r")
obj = self.h5file.get_node("/vlarray1")
self.assertEqual(obj.flavor, self.flavor)
self.assertEqual(obj.shape, (5,))
self.assertEqual(obj.ndim, 1)
self.assertEqual(obj.nrows, 5)
self.assertEqual(obj.atom.type, 'int32')
def test01_read(self):
"""Checking vlarray read."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_read..." % self.__class__.__name__)
# Create an instance of an HDF5 Table
self.h5file = tb.open_file(self.h5fname, "r")
vlarray = self.h5file.get_node("/vlarray1")
# Choose a small value for buffer size
vlarray.nrowsinbuf = 3
# Read some rows
row = vlarray.read(0)[0]
row2 = vlarray.read(2)[0]
if common.verbose:
print("Flavor:", vlarray.flavor)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row)
nrows = 5
self.assertEqual(nrows, vlarray.nrows)
if self.flavor == "numpy":
self.assertEqual(type(row), np.ndarray)
self.assertTrue(common.allequal(
row, np.array([1, 2], dtype='int32'), self.flavor))
self.assertTrue(common.allequal(
row2, np.array([], dtype='int32'), self.flavor))
elif self.flavor == "python":
self.assertEqual(row, [1, 2])
self.assertEqual(row2, [])
self.assertEqual(len(row), 2)
# Check filters:
if self.compress != vlarray.filters.complevel and common.verbose:
print("Error in compress. Class:", self.__class__.__name__)
print("self, vlarray:", self.compress, vlarray.filters.complevel)
self.assertEqual(vlarray.filters.complevel, self.compress)
if self.compress > 0 and tb.which_lib_version(self.complib):
self.assertEqual(vlarray.filters.complib, self.complib)
if self.shuffle != vlarray.filters.shuffle and common.verbose:
print("Error in shuffle. Class:", self.__class__.__name__)
print("self, vlarray:", self.shuffle, vlarray.filters.shuffle)
self.assertEqual(self.shuffle, vlarray.filters.shuffle)
        if self.bitshuffle != vlarray.filters.bitshuffle and common.verbose:
            print("Error in bitshuffle. Class:", self.__class__.__name__)
            print("self, vlarray:", self.bitshuffle,
                  vlarray.filters.bitshuffle)
        self.assertEqual(self.bitshuffle, vlarray.filters.bitshuffle)
if self.fletcher32 != vlarray.filters.fletcher32 and common.verbose:
print("Error in fletcher32. Class:", self.__class__.__name__)
print("self, vlarray:", self.fletcher32,
vlarray.filters.fletcher32)
self.assertEqual(self.fletcher32, vlarray.filters.fletcher32)
def test02a_getitem(self):
"""Checking vlarray __getitem__ (slices)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02a_getitem..." % self.__class__.__name__)
# Create an instance of an HDF5 Table
self.h5file = tb.open_file(self.h5fname, "r")
vlarray = self.h5file.get_node("/vlarray1")
rows = [[1, 2], [3, 4, 5], [], [6, 7, 8, 9], [10, 11, 12, 13, 14]]
slices = [
slice(None, None, None), slice(1, 1, 1), slice(30, None, None),
slice(0, None, None), slice(3, None, 1), slice(3, None, 2),
slice(None, 1, None), slice(None, 2, 1), slice(None, 30, 2),
slice(None, None, 1), slice(None, None, 2), slice(None, None, 3),
]
for slc in slices:
# Read the rows in slc
rows2 = vlarray[slc]
rows1 = rows[slc]
rows1f = []
if common.verbose:
print("Flavor:", vlarray.flavor)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("Original rows ==>", rows1)
print("Rows read in vlarray ==>", rows2)
if self.flavor == "numpy":
for val in rows1:
rows1f.append(np.array(val, dtype='int32'))
for i in range(len(rows1f)):
self.assertTrue(common.allequal(
rows2[i], rows1f[i], self.flavor))
elif self.flavor == "python":
self.assertEqual(rows2, rows1)
def test02b_getitem(self):
"""Checking vlarray __getitem__ (scalars)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02b_getitem..." % self.__class__.__name__)
if self.flavor != "numpy":
# This test is only valid for NumPy
return
# Create an instance of an HDF5 Table
self.h5file = tb.open_file(self.h5fname, "r")
vlarray = self.h5file.get_node("/vlarray1")
# Get a numpy array of objects
rows = np.array(vlarray[:], dtype=object)
for slc in [0, np.array(1), 2, np.array([3]), [4]]:
# Read the rows in slc
rows2 = vlarray[slc]
rows1 = rows[slc]
if common.verbose:
print("Flavor:", vlarray.flavor)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("Original rows ==>", rows1)
print("Rows read in vlarray ==>", rows2)
for i in range(len(rows1)):
self.assertTrue(common.allequal(
rows2[i], rows1[i], self.flavor))
def test03_append(self):
"""Checking vlarray append."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03_append..." % self.__class__.__name__)
# Create an instance of an HDF5 Table
self.h5file = tb.open_file(self.h5fname, "a")
vlarray = self.h5file.get_node("/vlarray1")
# Append a new row
vlarray.append([7, 8, 9, 10])
# Choose a small value for buffer size
vlarray.nrowsinbuf = 3
# Read some rows:
row1 = vlarray[0]
row2 = vlarray[2]
row3 = vlarray[-1]
if common.verbose:
print("Flavor:", vlarray.flavor)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row1)
nrows = 6
self.assertEqual(nrows, vlarray.nrows)
if self.flavor == "numpy":
self.assertEqual(type(row1), type(np.array([1, 2])))
self.assertTrue(common.allequal(
row1, np.array([1, 2], dtype='int32'), self.flavor))
self.assertTrue(common.allequal(
row2, np.array([], dtype='int32'), self.flavor))
self.assertTrue(common.allequal(
row3, np.array([7, 8, 9, 10], dtype='int32'), self.flavor))
elif self.flavor == "python":
self.assertEqual(row1, [1, 2])
self.assertEqual(row2, [])
self.assertEqual(row3, [7, 8, 9, 10])
self.assertEqual(len(row3), 4)
def test04_get_row_size(self):
"""Checking get_row_size method."""
self.h5file = tb.open_file(self.h5fname, "a")
vlarray = self.h5file.get_node("/vlarray1")
self.assertEqual(vlarray.get_row_size(0), 2 * vlarray.atom.size)
self.assertEqual(vlarray.get_row_size(1), 3 * vlarray.atom.size)
self.assertEqual(vlarray.get_row_size(2), 0 * vlarray.atom.size)
self.assertEqual(vlarray.get_row_size(3), 4 * vlarray.atom.size)
self.assertEqual(vlarray.get_row_size(4), 5 * vlarray.atom.size)
class BasicNumPyTestCase(BasicTestCase):
flavor = "numpy"
class BasicPythonTestCase(BasicTestCase):
flavor = "python"
class ZlibComprTestCase(BasicTestCase):
compress = 1
complib = "zlib"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
class BloscComprTestCase(BasicTestCase):
compress = 9
shuffle = 0
complib = "blosc"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
class BloscShuffleComprTestCase(BasicTestCase):
compress = 6
shuffle = 1
complib = "blosc"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
@common.unittest.skipIf(
common.blosc_version < common.min_blosc_bitshuffle_version,
f'BLOSC >= {common.min_blosc_bitshuffle_version} required')
class BloscBitShuffleComprTestCase(BasicTestCase):
compress = 9
bitshuffle = 1
complib = "blosc"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
class BloscBloscLZComprTestCase(BasicTestCase):
compress = 9
shuffle = 1
complib = "blosc:blosclz"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
@common.unittest.skipIf(
'lz4' not in tb.blosc_compressor_list(), 'lz4 required')
class BloscLZ4ComprTestCase(BasicTestCase):
compress = 9
shuffle = 1
complib = "blosc:lz4"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
@common.unittest.skipIf(
'lz4' not in tb.blosc_compressor_list(), 'lz4 required')
class BloscLZ4HCComprTestCase(BasicTestCase):
compress = 9
shuffle = 1
complib = "blosc:lz4hc"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
@common.unittest.skipIf('snappy' not in tb.blosc_compressor_list(),
'snappy required')
class BloscSnappyComprTestCase(BasicTestCase):
compress = 9
shuffle = 1
complib = "blosc:snappy"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
@common.unittest.skipIf(
'zlib' not in tb.blosc_compressor_list(), 'zlib required')
class BloscZlibComprTestCase(BasicTestCase):
compress = 9
shuffle = 1
complib = "blosc:zlib"
@common.unittest.skipIf(not common.blosc_avail,
'BLOSC compression library not available')
@common.unittest.skipIf(
'zstd' not in tb.blosc_compressor_list(), 'zstd required')
class BloscZstdComprTestCase(BasicTestCase):
compress = 9
shuffle = 1
complib = "blosc:zstd"
@common.unittest.skipIf(
not common.lzo_avail, 'LZO compression library not available')
class LZOComprTestCase(BasicTestCase):
compress = 1
complib = "lzo"
@common.unittest.skipIf(not common.bzip2_avail,
'BZIP2 compression library not available')
class Bzip2ComprTestCase(BasicTestCase):
compress = 1
complib = "bzip2"
class ShuffleComprTestCase(BasicTestCase):
compress = 1
shuffle = 1
class Fletcher32TestCase(BasicTestCase):
fletcher32 = 1
class AllFiltersTestCase(BasicTestCase):
compress = 1
shuffle = 1
fletcher32 = 1
class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase):
open_mode = "w"
compress = 0
complib = "zlib" # Default compression library
def test01_StringAtom(self):
"""Checking vlarray with NumPy string atoms ('numpy' flavor)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01_StringAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'stringAtom',
atom=tb.StringAtom(itemsize=3),
title="Ragged array of strings")
vlarray.flavor = "numpy"
vlarray.append(np.array(["1", "12", "123", "1234", "12345"]))
vlarray.append(np.array(["1", "12345"]))
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
np.testing.assert_array_equal(
row[0], np.array(["1", "12", "123", "123", "123"], 'S'))
np.testing.assert_array_equal(row[1], np.array(["1", "123"], 'S'))
self.assertEqual(len(row[0]), 5)
self.assertEqual(len(row[1]), 2)
def test01a_StringAtom(self):
"""Checking vlarray with NumPy string atoms ('numpy' flavor,
strided)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01a_StringAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'stringAtom',
atom=tb.StringAtom(itemsize=3),
title="Ragged array of strings")
vlarray.flavor = "numpy"
vlarray.append(np.array(["1", "12", "123", "1234", "12345"][::2]))
vlarray.append(np.array(["1", "12345", "2", "321"])[::3])
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
np.testing.assert_array_equal(row[0],
np.array(["1", "123", "123"], 'S'))
np.testing.assert_array_equal(row[1], np.array(["1", "321"], 'S'))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test01a_2_StringAtom(self):
"""Checking vlarray with NumPy string atoms (NumPy flavor, no conv)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01a_2_StringAtom..." %
self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'stringAtom',
atom=tb.StringAtom(itemsize=3),
title="Ragged array of strings")
vlarray.flavor = "numpy"
vlarray.append(np.array(["1", "12", "123", "123"]))
vlarray.append(np.array(["1", "2", "321"]))
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
np.testing.assert_array_equal(
row[0], np.array(["1", "12", "123", "123"], 'S'))
np.testing.assert_array_equal(row[1], np.array(["1", "2", "321"], 'S'))
self.assertEqual(len(row[0]), 4)
self.assertEqual(len(row[1]), 3)
def test01b_StringAtom(self):
"""Checking vlarray with NumPy string atoms (python flavor)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01b_StringAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'stringAtom2',
atom=tb.StringAtom(itemsize=3),
title="Ragged array of strings")
vlarray.flavor = "python"
vlarray.append(["1", "12", "123", "1234", "12345"])
vlarray.append(["1", "12345"])
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing String flavor")
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertEqual(row[0], [b"1", b"12", b"123", b"123", b"123"])
self.assertEqual(row[1], [b"1", b"123"])
self.assertEqual(len(row[0]), 5)
self.assertEqual(len(row[1]), 2)
def test01c_StringAtom(self):
"""Checking updating vlarray with NumPy string atoms
('numpy' flavor)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01c_StringAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'stringAtom',
atom=tb.StringAtom(itemsize=3),
title="Ragged array of strings")
vlarray.flavor = "numpy"
vlarray.append(np.array(["1", "12", "123", "1234", "12345"]))
vlarray.append(np.array(["1", "12345"]))
# Modify the rows
vlarray[0] = np.array(["1", "123", "12", "", "12345"])
vlarray[1] = np.array(["44", "4"]) # This should work as well
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([b"1", b"123", b"12", b"", b"123"])))
self.assertTrue(common.allequal(
row[1], np.array(["44", "4"], dtype="S3")))
self.assertEqual(len(row[0]), 5)
self.assertEqual(len(row[1]), 2)
def test01d_StringAtom(self):
"""Checking updating vlarray with string atoms (String flavor)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test01d_StringAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'stringAtom2',
atom=tb.StringAtom(itemsize=3),
title="Ragged array of strings")
vlarray.flavor = "python"
vlarray.append(["1", "12", "123", "1234", "12345"])
vlarray.append(["1", "12345"])
# Modify the rows
vlarray[0] = ["1", "123", "12", "", "12345"]
vlarray[1] = ["44", "4"]
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing String flavor")
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertEqual(row[0], [b"1", b"123", b"12", b"", b"123"])
self.assertEqual(row[1], [b"44", b"4"])
self.assertEqual(len(row[0]), 5)
self.assertEqual(len(row[1]), 2)
def test02_BoolAtom(self):
"""Checking vlarray with boolean atoms."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02_BoolAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'BoolAtom',
atom=tb.BoolAtom(),
title="Ragged array of Booleans")
vlarray.append([1, 0, 3])
vlarray.append([-1, 0])
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([1, 0, 1], dtype='bool')))
self.assertTrue(common.allequal(
row[1], np.array([1, 0], dtype='bool')))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test02b_BoolAtom(self):
"""Checking setting vlarray with boolean atoms."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test02b_BoolAtom..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', 'BoolAtom',
atom=tb.BoolAtom(),
title="Ragged array of Booleans")
vlarray.append([1, 0, 3])
vlarray.append([-1, 0])
# Modify the rows
vlarray[0] = (0, 1, 3)
vlarray[1] = (0, -1)
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([0, 1, 1], dtype='bool')))
self.assertTrue(common.allequal(
row[1], np.array([0, 1], dtype='bool')))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test03_IntAtom(self):
"""Checking vlarray with integer atoms."""
ttypes = [
"int8",
"uint8",
"int16",
"uint16",
"int32",
"uint32",
"int64",
# "UInt64", # Unavailable in some platforms
]
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03_IntAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(atype))
vlarray.append([1, 2, 3])
vlarray.append([-1, 0])
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(
common.allequal(row[0], np.array([1, 2, 3], dtype=atype)))
self.assertTrue(
common.allequal(row[1], np.array([-1, 0], dtype=atype)))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test03a_IntAtom(self):
"""Checking vlarray with integer atoms (byteorder swapped)"""
ttypes = {
"int8": np.int8,
"uint8": np.uint8,
"int16": np.int16,
"uint16": np.uint16,
"int32": np.int32,
"uint32": np.uint32,
"int64": np.int64,
# "UInt64": numpy.int64, # Unavailable in some platforms
}
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03a_IntAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(ttypes[atype]))
a0 = np.array([1, 2, 3], dtype=atype)
a0 = a0.byteswap()
a0 = a0.newbyteorder()
vlarray.append(a0)
a1 = np.array([-1, 0], dtype=atype)
a1 = a1.byteswap()
a1 = a1.newbyteorder()
vlarray.append(a1)
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([1, 2, 3], dtype=ttypes[atype])))
self.assertTrue(common.allequal(
row[1], np.array([-1, 0], dtype=ttypes[atype])))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test03b_IntAtom(self):
"""Checking updating vlarray with integer atoms."""
ttypes = [
"int8",
"uint8",
"int16",
"uint16",
"int32",
"uint32",
"int64",
# "UInt64", # Unavailable in some platforms
]
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03_IntAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(atype))
vlarray.append([1, 2, 3])
vlarray.append([-1, 0])
# Modify rows
vlarray[0] = (3, 2, 1)
vlarray[1] = (0, -1)
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(
common.allequal(row[0], np.array([3, 2, 1], dtype=atype)))
self.assertTrue(
common.allequal(row[1], np.array([0, -1], dtype=atype)))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test03c_IntAtom(self):
"""Checking updating vlarray with integer atoms (byteorder swapped)"""
ttypes = {
"int8": np.int8,
"uint8": np.uint8,
"int16": np.int16,
"uint16": np.uint16,
"int32": np.int32,
"uint32": np.uint32,
"int64": np.int64,
# "UInt64": numpy.int64, # Unavailable in some platforms
}
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03c_IntAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(ttypes[atype]))
a0 = np.array([1, 2, 3], dtype=atype)
vlarray.append(a0)
a1 = np.array([-1, 0], dtype=atype)
vlarray.append(a1)
# Modify rows
a0 = np.array([3, 2, 1], dtype=atype)
a0 = a0.byteswap()
a0 = a0.newbyteorder()
vlarray[0] = a0
a1 = np.array([0, -1], dtype=atype)
a1 = a1.byteswap()
a1 = a1.newbyteorder()
vlarray[1] = a1
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([3, 2, 1], dtype=ttypes[atype])))
self.assertTrue(common.allequal(
row[1], np.array([0, -1], dtype=ttypes[atype])))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test03d_IntAtom(self):
"""Checking updating vlarray with integer atoms (another byteorder)"""
ttypes = {
"int8": np.int8,
"uint8": np.uint8,
"int16": np.int16,
"uint16": np.uint16,
"int32": np.int32,
"uint32": np.uint32,
"int64": np.int64,
# "UInt64": numpy.int64, # Unavailable in some platforms
}
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test03d_IntAtom..." % self.__class__.__name__)
byteorder = {'little': 'big', 'big': 'little'}[sys.byteorder]
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(ttypes[atype]),
byteorder=byteorder)
a0 = np.array([1, 2, 3], dtype=atype)
vlarray.append(a0)
a1 = np.array([-1, 0], dtype=atype)
vlarray.append(a1)
# Modify rows
a0 = np.array([3, 2, 1], dtype=atype)
a0 = a0.byteswap()
a0 = a0.newbyteorder()
vlarray[0] = a0
a1 = np.array([0, -1], dtype=atype)
a1 = a1.byteswap()
a1 = a1.newbyteorder()
vlarray[1] = a1
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
byteorder2 = tb.utils.byteorders[row[0].dtype.byteorder]
if byteorder2 != "irrelevant":
self.assertEqual(tb.utils.byteorders[row[0].dtype.byteorder],
sys.byteorder)
self.assertEqual(vlarray.byteorder, byteorder)
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([3, 2, 1], dtype=ttypes[atype])))
self.assertTrue(common.allequal(
row[1], np.array([0, -1], dtype=ttypes[atype])))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04_FloatAtom(self):
"""Checking vlarray with floating point atoms."""
ttypes = [
"float32",
"float64",
]
for name in ("float16", "float96", "float128"):
atomname = name.capitalize() + 'Atom'
if hasattr(tb, atomname):
ttypes.append(name)
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04_FloatAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(atype))
vlarray.append([1.3, 2.2, 3.3])
vlarray.append([-1.3e34, 1.e-32])
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([1.3, 2.2, 3.3], atype)))
self.assertTrue(common.allequal(
row[1], np.array([-1.3e34, 1.e-32], atype)))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04a_FloatAtom(self):
"""Checking vlarray with float atoms (byteorder swapped)"""
ttypes = {
"float32": np.float32,
"float64": np.float64,
}
if hasattr(tb, "Float16Atom"):
ttypes["float16"] = np.float16
if hasattr(tb, "Float96Atom"):
ttypes["float96"] = np.float96
if hasattr(tb, "Float128Atom"):
ttypes["float128"] = np.float128
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04a_FloatAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(ttypes[atype]))
a0 = np.array([1.3, 2.2, 3.3], dtype=atype)
a0 = a0.byteswap()
a0 = a0.newbyteorder()
vlarray.append(a0)
a1 = np.array([-1.3e34, 1.e-32], dtype=atype)
a1 = a1.byteswap()
a1 = a1.newbyteorder()
vlarray.append(a1)
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([1.3, 2.2, 3.3], dtype=ttypes[atype])))
self.assertTrue(common.allequal(
row[1], np.array([-1.3e34, 1.e-32], dtype=ttypes[atype])))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04b_FloatAtom(self):
"""Checking updating vlarray with floating point atoms."""
ttypes = [
"float32",
"float64",
]
for name in ("float16", "float96", "float128"):
atomname = name.capitalize() + 'Atom'
if hasattr(tb, atomname):
ttypes.append(name)
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04b_FloatAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(atype))
vlarray.append([1.3, 2.2, 3.3])
vlarray.append([-1.3e34, 1.e-32])
            # Modify some rows
vlarray[0] = (4.3, 2.2, 4.3)
vlarray[1] = (-1.1e34, 1.3e-32)
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([4.3, 2.2, 4.3], atype)))
self.assertTrue(
common.allequal(row[1], np.array([-1.1e34, 1.3e-32], atype)))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04c_FloatAtom(self):
"""Checking updating vlarray with float atoms (byteorder swapped)"""
ttypes = {
"float32": np.float32,
"float64": np.float64,
}
if hasattr(tb, "Float16Atom"):
ttypes["float16"] = np.float16
if hasattr(tb, "Float96Atom"):
ttypes["float96"] = np.float96
if hasattr(tb, "Float128Atom"):
ttypes["float128"] = np.float128
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04c_FloatAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(ttypes[atype]))
a0 = np.array([1.3, 2.2, 3.3], dtype=atype)
vlarray.append(a0)
a1 = np.array([-1, 0], dtype=atype)
vlarray.append(a1)
# Modify rows
a0 = np.array([4.3, 2.2, 4.3], dtype=atype)
a0 = a0.byteswap()
a0 = a0.newbyteorder()
vlarray[0] = a0
a1 = np.array([-1.1e34, 1.3e-32], dtype=atype)
a1 = a1.byteswap()
a1 = a1.newbyteorder()
vlarray[1] = a1
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([4.3, 2.2, 4.3], dtype=ttypes[atype])))
self.assertTrue(common.allequal(
row[1], np.array([-1.1e34, 1.3e-32], dtype=ttypes[atype])))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04d_FloatAtom(self):
"""Checking updating vlarray with float atoms (another byteorder)"""
ttypes = {
"float32": np.float32,
"float64": np.float64,
}
if hasattr(tb, "Float16Atom"):
ttypes["float16"] = np.float16
if hasattr(tb, "Float96Atom"):
ttypes["float96"] = np.float96
if hasattr(tb, "Float128Atom"):
ttypes["float128"] = np.float128
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04d_FloatAtom..." % self.__class__.__name__)
byteorder = {'little': 'big', 'big': 'little'}[sys.byteorder]
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(ttypes[atype]),
byteorder=byteorder)
a0 = np.array([1.3, 2.2, 3.3], dtype=atype)
vlarray.append(a0)
a1 = np.array([-1, 0], dtype=atype)
vlarray.append(a1)
# Modify rows
a0 = np.array([4.3, 2.2, 4.3], dtype=atype)
a0 = a0.byteswap()
a0 = a0.newbyteorder()
vlarray[0] = a0
a1 = np.array([-1.1e34, 1.3e-32], dtype=atype)
a1 = a1.byteswap()
a1 = a1.newbyteorder()
vlarray[1] = a1
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.byteorder, byteorder)
self.assertEqual(tb.utils.byteorders[row[0].dtype.byteorder],
sys.byteorder)
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([4.3, 2.2, 4.3], dtype=ttypes[atype])))
self.assertTrue(common.allequal(
row[1], np.array([-1.1e34, 1.3e-32], dtype=ttypes[atype])))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04_ComplexAtom(self):
"""Checking vlarray with numerical complex atoms."""
ttypes = [
"complex64",
"complex128",
]
if hasattr(tb, "Complex192Atom"):
ttypes.append("complex192")
if hasattr(tb, "Complex256Atom"):
ttypes.append("complex256")
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04_ComplexAtom..." % self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(atype))
vlarray.append([(1.3 + 0j), (0+2.2j), (3.3+3.3j)])
vlarray.append([(0-1.3e34j), (1.e-32 + 0j)])
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([(1.3 + 0j), (0+2.2j), (3.3+3.3j)], atype)))
self.assertTrue(common.allequal(
row[1], np.array([(0-1.3e34j), (1.e-32 + 0j)], atype)))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test04b_ComplexAtom(self):
"""Checking modifying vlarray with numerical complex atoms."""
ttypes = [
"complex64",
"complex128",
]
if hasattr(tb, "Complex192Atom"):
ttypes.append("complex192")
if hasattr(tb, "Complex256Atom"):
ttypes.append("complex256")
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test04b_ComplexAtom..." %
self.__class__.__name__)
for atype in ttypes:
vlarray = self.h5file.create_vlarray(
'/', atype, atom=tb.Atom.from_sctype(atype))
vlarray.append([(1.3 + 0j), (0+2.2j), (3.3+3.3j)])
vlarray.append([(0-1.3e34j), (1.e-32 + 0j)])
# Modify the rows
vlarray[0] = ((1.4 + 0j), (0+4.2j), (3.3+4.3j))
vlarray[1] = ((4-1.3e34j), (1.e-32 + 4j))
if self.reopen:
name = vlarray._v_pathname
self._reopen(mode='a')
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Testing type:", atype)
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertTrue(common.allequal(
row[0], np.array([(1.4 + 0j), (0+4.2j), (3.3+4.3j)], atype)))
self.assertTrue(common.allequal(
row[1], np.array([(4-1.3e34j), (1.e-32 + 4j)], atype)))
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 2)
def test05_VLStringAtom(self):
"""Checking vlarray with variable length strings."""
# Skip the test if the default encoding has been mangled.
if sys.getdefaultencoding() != 'ascii':
return
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05_VLStringAtom..." %
self.__class__.__name__)
vlarray = self.h5file.create_vlarray(
'/', "VLStringAtom", atom=tb.VLStringAtom())
vlarray.append(b"asd")
vlarray.append(b"asd\xe4")
vlarray.append(b"aaana")
vlarray.append(b"")
# Check for ticket #62.
self.assertRaises(TypeError, vlarray.append, [b"foo", b"bar"])
# `VLStringAtom` makes no encoding assumptions. See ticket #51.
self.assertRaises(UnicodeEncodeError, vlarray.append, "asd\xe4")
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 4)
self.assertEqual(row[0], b"asd")
self.assertEqual(row[1], b"asd\xe4")
self.assertEqual(row[2], b"aaana")
self.assertEqual(row[3], b"")
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 4)
self.assertEqual(len(row[2]), 5)
self.assertEqual(len(row[3]), 0)
def test05b_VLStringAtom(self):
"""Checking updating vlarray with variable length strings."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test05b_VLStringAtom..." %
self.__class__.__name__)
vlarray = self.h5file.create_vlarray(
'/', "VLStringAtom", atom=tb.VLStringAtom())
vlarray.append(b"asd")
vlarray.append(b"aaana")
# Modify values
vlarray[0] = b"as4"
vlarray[1] = b"aaanc"
self.assertRaises(ValueError, vlarray.__setitem__, 1, b"shrt")
self.assertRaises(ValueError, vlarray.__setitem__, 1, b"toolong")
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", repr(row[0]))
print("Second row in vlarray ==>", repr(row[1]))
self.assertEqual(vlarray.nrows, 2)
self.assertEqual(row[0], b"as4")
self.assertEqual(row[1], b"aaanc")
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 5)
def test06a_Object(self):
"""Checking vlarray with object atoms."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test06a_Object..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray(
'/', "Object", atom=tb.ObjectAtom())
vlarray.append(
[[1, 2, 3], "aaa", "aaa\xef\xbf\xbd\xef\xbf\xbd\xef\xbf\xbd"])
vlarray.append([3, 4, C()])
vlarray.append(42)
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 3)
self.assertEqual(
row[0],
[[1, 2, 3], "aaa", "aaa\xef\xbf\xbd\xef\xbf\xbd\xef\xbf\xbd"])
list1 = list(row[1])
obj = list1.pop()
self.assertEqual(list1, [3, 4])
self.assertEqual(obj.c, C().c)
self.assertEqual(row[2], 42)
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 3)
self.assertRaises(TypeError, len, row[2])
def test06b_Object(self):
"""Checking updating vlarray with object atoms."""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test06b_Object..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', "Object",
atom=tb.ObjectAtom())
# When updating an object, this seems to change the number
# of bytes that pickle.dumps generates
# vlarray.append(
# ([1,2,3], "aaa", "aaa\xef\xbf\xbd\xef\xbf\xbd\xef\xbf\xbd"))
vlarray.append(([1, 2, 3], "aaa", "\xef\xbf\xbd\xef\xbf\xbd4"))
# vlarray.append([3,4, C()])
vlarray.append([3, 4, [24]])
# Modify the rows
# vlarray[0] = ([1,2,4], "aa4", "aaa\xef\xbf\xbd\xef\xbf\xbd4")
vlarray[0] = ([1, 2, 4], "aa4", "\xef\xbf\xbd\xef\xbf\xbd5")
# vlarray[1] = (3,4, C())
vlarray[1] = [4, 4, [24]]
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 2)
self.assertEqual(row[0],
([1, 2, 4], "aa4", "\xef\xbf\xbd\xef\xbf\xbd5"))
list1 = list(row[1])
obj = list1.pop()
self.assertEqual(list1, [4, 4])
# self.assertEqual(obj.c, C().c)
self.assertEqual(obj, [24])
self.assertEqual(len(row[0]), 3)
self.assertEqual(len(row[1]), 3)
def test06c_Object(self):
"""Checking vlarray with object atoms (numpy arrays as values)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test06c_Object..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', "Object",
atom=tb.ObjectAtom())
vlarray.append(np.array([[1, 2], [0, 4]], 'i4'))
vlarray.append(np.array([0, 1, 2, 3], 'i8'))
vlarray.append(np.array(42, 'i1'))
if self.reopen:
name = vlarray._v_pathname
self._reopen()
vlarray = self.h5file.get_node(name)
# Read all the rows:
row = vlarray.read()
if common.verbose:
print("Object read:", row)
print("Nrows in", vlarray._v_pathname, ":", vlarray.nrows)
print("First row in vlarray ==>", row[0])
self.assertEqual(vlarray.nrows, 3)
self.assertTrue(common.allequal(
row[0], np.array([[1, 2], [0, 4]], 'i4')))
self.assertTrue(common.allequal(row[1], np.array([0, 1, 2, 3], 'i8')))
self.assertTrue(common.allequal(row[2], np.array(42, 'i1')))
def test06d_Object(self):
"""Checking updating vlarray with object atoms (numpy arrays)"""
if common.verbose:
print('\n', '-=' * 30)
print("Running %s.test06d_Object..." % self.__class__.__name__)
vlarray = self.h5file.create_vlarray('/', "Object",
atom=tb.ObjectAtom())
vlarray.append(
|
np.array([[1, 2], [0, 4]], 'i4')
|
numpy.array
|
import argparse
import os
import time
import h5py
import numpy as np
import pymesh
import trimesh
from joblib import Parallel, delayed
from scipy.interpolate import RegularGridInterpolator
import create_file_lst
CUR_PATH = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--thread_num', type=int, default='9', help='how many objs are creating at the same time')
parser.add_argument('--category', type=str, default="all", help='Which single class to generate on [default: all, can '
'be chair or plane, etc.]')
FLAGS = parser.parse_args()
def get_sdf_value(sdf_pt, sdf_params_ph, sdf_ph, sdf_res):
x = np.linspace(sdf_params_ph[0], sdf_params_ph[3], num=sdf_res + 1)
y = np.linspace(sdf_params_ph[1], sdf_params_ph[4], num=sdf_res + 1)
z = np.linspace(sdf_params_ph[2], sdf_params_ph[5], num=sdf_res + 1)
my_interpolating_function = RegularGridInterpolator((z, y, x), sdf_ph)
sdf_value = my_interpolating_function(sdf_pt)
print("sdf_value:", sdf_value.shape)
return np.expand_dims(sdf_value, axis=1)
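# Minimal usage sketch for get_sdf_value (all values below are assumptions,
# not taken from this repo): query the trilinear interpolator at two points,
# given in (z, y, x) order, on an assumed unit-cube grid.
#     sdf_res = 64
#     params = np.array([-0.5, -0.5, -0.5, 0.5, 0.5, 0.5], dtype=np.float32)
#     volume = np.zeros((sdf_res + 1,) * 3, dtype=np.float32)
#     pts = np.array([[0.0, 0.0, 0.0], [0.1, -0.2, 0.3]])
#     vals = get_sdf_value(pts, params, volume, sdf_res)  # shape (2, 1)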
def get_sdf(sdf_file, sdf_res):
intsize = 4
floatsize = 8
sdf = {
"param": [],
"value": []
}
with open(sdf_file, "rb") as f:
try:
bytes = f.read()
ress = np.frombuffer(bytes[:intsize * 3], dtype=np.int32)
if -1 * ress[0] != sdf_res or ress[1] != sdf_res or ress[2] != sdf_res:
raise Exception(sdf_file, "res not consistent with ", str(sdf_res))
positions = np.frombuffer(bytes[intsize * 3:intsize * 3 + floatsize * 6], dtype=np.float64)
# bottom left corner, x,y,z and top right corner, x, y, z
sdf["param"] = [positions[0], positions[1], positions[2], positions[3], positions[4], positions[5]]
sdf["param"] = np.float32(sdf["param"])
sdf["value"] = np.frombuffer(bytes[intsize * 3 + floatsize * 6:], dtype=np.float32)
sdf["value"] = np.reshape(sdf["value"], (sdf_res + 1, sdf_res + 1, sdf_res + 1))
finally:
f.close()
return sdf
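# Hedged sketch of consuming get_sdf's result (the .dist file name is
# hypothetical): "param" holds [x_min, y_min, z_min, x_max, y_max, z_max],
# so the voxel edge length along each axis is (max - min) / sdf_res.
#     res = 64
#     sdf = get_sdf("model.dist", res)
#     lo, hi = sdf["param"][:3], sdf["param"][3:]
#     voxel = (hi - lo) / res  # per-axis cell size
#     assert sdf["value"].shape == (res + 1,) * 3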
def get_offset_ball(num, bandwidth):
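    # Sample `num` points uniformly inside a ball of radius `bandwidth`: a
    # standard-normal (u, v, w) gives an isotropic direction once normalized,
    # and drawing the radius as U^(1/3) * bandwidth spreads points uniformly
    # over the ball's volume rather than clustering them near the center.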
u = np.random.normal(0, 1, size=(num, 1))
v = np.random.normal(0, 1, size=(num, 1))
w = np.random.normal(0, 1, size=(num, 1))
r = np.random.uniform(0, 1, size=(num, 1)) ** (1. / 3) * bandwidth
norm = np.linalg.norm(
|
np.concatenate([u, v, w], axis=1)
|
numpy.concatenate
|
"""
<NAME>
Date: June 24, 2021
Utility functions for reading and writing data
"""
import os
import csv
import datetime
import numpy as np
import pandas as pd
from astropy.time import Time
def read_csv(csv_file_path):
"""
function to read csv file and return list of dates
Parameters
----------
csv_file_path : str
path to csv file
Returns
-------
csv_list : str, list
list of elements in csv file
"""
with open(csv_file_path, newline='') as f:
reader = csv.reader(f)
csv_list = list(reader)
return csv_list
def append_list_as_row(file_name, list_of_elem):
"""
function to add row to csv file
Parameters
----------
file_name : str
path to csv file
list_of_elem : list
elements as a list to add to file
Returns
-------
"""
# Open file in append mode
with open(file_name, 'a+', newline='') as write_obj:
# Create a writer object from csv module
csv_writer = csv.writer(write_obj)
# Add contents of list as last row in the csv file
csv_writer.writerow(list_of_elem)
return None
def get_dates(date):
"""
function to convert dates from either JD, string, or datetime
to a Sunpy usable date form
Parameters
----------
date
date in any form (JD, string, datetime)
Returns
-------
date_str : str
UT datetime as string
date_obj : datetime
UT datetime object
date_jd : float
UT datetime as float (JD)
"""
if isinstance(date, str):
date_str = date
date_obj = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S')
date_jd = Time(date_str)
date_jd.format = 'jd'
elif isinstance(date, float):
t = Time(date, format='jd')
date_obj = t.datetime
date_str = date_obj.strftime('%Y-%m-%dT%H:%M:%S')
date_jd = date
else:
date_obj = date
date_str = date_obj.strftime('%Y-%m-%dT%H:%M:%S')
date_jd = Time(date_str)
date_jd.format = 'jd'
return date_str, date_obj, date_jd
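# Illustrative round trip for get_dates (the date itself is arbitrary): all
# three accepted input forms should resolve to the same UT instant.
#     s1, o1, j1 = get_dates('2021-06-24T00:00:00')  # from string
#     s2, o2, j2 = get_dates(o1)                     # from datetime
#     s3, o3, j3 = get_dates(float(j1.value))        # from JD float
#     assert s1 == s2 == s3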
def get_neid_struct_dates(date):
"""
function to convert dates to the format used by the
NEID fits files directory structure
Parameters
----------
date
date in any form (JD, string, datetime)
Returns
-------
date_str : str
date string
date_obj : datetime
datetime object
date_jd : float
jd date
"""
if isinstance(date, str):
# date_str = date[0:4] + date[5:7] + date[8:10]
date_str = date
date_obj = datetime.datetime.strptime(date_str, '%Y%m%d')
date_str = date_obj.strftime('%Y-%m-%dT%H:%M:%S')
date_jd = Time(date_str)
date_jd.format = 'jd'
elif isinstance(date, float):
t = Time(date, format='jd')
date_obj = t.datetime
date_str = date_obj.strftime('%Y%m%d')
date_jd = date
else:
date_obj = date
date_str = date_obj.strftime('%Y%m%d')
date_jd = Time(date_str)
date_jd.format = 'jd'
return date_str, date_obj, date_jd
def read_sdo_csv(csv_file):
"""
function to read in csv file with package calculations
and return metrics
Parameters
----------
csv_file : str
path to csv file to read
Returns
-------
list : list
list of SDO derived metrics
"""
# create pandas dataframe
component_df = pd.read_csv(csv_file)
# get dates list
date_jd = component_df.date_jd.values
inds = np.argsort(date_jd)
rv_sun = component_df.rv_sun.values[inds]
rv_error = component_df.rv_error.values[inds]
non_nan = np.logical_not(np.isnan(rv_sun))
rv_med = np.nanmedian(np.abs(rv_sun))
good_sun = np.logical_and(
|
np.abs(rv_sun)
|
numpy.abs
|
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py
This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset:
python run_openai_gpt.py \
--model_name openai-gpt \
--do_train \
--do_eval \
--train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
--eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
--output_dir ../log \
--train_batch_size 16 \
"""
import argparse
import os
import random
import logging
from tqdm import tqdm, trange
from io import open
import datetime
from scipy.misc import logsumexp
import flair
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_transformers import (GPT2Config, GPT2LMHeadModel, GPT2Model, AdamW, cached_path, WEIGHTS_NAME, CONFIG_NAME)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', filename='gpt_2_2.log',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class TaggedCorpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.pos_dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'tagged_train.txt'))
self.valid = self.tokenize(os.path.join(path, 'tagged_valid.txt'))
self.test = self.tokenize(os.path.join(path, 'tagged_test.txt'))
def tokenize(self, path):
"""Tokenizes a text file into """
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r', encoding="utf8") as f:
tokens = 0
for line in f:
if len(line.strip().split()[::2]) > 3:
words = ['<sos>'] + line.strip().split()[::2] + ['<eos>']
pos_tags = ['<SOS>'] + line.strip().split()[1::2] + ['<EOS>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
for tag in pos_tags:
self.pos_dictionary.add_word(tag)
# Tokenize file content
with open(path, 'r', encoding="utf8") as f:
ids = torch.LongTensor(tokens)
pos_ids = torch.LongTensor(tokens)
token = 0
pos_token = 0
for line in f:
if len(line.strip().split()[::2]) > 3:
#print (line.strip().split())
words = ['<sos>']+line.strip().split()[::2] + ['<eos>']
pos_tags = ['<SOS>'] + line.strip().split()[1::2] + ['<EOS>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
                    for tag in pos_tags:
                        pos_ids[pos_token] = self.pos_dictionary.word2idx[tag]
                        pos_token += 1
        return ids, pos_ids
class ProbeModel(nn.Module):
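    # A two-layer probe mapping the wrapped model's syntactic hidden states
    # to POS-tag logits; the wrapped model is run without labels, so only
    # the probe loss is computed and optimized.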
def __init__(self, model, config):
super(ProbeModel, self).__init__()
self.model = model
self.probe_cls_fc1 = nn.Linear(config.n_embd, config.n_embd)
self.probe_cls_fc2 = nn.Linear(config.n_embd, config.pos_vocab_size)
def forward(self, input_ids, position_ids=None,pos_ids = None, token_type_ids=None, labels=None, past=None, head_mask=None):
model_outputs = self.model(input_ids, position_ids=None,pos_ids = None, token_type_ids=None, labels=None, past=None, head_mask=None)
sem_hid_state = model_outputs[-1]
syn_hid_states = model_outputs[-2]
pos_logits = self.probe_cls_fc2(torch.relu(self.probe_cls_fc1(syn_hid_states)))
#pos_logits = self.probe_cls_fc1(sem_hid_state)
shift_pos_logits = pos_logits[..., :-1, :].contiguous()
shift_pos_labels = pos_ids[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss = loss_fct(shift_pos_logits.view(-1, shift_pos_logits.size(-1)),
shift_pos_labels.view(-1))
return loss,shift_pos_logits
def load_tokenize_and_batchify(data_dir = '../SemSynLSTM/word_language_model/data/wikitext-2/', input_len = 128):
"""
load dataset and return train, val, test dataset
"""
tensor_datasets = []
corpus = TaggedCorpus(data_dir)
train_data = corpus.train
val_data = corpus.valid
test_data = corpus.test
for dataset in [train_data, val_data, test_data]:
##divide data by batch, truncate to fit into batches
n_batch = len(dataset[0]) // input_len
input_ids = dataset[0][: n_batch * input_len].reshape(n_batch, input_len)
pos_ids = dataset[1][: n_batch * input_len].reshape(n_batch, input_len)
all_inputs = (input_ids, pos_ids)
tensor_datasets.append(tuple(t for t in all_inputs))
return tensor_datasets[0], tensor_datasets[1],tensor_datasets[2], corpus.dictionary, corpus.pos_dictionary
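# Shape sketch (numbers illustrative): with input_len = 128 and a split of
# 1,000 tokens, n_batch = 1000 // 128 = 7, so the ids are truncated to 896
# tokens and reshaped to (7, 128); the trailing 104 tokens are dropped.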
class WrapperLMHead(GPT2LMHeadModel):
def __init__(self, model1, model2, config, model_option):
super(GPT2LMHeadModel, self).__init__(config)
self.syn_transformer = model1
self.sem_transformer = model2
self.model_option = model_option
if model_option == 'gpt_2_2':
self.lm_head = nn.Linear(2 * config.n_embd, config.vocab_size, bias=False)
elif model_option == 'syn_only':
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
elif model_option == 'adverse':
self.lm_head = nn.Linear(2 * config.n_embd, config.vocab_size, bias=False)
self.adv_pos_head = nn.Linear(config.n_embd, config.pos_vocab_size, bias=False)
self.pos_head = nn.Linear(config.n_embd, config.pos_vocab_size, bias=False)
self.apply(self.init_weights)
#self.tie_weights()
def forward(self, input_ids,pos_ids = None, position_ids=None, token_type_ids=None, labels=None, past=None, head_mask=None):
syn_transformer_outputs = self.syn_transformer(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
past=past, head_mask=head_mask)
syn_hidden_states = syn_transformer_outputs[0]
        # TODO: set position_ids to all zeros or randomly permuted for the semTransformer?
if self.model_option == 'gpt_2_2':
sem_transformer_outputs = self.sem_transformer(input_ids, position_ids=torch.zeros_like(input_ids), token_type_ids=token_type_ids,
past=past, head_mask=head_mask)
sem_hidden_states = sem_transformer_outputs[0]
hidden_states = torch.cat((syn_hidden_states, sem_hidden_states), dim=-1)
        elif self.model_option == 'syn_only':
            hidden_states = syn_hidden_states
            # No semantic stream in this configuration; define it so the
            # outputs tuple below does not raise a NameError.
            sem_hidden_states = None
lm_logits1 = self.lm_head(hidden_states)
pos_logits = self.pos_head(syn_hidden_states)
        outputs = (lm_logits1,) + (hidden_states, syn_hidden_states, sem_hidden_states)
if labels is not None and pos_ids is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits1[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_pos_logits = pos_logits[..., :-1, :].contiguous()
shift_pos_ids = pos_ids[..., 1:].contiguous()
# Flatten the tokens
#TODO why are we ignoring -1 ?
loss_fct = CrossEntropyLoss(ignore_index=-1)
loss_lm = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
loss_pos = loss_fct(shift_pos_logits.view(-1, shift_pos_logits.size(-1)),
shift_pos_ids.view(-1))
loss = loss_lm + loss_pos
outputs = ((loss,loss_lm, loss_pos),) + outputs
return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, help="pretrained_model.")
parser.add_argument("--model_option", type=str, default='gpt-2-2', help="pretrained_model.")
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--do_probe", action='store_true', help="Whether to run probing.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--data_dir', type=str, default='/home/xiongyi/dataxyz/repos/SemSynLSTM/word_language_model/data/wikitext-2/')
parser.add_argument('--seed', type=int, default=12)
parser.add_argument('--num_train_epochs', type=int, default=3)
parser.add_argument('--train_batch_size', type=int, default=8)
parser.add_argument('--eval_batch_size', type=int, default=16)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=6.25e-5)
parser.add_argument('--warmup_proportion', type=float, default=0.002)
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--lm_coef', type=float, default=0.9)
parser.add_argument('--n_valid', type=int, default=374)
timenow = datetime.datetime.now().strftime("%b%d%H%M")
model_option = 'gpt_2_2'
outdir = model_option + timenow
args = parser.parse_args(['--output_dir', outdir,'--do_probe','--num_train_epochs', '10', '--model_option',model_option])
#args = parser.parse_args(['--output_dir', './tmp', '--do_eval', '--model_name', 'gpt2'])
print(args)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
#special_tokens = ['_start_', '_delimiter_']
#special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
# Compute the max input length for the Transformer
input_length = 128
data_dir = '../SemSynLSTM/word_language_model/data/wikitext-2/' if args.data_dir is None else args.data_dir
train_set, val_set, test_set, dictionary, pos_dictionary = load_tokenize_and_batchify(data_dir, input_length)
# Prepare inputs tensors and dataloaders
train_data = TensorDataset(*train_set)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=32)
eval_data = TensorDataset(*val_set)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=32)
config = GPT2Config(n_positions=256,n_ctx=256, n_layer=8,n_head=8, n_embd= 256)
config.vocab_size = dictionary.__len__()
config.pos_vocab_size = pos_dictionary.__len__()
config.n_ctx = input_length
config.n_positions = input_length
model1 = GPT2Model(config=config)
#TODO: GPTWithPOSPredicting
model2 = GPT2Model(config=config)
#TODO: Wrapp2Transformers together and add a LM head
model = WrapperLMHead(model1, model2, config, args.model_option)
model.to(device)
# TODO: Load and encode the datasets
logger.info("Encoding dataset...")
# Prepare optimizer
if args.do_train:
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = len(train_dataloader) * args.num_train_epochs
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate,
#max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay)
#t_total=num_train_optimization_steps)
if args.do_train:
train_results = {}
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.train()
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
###eval on eval set
model.eval()
nb_eval_steps, nb_eval_examples = 0, 0
log_probs_sum = 0
perp = 0.0
average_loss = np.array([0.0,0.0,0.0])
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(device) for t in batch)
input_ids, input_pos_ids = batch
with torch.no_grad():
loss, loss_lm, loss_pos = model(input_ids, pos_ids = input_pos_ids, labels=input_ids)[0]
loss = loss.detach().cpu().numpy()
loss_lm = loss_lm.detach().cpu().numpy()
loss_pos = loss_pos.detach().cpu().numpy()
perp_batch = np.exp(loss_lm)
perp += perp_batch
average_loss +=
|
np.array([loss, loss_lm, loss_pos])
|
numpy.array
|
import unittest
import dolphindb as ddb
import numpy as np
import pandas as pd
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pandas.testing import assert_series_equal
from pandas.testing import assert_frame_equal
class TestBasicDataTypes(unittest.TestCase):
@classmethod
    def setUpClass(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
@classmethod
def tearDownClass(cls):
pass
def test_int_scalar(self):
re = self.s.run("100")
self.assertEqual(re, 100)
re = self.s.run("int()")
self.assertIsNone(re)
def test_bool_scalar(self):
re = self.s.run("true")
self.assertEqual(re, True)
re = self.s.run("bool()")
self.assertIsNone(re)
def test_char_scalar(self):
re = self.s.run("'a'")
self.assertEqual(re, 97)
re = self.s.run("char()")
self.assertIsNone(re)
def test_short_scalar(self):
re = self.s.run("112h")
self.assertEqual(re, 112)
re = self.s.run("short()")
self.assertIsNone(re)
def test_long_scalar(self):
re = self.s.run("22l")
self.assertEqual(re, 22)
re = self.s.run("long()")
self.assertIsNone(re)
def test_date_scalar(self):
re = self.s.run("2012.06.12")
self.assertEqual(re, np.datetime64('2012-06-12'))
re = self.s.run("date()")
self.assertIsNone(re)
def test_month_scalar(self):
re = self.s.run("2012.06M")
self.assertEqual(re, np.datetime64('2012-06'))
re = self.s.run("month()")
self.assertIsNone(re)
def test_time_scalar(self):
re = self.s.run("12:30:00.008")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:00.008'))
re = self.s.run("time()")
self.assertIsNone(re)
def test_minute_scalar(self):
re = self.s.run("12:30m")
self.assertEqual(re, np.datetime64('1970-01-01T12:30'))
re = self.s.run("minute()")
self.assertIsNone(re)
def test_second_scalar(self):
re = self.s.run("12:30:10")
self.assertEqual(re, np.datetime64('1970-01-01T12:30:10'))
re = self.s.run("second()")
self.assertIsNone(re)
def test_datetime_scalar(self):
re = self.s.run('2012.06.13 13:30:10')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10'))
re = self.s.run("datetime()")
self.assertIsNone(re)
def test_timestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008'))
re = self.s.run("timestamp()")
self.assertIsNone(re)
def test_nanotime_scalar(self):
re = self.s.run('13:30:10.008007006')
self.assertEqual(re, np.datetime64('1970-01-01T13:30:10.008007006'))
re = self.s.run("nanotime()")
self.assertIsNone(re)
def test_nanotimestamp_scalar(self):
re = self.s.run('2012.06.13 13:30:10.008007006')
self.assertEqual(re, np.datetime64('2012-06-13T13:30:10.008007006'))
re = self.s.run("nanotimestamp()")
self.assertIsNone(re)
def test_float_scalar(self):
re = self.s.run('2.1f')
self.assertEqual(round(re), 2)
re = self.s.run("float()")
self.assertIsNone(re)
def test_double_scalar(self):
re = self.s.run('2.1')
self.assertEqual(re, 2.1)
re = self.s.run("double()")
self.assertIsNone(re)
def test_string_scalar(self):
re = self.s.run('"abc"')
self.assertEqual(re, 'abc')
re = self.s.run("string()")
self.assertIsNone(re)
def test_uuid_scalar(self):
re = self.s.run("uuid('5d212a78-cc48-e3b1-4235-b4d91473ee87')")
self.assertEqual(re, '5d212a78-cc48-e3b1-4235-b4d91473ee87')
re = self.s.run("uuid()")
self.assertIsNone(re)
    def test_ipaddr_scalar(self):
re = self.s.run("ipaddr('192.168.1.135')")
self.assertEqual(re, '192.168.1.135')
re = self.s.run("ipaddr()")
self.assertIsNone(re)
def test_int128_scalar(self):
re = self.s.run("int128('e1671797c52e15f763380b45e841ec32')")
self.assertEqual(re, 'e1671797c52e15f763380b45e841ec32')
re = self.s.run("int128()")
self.assertIsNone(re)
def test_python_datetime64_dolphindb_date_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('date', ts), np.datetime64('2019-01-01'))
def test_python_datetime64_dolphindb_month_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('month', ts), np.datetime64('2019-01'))
def test_python_datetime64_dolphindb_time_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('time', ts), np.datetime64('1970-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_minute_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('minute', ts), np.datetime64('1970-01-01T20:01'))
def test_python_datetime64_dolphindb_second_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('second', ts), np.datetime64('1970-01-01T20:01:01'))
def test_python_datetime64_dolphindb_datetime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('datetime', ts), np.datetime64('2019-01-01T20:01:01'))
def test_python_datetime64_dolphindb_timestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('timestamp', ts), np.datetime64('2019-01-01T20:01:01.122'))
def test_python_datetime64_dolphindb_nanotime_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotime', ts), np.datetime64('1970-01-01T20:01:01.122346100'))
def test_python_datetime64_dolphindb_nanotimestamp_scalar(self):
ts = np.datetime64('2019-01-01T20:01:01.1223461')
self.assertEqual(self.s.run('nanotimestamp', ts), np.datetime64('2019-01-01T20:01:01.122346100'))
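    # Net effect of the conversions above: date/month/minute/second/datetime
    # truncate to their unit, time/timestamp keep millisecond precision, and
    # nanotime/nanotimestamp preserve the full sub-second value.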
def test_string_vector(self):
re = self.s.run("`IBM`GOOG`YHOO")
self.assertEqual((re == ['IBM', 'GOOG', 'YHOO']).all(), True)
re = self.s.run("['IBM', string(), 'GOOG']")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("[string(), string(), string()]")
self.assertEqual((re==['','','']).all(), True)
def test_function_def(self):
re = self.s.run("def f(a,b){return a+b}")
re = self.s.run("f(1, 2)")
self.assertEqual(re, 3)
def test_symbol_vector(self):
re = self.s.run("symbol(`IBM`MSFT`GOOG`BIDU)")
self.assertEqual((re == ['IBM', 'MSFT', 'GOOG', 'BIDU']).all(), True)
re = self.s.run("symbol(['IBM', '', 'GOOG'])")
self.assertEqual((re==['IBM', '', 'GOOG']).all(), True)
re = self.s.run("symbol(['', '', ''])")
self.assertEqual((re==['', '', '']).all(), True)
def test_char_vector(self):
re = self.s.run("['a', 'b', 'c']")
expected = [97, 98, 99]
self.assertEqual((re==expected).all(), True)
re = self.s.run("['a', char(), 'c']")
expected = [97.0, np.nan, 99.0]
assert_array_almost_equal(re, expected)
def test_bool_vector(self):
re = self.s.run("[true, false, true]")
expected = [True, False, True]
assert_array_equal(re, expected)
re = self.s.run("[true, false, bool()]")
assert_array_equal(re[0:2], [True, False])
self.assertTrue(np.isnan(re[2]))
re = self.s.run("[bool(), bool(), bool()]")
self.assertTrue(np.isnan(re[0]))
self.assertTrue(np.isnan(re[1]))
self.assertTrue(np.isnan(re[2]))
def test_int_vector(self):
re = self.s.run("2938 2920 54938 1999 2333")
self.assertEqual((re == [2938, 2920, 54938, 1999, 2333]).all(), True)
re = self.s.run("[2938, int(), 6552]")
expected = [2938.0, np.nan, 6552.0]
assert_array_almost_equal(re, expected, 1)
re = self.s.run("[int(), int(), int()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_short_vector(self):
re = self.s.run("[10h, 11h, 12h]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10h, short(), 12h]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[short(), short(), short()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_long_vector(self):
re = self.s.run("[10l, 11l, 12l]")
expected = [10, 11, 12]
assert_array_equal(re, expected)
re = self.s.run("[10l, long(), 12l]")
expected = [10.0, np.nan, 12.0]
assert_array_almost_equal(re, expected)
re = self.s.run("[long(), long(), long()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_double_vector(self):
re = self.s.run("rand(10.0,10)")
self.assertEqual(len(re), 10)
re = self.s.run("[12.5, 26.0, double()]")
expected = [12.5, 26.0, np.nan]
assert_array_almost_equal(re, expected)
re = self.s.run("[double(), double(), double()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_float_vector(self):
re = self.s.run("[12.5f, 26.34f, 25.896f]")
expected = [12.5, 26.34, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[12.5f, float(), 25.896f]")
expected = [12.5, np.nan, 25.896]
assert_array_almost_equal(re, expected, 3)
re = self.s.run("[float(), float(), float()]")
expected = [np.nan, np.nan, np.nan]
assert_array_almost_equal(re, expected)
def test_date_vector(self):
re = self.s.run("2012.10.01 +1..3")
expected = np.array(['2012-10-02','2012-10-03','2012-10-04'], dtype="datetime64")
self.assertEqual((re == expected).all(), True)
re = self.s.run("[2012.06.01, date(), 2012.06.03]")
expected = np.array(['2012-06-01', 'NaT', '2012-06-03'], dtype="datetime64")
assert_array_equal(re, expected)
re = self.s.run("[date(), date(), date()]")
expected = [
|
np.datetime64('NaT')
|
numpy.datetime64
|
"""
This module contains the loss classes.
Specific losses are used for regression, binary classification or multiclass
classification.
"""
# Author: <NAME>
from abc import ABC, abstractmethod
import numpy as np
from scipy.special import expit
try: # logsumexp was moved from misc to special in 0.19
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
from .common import Y_DTYPE
from .common import G_H_DTYPE
from ._loss import _update_gradients_least_squares
from ._loss import _update_gradients_least_absolute_deviation
from ._loss import _update_gradients_hessians_binary_crossentropy
from ._loss import _update_gradients_hessians_categorical_crossentropy
class BaseLoss(ABC):
"""Base class for a loss."""
# This variable indicates whether the loss requires the leaves values to
# be updated once the tree has been trained. The trees are trained to
# predict a Newton-Raphson step (see grower._finalize_leaf()). But for
# some losses (e.g. least absolute deviation) we need to adjust the tree
# values to account for the "line search" of the gradient descent
# procedure. See the original paper Greedy Function Approximation: A
# Gradient Boosting Machine by Friedman
# (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
need_update_leaves_values = False
def init_gradients_and_hessians(self, n_samples, prediction_dim):
"""Return initial gradients and hessians.
Unless hessians are constant, arrays are initialized with undefined
values.
Parameters
----------
n_samples : int
The number of samples passed to `fit()`.
prediction_dim : int
The dimension of a raw prediction, i.e. the number of trees
built at each iteration. Equals 1 for regression and binary
classification, or K where K is the number of classes for
multiclass classification.
Returns
-------
gradients : ndarray, shape (prediction_dim, n_samples)
The initial gradients. The array is not initialized.
hessians : ndarray, shape (prediction_dim, n_samples)
If hessians are constant (e.g. for `LeastSquares` loss), the
array is initialized to ``1``. Otherwise, the array is allocated
without being initialized.
"""
shape = (prediction_dim, n_samples)
gradients = np.empty(shape=shape, dtype=G_H_DTYPE)
if self.hessians_are_constant:
# If the hessians are constant, we consider they are equal to 1.
# - This is correct for the half LS loss
# - For LAD loss, hessians are actually 0, but they are always
# ignored anyway.
hessians =
|
np.ones(shape=(1, 1), dtype=G_H_DTYPE)
|
numpy.ones
|
"""
2014 January 31
<NAME>
Various utilities related to operations on uvfits data files.
"""
from __future__ import print_function
import numpy
from astropy.io import fits
def pcdload(visfile):
checker = visfile.find('uvfits')
if checker == -1:
uvfits = False
else:
uvfits = True
if uvfits:
# uv fits format
visdata = fits.open(visfile)
visheader = visdata[0].header
if visheader['NAXIS'] == 7:
# identify the phase center
try:
pcd_ra = visdata['AIPS SU '].data['RAEPO'][0]
pcd_dec = visdata['AIPS SU '].data['DECEPO'][0]
except:
pcd_ra = visheader['CRVAL6']
pcd_dec = visheader['CRVAL7']
if pcd_ra < 0:
pcd_ra += 360
pcd = [pcd_ra, pcd_dec]
return pcd
if visheader['NAXIS'] == 6:
pcd_ra = visdata[0].header['CRVAL5']
pcd_dec = visdata[0].header['CRVAL6']
if pcd_ra < 0:
pcd_ra += 360
pcd = [pcd_ra, pcd_dec]
return pcd
else:
# CASA MS
pcd = MSpcd(visfile)
return pcd
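# Usage sketch (hypothetical filename): pcdload accepts either a uvfits file
# or a CASA measurement set and returns the phase center as [ra_deg, dec_deg],
# e.g. pcd = pcdload('myvis.uvfits').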
def MSpcd(msFile):
"""
Explore different ways to get phase center consistent with data that has been regridded from one epoch to another
Parameters
----------
msFile: string
measurement set filename
Returns
-------
pcd: list
phase center
Note
----
which method gives the phase center corresponding to that used in CASA imaging needs further investigation
"""
from taskinit import tb
tb.open(msFile + '/FIELD')
old = tb.getcol('DELAY_DIR') #, fieldid
new = tb.getcol('PHASE_DIR')
def shiftRA(ra):
try:
if len(ra) > 1:
    # wrap only the negative elements, keeping the rest unchanged
    ra = [ra_i + numpy.pi * 2 if ra_i < 0. else ra_i for ra_i in numpy.squeeze(ra)]
else:
if ra < 0.:
ra += numpy.pi * 2.
# print(ra)
except:
if ra < 0.:
ra += numpy.pi * 2.
# print(ra)
return ra
# if old.shape[-1] > 1:
# raise("Should check if the MS file truly only contains the science target!")
if old.shape[-1] > 1:
# for this Cloverleaf set, cloverleaf is the last source
old_ra, old_dec = numpy.squeeze(old)[0][-1], numpy.squeeze(old)[-1][-1]
old = numpy.array([[old_ra], [old_dec]])
new_ra, new_dec = numpy.squeeze(new)[0][-1], numpy.squeeze(new)[-1][-1]
new = numpy.array([[new_ra], [new_dec]])
old[0] = shiftRA(old[0])
new[0] = shiftRA(new[0])
# old phase center
# _pcd_ra, _pcd_dec = old[0] * 180. / numpy.pi, old[1] * 180. / numpy.pi
# new phase center
pcd_ra, pcd_dec = new[0] * 180. / numpy.pi, new[1] * 180. / numpy.pi
# print(pcd_ra, pcd_dec)
tb.close()
try:
pcd = pcd_ra[0][0], pcd_dec[0][0]
except IndexError:
pcd = pcd_ra[0], pcd_dec[0]
return list(pcd)
def MSpcd2(msFile):
"""using CASA taskinit.ms
Parameters
----------
msFile: string
measurement set filename
Returns
-------
pcd:
phase center
Note
----
may need further modification if nField > 1
unless it is the same for all fields
(need further investigation)
"""
from taskinit import ms
ms.open(msFile)
pc = ms.getfielddirmeas()
if not isinstance(pc, dict):
    # some CASA versions appear to return a callable here rather than a dict
    pc = pc()
epoch = pc['refer']
pcd_dec = pc['m1']['value'] * 180 / numpy.pi
pcd_ra = pc['m0']['value'] * 180 / numpy.pi
if pcd_ra < 0:
pcd_ra += 360
ms.done()
pcd = [pcd_ra, pcd_dec]
return pcd
def oldMSpcd(msFile):
"""
get phase center
works for most data without uv-regrid
Parameters
----------
msFile: string
measurement set filename
Returns
-------
pcd:
phase center
Note
----
by Shane
works for data without uv-regrid using CASA's fixvis()
the following will give the old phase center otherwise
"""
from taskinit import tb
tb.open(msFile + '/SOURCE')
pcd_ra = tb.getcol('DIRECTION')[0][0] * 180 / numpy.pi
if pcd_ra < 0:
pcd_ra += 360
pcd_dec = tb.getcol('DIRECTION')[1][0] * 180 / numpy.pi
tb.close()
pcd = [pcd_ra, pcd_dec]
return pcd
def uvload(visfile):
"""load in visibilities from a uv-file
Parameters
----------
visfile: string
visibility data filename, can be model or data
Returns
-------
uu: numpy.array
u visibilities
vv: numpy.array
v visibilities
ww: numpy.array
w visibilities
Note
----
08-14-2015:
A better solution to the array-size mismatch problem that arises when calling `checkvis.uvmcmcfitVis` may be something similar to the to-be-implemented function `uvmodel.add`,
which looks for nspw to shape model_complex
"""
checker = visfile.find('uvfits')
if checker == -1:
uvfits = False
else:
uvfits = True
if uvfits:
visdata = fits.open(visfile)
visibilities = visdata[0].data
visheader = visdata[0].header
if visheader['NAXIS'] == 7:
# identify the channel frequency(ies):
visfreq = visdata[1].data
freq0 = visheader['CRVAL4']
dfreq = visheader['CDELT4']
cfreq = visheader['CRPIX4']
nvis = visibilities['DATA'][:, 0, 0, 0, 0, 0, 0].size
nspw = visibilities['DATA'][0, 0, 0, :, 0, 0, 0].size
nfreq = visibilities['DATA'][0, 0, 0, 0, :, 0, 0].size
npol = visibilities['DATA'][0, 0, 0, 0, 0, :, 0].size
if nfreq > 1:
uu = numpy.zeros([nvis, nspw, nfreq, npol])
vv = numpy.zeros([nvis, nspw, nfreq, npol])
ww = numpy.zeros([nvis, nspw, nfreq, npol])
else:
# if miriad is True:
# uu = numpy.zeros([nvis, nspw, nfreq, npol])
# vv = numpy.zeros([nvis, nspw, nfreq, npol])
# ww = numpy.zeros([nvis, nspw, nfreq, npol])
# else:
# uu = numpy.zeros([nvis, nspw, npol])
# vv = numpy.zeros([nvis, nspw, npol])
# ww = numpy.zeros([nvis, nspw, npol])
uu = numpy.zeros([nvis, nspw, npol])
vv = numpy.zeros([nvis, nspw, npol])
ww = numpy.zeros([nvis, nspw, npol])
#wgt = numpy.zeros([nvis, nspw, nfreq, npol])
# get spw frequencies
# reference freq + offset
for ispw in range(nspw):
if nspw > 1:
freqif = freq0 + visfreq['IF FREQ'][0][ispw]
else:
try:
freqif = freq0 + visfreq['IF FREQ'][0]
except:
freqif = freq0
#uu[:, ispw] = freqif * visibilities['UU']
#vv[:, ispw] = freqif * visibilities['VV']
for ipol in range(npol):
# then compute the spatial frequencies:
if nfreq > 1:
freq = (numpy.arange(nfreq) - cfreq + 1) * \
dfreq + freqif
freqvis = numpy.meshgrid(freq, visibilities['UU'])
uu[:, ispw, :, ipol] = freqvis[0] * freqvis[1]
freqvis = numpy.meshgrid(freq, visibilities['VV'])
vv[:, ispw, :, ipol] = freqvis[0] * freqvis[1]
freqvis = numpy.meshgrid(freq, visibilities['WW'])
ww[:, ispw, :, ipol] = freqvis[0] * freqvis[1]
else:
# if miriad is True:
# freq = (numpy.arange(nfreq) - cfreq + 1) * dfreq + freqif
# freqvis = numpy.meshgrid(freq, visibilities['UU'])
# uu[:, ispw, :, ipol] = freqvis[0] * freqvis[1]
# freqvis = numpy.meshgrid(freq, visibilities['VV'])
# vv[:, ispw, :, ipol] = freqvis[0] * freqvis[1]
# freqvis = numpy.meshgrid(freq, visibilities['WW'])
# ww[:, ispw, :, ipol] = freqvis[0] * freqvis[1]
# else:
# uu[:, ispw, ipol] = freqif * visibilities['UU']
# vv[:, ispw, ipol] = freqif * visibilities['VV']
# ww[:, ispw, ipol] = freqif * visibilities['WW']
uu[:, ispw, ipol] = freqif * visibilities['UU']
vv[:, ispw, ipol] = freqif * visibilities['VV']
ww[:, ispw, ipol] = freqif * visibilities['WW']
if visheader['NAXIS'] == 6:
# identify the channel frequency(ies):
freq0 = visheader['CRVAL4']
dfreq = visheader['CDELT4']
cfreq = visheader['CRPIX4']
nvis = visibilities['DATA'][:, 0, 0, 0, 0, 0].size
nfreq = visibilities['DATA'][0, 0, 0, :, 0, 0].size
npol = visibilities['DATA'][0, 0, 0, 0, :, 0].size
if nfreq > 1:
uu = numpy.zeros([nvis, nfreq, npol])
vv = numpy.zeros([nvis, nfreq, npol])
ww = numpy.zeros([nvis, nfreq, npol])
else:
uu = numpy.zeros([nvis, npol])
vv = numpy.zeros([nvis, npol])
ww = numpy.zeros([nvis, npol])
#wgt = numpy.zeros([nvis, nspw, nfreq, npol])
freqif = freq0
#uu[:, ispw] = freqif * visibilities['UU']
#vv[:, ispw] = freqif * visibilities['VV']
for ipol in range(npol):
# then compute the spatial frequencies:
if nfreq > 1:
freq = (numpy.arange(nfreq) - cfreq + 1) * dfreq + freqif
freqvis = numpy.meshgrid(freq, visibilities['UU'])
uu[:, 0, :, ipol] = freqvis[0] * freqvis[1]
freqvis = numpy.meshgrid(freq, visibilities['VV'])
vv[:, 0, :, ipol] = freqvis[0] * freqvis[1]
freqvis = numpy.meshgrid(freq, visibilities['WW'])
ww[:, 0, :, ipol] = freqvis[0] * freqvis[1]
else:
uu[:, ipol] = freqif * visibilities['UU']
vv[:, ipol] = freqif * visibilities['VV']
ww[:, ipol] = freqif * visibilities['WW']
else:
from taskinit import tb
# read in the uvfits data
tb.open(visfile)
uvw = tb.getcol('UVW')
uvspw = tb.getcol('DATA_DESC_ID')
tb.close()
tb.open(visfile + '/SPECTRAL_WINDOW')
freq = tb.getcol('CHAN_FREQ')
tb.close()
tb.open(visfile + '/POLARIZATION')
polinfo = tb.getcol('NUM_CORR')
tb.close()
npol = polinfo[0]
nspw = len(freq[0])
for ispw in range(nspw):
ilam = 3e8 / freq[0][ispw]
indx_spw = uvspw == ispw
uvw[:, indx_spw] /= ilam
uu = []
vv = []
ww = []
for ipol in range(npol):
uu.append(uvw[0, :])
vv.append(uvw[1, :])
ww.append(uvw[2, :])
uu = numpy.array(uu)
vv = numpy.array(vv)
ww = numpy.array(ww)
if uu[:, 0].size == 1:
uu = uu.flatten()
vv = vv.flatten()
ww = ww.flatten()
return uu, vv, ww
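# Note: uvfits stores UU/VV/WW in units of seconds; multiplying by the channel
# frequency in Hz, as done above, converts the baseline coordinates to
# wavelengths, which is the convention uvload returns.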
def visload(visfile):
checker = visfile.find('uvfits')
if checker == -1:
uvfits = False
else:
uvfits = True
if uvfits:
visdata = fits.open(visfile)
# get the telescope name
visheader = visdata[0].header
#telescop = visheader['TELESCOP']
# if we are dealing with SMA data
if visheader['NAXIS'] == 6:
nfreq = visdata[0].data['DATA'][0, 0, 0, :, 0, 0].size
if nfreq > 1:
data_real = visdata[0].data['DATA'][:, 0, 0, :, :, 0]
data_imag = visdata[0].data['DATA'][:, 0, 0, :, :, 1]
data_wgt = visdata[0].data['DATA'][:, 0, 0, :, :, 2]
else:
data_real = visdata[0].data['DATA'][:, 0, 0, 0, :, 0]
data_imag = visdata[0].data['DATA'][:, 0, 0, 0, :, 1]
data_wgt = visdata[0].data['DATA'][:, 0, 0, 0, :, 2]
# if we are dealing with ALMA or PdBI data
if visheader['NAXIS'] == 7:
nfreq = visdata[0].data['DATA'][0, 0, 0, 0, :, 0, 0].size
if nfreq > 1:
data_real = visdata[0].data['DATA'][:, 0, 0, :, :, :, 0]
data_imag = visdata[0].data['DATA'][:, 0, 0, :, :, :, 1]
data_wgt = visdata[0].data['DATA'][:, 0, 0, :, :, :, 2]
else:
data_real = visdata[0].data['DATA'][:, 0, 0, :, 0, :, 0]
data_imag = visdata[0].data['DATA'][:, 0, 0, :, 0, :, 1]
data_wgt = visdata[0].data['DATA'][:, 0, 0, :, 0, :, 2]
data_complex = numpy.array(data_real) + \
1j * numpy.array(data_imag)
else:
from taskinit import tb
# read in the CASA MS
tb.open(visfile)
vis_complex = tb.getcol('DATA')
vis_weight = tb.getcol('WEIGHT')
tb.close()
#tb.open(visfile + '/POLARIZATION')
#polinfo = tb.getcol('NUM_CORR')
#npol = polinfo[0]
data_complex = vis_complex
data_wgt = vis_weight
wgtshape = data_wgt.shape
if len(wgtshape) == 2:
npol = wgtshape[0]
nrow = wgtshape[1]
wgtshape = (npol, 1, nrow)
data_wgt = data_wgt.reshape(wgtshape)
#data_complex = []
#data_wgt = []
# for ipol in range(npol):
# data_complex.append(vis_complex[ipol, 0, :])
# data_wgt.append(vis_weight[ipol, :])
#data_complex = numpy.array(data_complex)
#data_wgt = numpy.array(data_wgt)
return data_complex, data_wgt
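# Note: in uvfits the last axis of the random-groups DATA array holds
# (real, imaginary, weight), which is why indices 0, 1, and 2 are split out
# above.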
def getStatWgt(real_raw, imag_raw, wgt_raw):
"""
Compute the weights as the rms scatter in the real and imaginary
visibilities.
"""
nvis = real_raw[:, 0].size
freqsize = real_raw[0, :].size
wgt_scaled = numpy.zeros([nvis, freqsize])
for i in range(nvis):
gwgt = wgt_raw[i, :] > 0
ngwgt = wgt_raw[i, gwgt].size
if ngwgt > 2:
reali = real_raw[i, gwgt]
imagi = imag_raw[i, gwgt]
rms_real = numpy.std(reali)
rms_imag = numpy.std(imagi)
rms_avg = (rms_real + rms_imag) / 2.
wgt_scaled[i, :] = 1 / rms_avg ** 2
return wgt_scaled
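# Minimal sketch (synthetic data, not part of the original module): for
# Gaussian noise of known sigma, the statistical weights computed by
# getStatWgt should approach 1/sigma**2, i.e. inverse variance.
def _getstatwgt_sanity_check(nvis=200, nfreq=64, sigma=0.5, seed=0):
    rng = numpy.random.RandomState(seed)
    real = sigma * rng.randn(nvis, nfreq)
    imag = sigma * rng.randn(nvis, nfreq)
    wgt = numpy.ones((nvis, nfreq))
    wgt_scaled = getStatWgt(real, imag, wgt)
    return numpy.median(wgt_scaled)  # should be close to 1 / sigma ** 2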
def statwt(visfileloc, newvisfileloc, ExcludeChannels=False):
"""
Replace the weights in 'visfile' with weights computed via getStatWgt.
"""
visfile = fits.open(visfileloc)
data_complex, data_wgt = visload(visfileloc)
data_real = numpy.real(data_complex)
data_imag = numpy.imag(data_complex)
wgt_original = data_wgt.copy()
if ExcludeChannels:
nwindows = len(ExcludeChannels) // 2
for win in range(0, nwindows * 2, 2):
chan1 = ExcludeChannels[win]
chan2 = ExcludeChannels[win + 1]
if data_real.ndim == 4:
data_wgt[:, :, chan1:chan2, :] = 0
else:
data_wgt[:, chan1:chan2, :] = 0
# get the number of visibilities, spws, frequencies, polarizations
if data_real.ndim == 4:
nvis = data_real[:, 0, 0, 0].size
nspw = data_real[0, :, 0, 0].size
nfreq = data_real[0, 0, :, 0].size
npol = data_real[0, 0, 0, :].size
wgt = numpy.zeros([nvis, nspw, nfreq, npol])
if data_real.ndim == 3:
# no spw
nvis = data_real[:, 0, 0].size
nspw = 0
nfreq = data_real[0, :, 0].size
npol = data_real[0, 0, :].size
wgt = numpy.zeros([nvis, nfreq, npol])
if nspw > 0:
for ispw in range(nspw):
for ipol in range(npol):
# compute real and imaginary components of the visibilities
real_raw = data_real[:, ispw, :, ipol]
imag_raw = data_imag[:, ispw, :, ipol]
wgt_raw = data_wgt[:, ispw, :, ipol]
wgt_orig = wgt_original[:, ispw, :, ipol]
oktoreplace = wgt_orig > 0
wgt_scaled = getStatWgt(real_raw, imag_raw, wgt_raw)
wgt_temp = wgt[:, ispw, :, ipol]
wgt_temp[oktoreplace] = wgt_scaled[oktoreplace]
wgt[:, ispw, :, ipol] = wgt_temp
visfile[0].data['DATA'][:, 0, 0, :, :, :, 2] = wgt
else:
for ipol in range(npol):
# compute real and imaginary components of the visibilities
real_raw = data_real[:, :, ipol]
imag_raw = data_imag[:, :, ipol]
wgt_raw = data_wgt[:, :, ipol]
wgt_scaled = getStatWgt(real_raw, imag_raw, wgt_raw)
wgt[:, :, ipol] = wgt_scaled
# visfile[0].data['DATA'][:, 0, 0, :, :, 2] = wgt
if nfreq > 1:
try:
visfile[0].data['DATA'][:, 0, 0, :, :, :, 2] = wgt
except ValueError:
# wgt.ndim is 3 if data_real.dim is 3
if data_real.ndim == 3:
# wgt.shape is defined using nvis, nfreq, npol
visfile[0].data['DATA'][:, 0, 0, 0, :, :, 2] = wgt
elif nfreq == 1:
visfile[0].data['DATA'][:, 0, 0, :, 0, :, 2] = wgt
visfile.writeto(newvisfileloc)
return
def scalewt(visdataloc, newvisdataloc):
"""
scale the weights such that:
Sum(wgt * real^2 + wgt * imag^2) = N_visibilities
Parameters
----------
visdataloc: string
uv-data filename
Returns
-------
newvisdataloc: string
output scaled uv-data filename
"""
visfile = fits.open(visdataloc)
data_complex, data_wgt = visload(visdataloc)
data_real = numpy.real(data_complex)
data_imag = numpy.imag(data_complex)
# scale the weights such that:
# Sum(wgt * real^2 + wgt * imag^2) = N_visibilities
wgt_scaled = data_wgt
wgzero = wgt_scaled > 0
N_vis = 2 * wgt_scaled[wgzero].size
wgtrealimag = wgt_scaled * (data_real ** 2 + data_imag ** 2)
wgtsum = wgtrealimag[wgzero].sum()
wgtscale = N_vis / wgtsum
print("Scaling the weights by a factor of ", wgtscale)
wgt_scaled = wgt_scaled * wgtscale
# read in the uvfits data
if data_real.ndim == 4:
visfile[0].data['DATA'][:, 0, 0, :, :, :, 2] = wgt_scaled
else:
if visfile[0].header['NAXIS'] == 6:
nfreq = visfile[0].data['DATA'][0, 0, 0, :, 0, 0].size
if nfreq > 1:
visfile[0].data['DATA'][:, 0, 0, :, :, 2] = wgt_scaled
else:
visfile[0].data['DATA'][:, 0, 0, 0, :, 2] = wgt_scaled
if visfile[0].header['NAXIS'] == 7:
visfile[0].data['DATA'][:, 0, 0, 0, :, :, 2] = wgt_scaled
visfile.writeto(newvisdataloc, clobber=True)
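# Minimal self-contained check of the normalization rule used above (synthetic
# data, not part of the original module): after scaling,
# Sum(wgt * (real^2 + imag^2)) equals the number of measurements, counting
# real and imaginary parts separately.
def _scalewt_normalization_check(nvis=1000, seed=0):
    rng = numpy.random.RandomState(seed)
    real = rng.randn(nvis)
    imag = rng.randn(nvis)
    wgt = numpy.ones(nvis)
    N_vis = 2 * wgt[wgt > 0].size
    wgtsum = (wgt * (real ** 2 + imag ** 2))[wgt > 0].sum()
    wgt_scaled = wgt * N_vis / wgtsum
    assert numpy.allclose((wgt_scaled * (real ** 2 + imag ** 2)).sum(), N_vis)
    return wgt_scaled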
def zerowt(visdataloc, newvisdataloc, ExcludeChannels):
visfile = fits.open(visdataloc)
data_complex, data_wgt = visload(visdataloc)
data_real = numpy.real(data_complex)
nwindows = len(ExcludeChannels) // 2
for win in range(0, nwindows * 2, 2):
chan1 = ExcludeChannels[win]
chan2 = ExcludeChannels[win + 1]
if data_real.ndim == 4:
visfile[0].data['DATA'][:, 0, 0, :, chan1:chan2, :, 2] = 0.0
else:
visfile[0].data['DATA'][:, 0, 0, chan1:chan2, :, 2] = 0.0
visfile.writeto(newvisdataloc)
# AS OF 2014-02-24, spectralavg IS NON-FUNCTIONAL
def spectralavg(visdataloc, newvisdataloc, Nchannels):
# bin in frequency space to user's desired spectral resolution
vis_data = fits.open(visdataloc)
data_complex, data_wgt = visload(visdataloc)
data_real = numpy.real(data_complex)
data_imag = numpy.imag(data_complex)
# get the number of visibilities, spws, frequencies, polarizations
if data_real.ndim == 4:
nvis = data_real[:, 0, 0, 0].size
nspw = data_real[0, :, 0, 0].size
nchan = data_real[0, 0, :, 0].size
npol = data_real[0, 0, 0, :].size
real_bin = numpy.zeros([nvis, nspw, Nchannels, npol])
imag_bin = numpy.zeros([nvis, nspw, Nchannels, npol])
wgt_bin = numpy.zeros([nvis, nspw, Nchannels, npol])
if data_real.ndim == 3:
nvis = data_real[:, 0, 0].size
nspw = 0
nchan = data_real[0, :, 0].size
npol = data_real[0, 0, :].size
real_bin = numpy.zeros([nvis, Nchannels, npol])
imag_bin = numpy.zeros([nvis, Nchannels, npol])
wgt_bin = numpy.zeros([nvis, Nchannels, npol])
chan1 = 0
dchan = nchan // Nchannels
chan2 = chan1 + dchan
if nspw > 1:
for ispw in range(nspw):
for ipol in range(npol):
for ichan in range(Nchannels):
for i in range(nvis):
gwgt = data_wgt[i, ispw, chan1:chan2, ipol] > 0
ngwgt = data_wgt[i, ispw, gwgt, ipol].size
if ngwgt == 0:
continue
value = data_real[i, ispw, gwgt, ipol].sum() / ngwgt
real_bin[i, ispw, ichan, ipol] = value
value = data_imag[i, ispw, gwgt, ipol].sum() / ngwgt
imag_bin[i, ispw, ichan, ipol] = value
value = data_wgt[i, ispw, gwgt, ipol].mean() * ngwgt
wgt_bin[i, ispw, ichan, ipol] = value
chan1 = chan2
chan2 = chan1 + dchan
newvis = numpy.zeros([nvis, 1, 1, nspw, Nchannels, npol, 3])
newvis[:, 0, 0, :, :, :, 0] = real_bin
newvis[:, 0, 0, :, :, :, 1] = imag_bin
newvis[:, 0, 0, :, :, :, 2] = wgt_bin
oldcrpix4 = vis_data[0].header['CRPIX4']
newcrpix4 = float(oldcrpix4) / nchan * Nchannels
newcrpix4 = numpy.floor(newcrpix4) + 1
vis_data[0].header['CRPIX4'] = newcrpix4
oldcdelt4 = vis_data[0].header['CDELT4']
newcdelt4 = oldcdelt4 * nchan / Nchannels
vis_data[0].header['CDELT4'] = newcdelt4
else:
for ipol in range(npol):
for ichan in range(Nchannels):
for i in range(nvis):
gwgt = data_wgt[i, chan1:chan2, ipol] > 0
ngwgt = data_wgt[i, gwgt, ipol].size
if ngwgt == 0:
continue
value = data_real[i, gwgt, ipol].sum() / ngwgt
real_bin[i, ichan, ipol] = value
value = data_imag[i, gwgt, ipol].sum() / ngwgt
imag_bin[i, ichan, ipol] = value
value = data_wgt[i, gwgt, ipol].mean() * ngwgt
wgt_bin[i, ichan, ipol] = value
chan1 = chan2
chan2 = chan1 + dchan
newvis =
|
numpy.zeros([nvis, 1, 1, Nchannels, npol, 3])
|
numpy.zeros
|
# coding: utf-8
import numpy as np
import pandas as pd
import time
from scipy.linalg import block_diag
from sklearn.metrics import mean_squared_error
from .bspline import Bspline
from .utils import mm, check_constraint, check_constraint_dim1
from .utils import check_constraint_dim2, check_constraint_full_model
class Stareg():
def __init__(self):
print("Class initialization")
self.BS = Bspline()
def fit(self, description, X, y):
model = self.create_model_from_description(description, X, y)
iterIdx = 1
B = self.create_basis_matrix(model)
S = self.create_smoothness_matrix(model)
K = self.create_constraint_matrix(model)
BtB = B.T @ B
Bty = B.T @ y
weights_compare, model = check_constraint_full_model(model)
weights_old = dict()
while not weights_compare == weights_old:
weights_old = weights_compare
coef_cpls = np.linalg.pinv(BtB + S + K) @ Bty
weights_compare, model = check_constraint_full_model(model, coef=coef_cpls)
print(f"Iteration {iterIdx}".center(50, "="))
print(f"MSE: {mean_squared_error(y, B @ coef_cpls)}".center(50, " "))
# rebuild the constraint matrix using the updated weights
K = self.create_constraint_matrix(model)
time.sleep(1)
iterIdx += 1
if iterIdx > 15:
print("Stop the count!")
break
print("".center(50, "="))
print("Iteration Finished!".center(50,"-"))
print("".center(50, "="))
return dict(coef_=coef_cpls, B=B, S=S, K=K, model=model)
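# Usage sketch (hypothetical data; `description` uses the tuple format
# documented for star_model below):
#   res = Stareg().fit(description, X, y)
#   ypred = Stareg().predict(Xpred, res["model"], res["coef_"])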
def predict(self, Xpred, model, coef_):
"""Calculate the predictions for Xpred and the given model and coef_.
Parameters:
-----------
Xpred : array - Input data to calculate the predictions for.
model : dict - Model dictionary.
coef_ : array - Coefficients of the constraint B-splines.
Returns:
--------
ypred : array - Predicted values.
"""
basis = []
for submodel in model.keys():
type_ = model[submodel]["type"]
nr_splines = model[submodel]["nr_splines"]
knot_types = model[submodel]["knot_type"]
knots = model[submodel]["knots"]
order = model[submodel]["order"]
print("Process ", type_)
#time.sleep(0.2)
if type_.startswith("s"):
dim = int(type_[2])-1
data = Xpred[:,dim]
B = np.zeros((len(data), nr_splines))
for j in range(order, len(knots)-1):
B[:,j-order] = self.BS.basisfunction(data, knots, j, order).ravel()
elif type_.startswith("t"):
dim1, dim2 = int(type_[2])-1, int(type_[4])-1
data = Xpred[:,[dim1, dim2]]
n_samples = len(data[:,0])
B1, B2 = np.zeros((n_samples, len(knots["k1"])-1-order[0])), np.zeros((n_samples, len(knots["k2"])-1-order[1]))
B = np.zeros((n_samples, np.prod(nr_splines)))
for j in range(order[0], len(knots["k1"])-1):
B1[:,j-order[0]] = self.BS.basisfunction(data[:,0], knots["k1"], j, order[0])
for j in range(order[1], len(knots["k2"])-1):
B2[:,j-order[1]] = self.BS.basisfunction(data[:,1], knots["k2"], j, order[1])
for i in range(n_samples):
B[i,:] = np.kron(B2[i,:], B1[i,:])
else:
print("Only B-splines (s) and tensor-product B-splines (t) are supported!")
basis.append(B)
# create combined basis matrix
B = np.concatenate(basis, axis=1)
y = B @ coef_
return y
def create_model_from_description(self, description, X, y):
model = dict()
parameter = ("type", "nr_splines", "constraint", "lambda_c", "knot_type")
for idx, submodel in enumerate(description):
model[f"f{idx+1}"] = dict()
for type_and_value in zip(parameter, submodel):
model[f"f{idx+1}"][type_and_value[0]] = type_and_value[1]
for submodel in model:
for key in model[submodel].keys():
if key == "type":
type_ = model[submodel][key]
nr_splines = model[submodel]["nr_splines"]
knot_type = model[submodel]["knot_type"]
constraint = model[submodel]["constraint"]
lambda_c = model[submodel]["lambda_c"]
if type_.startswith("s"):
dim = int(type_[2])-1
data = X[:,dim]
order = 3
B, knots = self.BS.basismatrix(X=data, nr_splines=nr_splines, l=order, knot_type=knot_type).values()
Ds = mm(nr_splines, constraint="smooth")
Dc = mm(nr_splines, constraint=constraint)
lam = self.BS.calc_GCV(data, y, nr_splines=nr_splines, l=order, knot_type=knot_type, nr_lam=100, plot_=0)["best_lambda"]
coef_pls = self.BS.fit_Pspline(data, y, nr_splines=nr_splines, l=order, knot_type=knot_type, lam=lam)["coef_"]
W = check_constraint(coef_pls, constraint, y=y, B=B)
elif type_.startswith("t"):
dim = [int(type_[2])-1, int(type_[4])-1]
data = X[:,[dim[0],dim[1]]]
order = (3,3)
B, knots1, knots2 = self.BS.tensorproduct_basismatrix(X=data, nr_splines=nr_splines, l=order, knot_type=knot_type).values()
Ds1 = mm(nr_splines, constraint="smooth", dim=0)
Ds2 = mm(nr_splines, constraint="smooth", dim=1)
Dc1 = mm(nr_splines, constraint=constraint[0], dim=0)
Dc2 = mm(nr_splines, constraint=constraint[1], dim=1)
lam = self.BS.calc_GCV_2d(data, y, nr_splines=nr_splines, l=order, knot_type=knot_type, nr_lam=100, plot_=0)["best_lambda"]
coef_pls = self.BS.fit_Pspline(data, y, nr_splines=nr_splines, l=order, knot_type=knot_type, lam=lam)["coef_"]
W1 = check_constraint_dim1(coef_pls, constraint[0], nr_splines)
W2 = check_constraint_dim2(coef_pls, constraint[1], nr_splines)
knots, Ds, Dc, W = dict(), dict(), dict(), dict()
knots["k1"], knots["k2"] = knots1, knots2
Ds["Ds1"], Ds["Ds2"] = Ds1, Ds2
Dc["Dc1"], Dc["Dc2"] = Dc1, Dc2
W["v1"], W["v2"] = W1, W2
model[submodel]["B"] = B
model[submodel]["knots"] = knots
model[submodel]["Ds"] = Ds
model[submodel]["Dc"] = Dc
model[submodel]["weights"] = W
model[submodel]["coef_pls"] = coef_pls
model[submodel]["best_lambda"] = lam
model[submodel]["order"] = order
return model
def create_basis_matrix(self, model):
B = [model[submodel]["B"] for submodel in model.keys()]
basis_matrix = np.concatenate(B,axis=1)
return basis_matrix
def create_smoothness_matrix(self, model):
Ds = []
for submodel in model.keys():
type_ = model[submodel]["type"]
if type_.startswith("s"):
Ds.append(model[submodel]["best_lambda"] * model[submodel]["Ds"].T @ model[submodel]["Ds"])
elif type_.startswith("t"):
Ds1 = model[submodel]["Ds"]["Ds1"]
Ds2 = model[submodel]["Ds"]["Ds2"]
Ds.append(model[submodel]["best_lambda"] * (Ds1.T@Ds1 + Ds2.T@Ds2))
smoothness_matrix = block_diag(*Ds)
return smoothness_matrix
def create_constraint_matrix(self, model):
Dc = []
for submodel in model.keys():
type_ = model[submodel]["type"]
if type_.startswith("s"):
Dc.append(model[submodel]["lambda_c"] * model[submodel]["Dc"].T @ np.diag(model[submodel]["weights"]) @ model[submodel]["Dc"])
elif type_.startswith("t"):
Dc1 = model[submodel]["Dc"]["Dc1"]
Dc2 = model[submodel]["Dc"]["Dc2"]
weights1 = np.diag(model[submodel]["weights"]["v1"])
weights2 = np.diag(model[submodel]["weights"]["v2"])
Dc.append(model[submodel]["lambda_c"][0]*(Dc1.T@weights1@Dc1) + model[submodel]["lambda_c"][1]*(Dc2.T@weights2@Dc2))
constraint_matrix = block_diag(*Dc)
return constraint_matrix
def create_coef_vector(self, model):
coef = []
for submodel in model.keys():
    coef.append(model[submodel]["coef_pls"])
return coef
def calc_edof(self, B, S, K):
"""Calculates the effective degree of freedom according to Fahrmeir, Regression 2013, p.475.
Parameters:
-----------
B : matrix - Basis matrix of the model.
S : matrix - Smoothness penalty matrix of the model, aka. lam_s * D_2.T @ D_2.
K : matrix - Constraint penalty matrix of the model, aka. lam_c * D_c.T @ D_c.
Returns:
--------
edof : float - Effective degree of freedom.
"""
BtB = B.T @ B
edof = np.trace(BtB @ np.linalg.pinv(BtB + S + K))
return edof
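# Quick sanity sketch (synthetic data, not part of the original module): with
# no penalties (S = K = 0) the hat matrix is a projection, so the effective
# degrees of freedom equal the number of basis functions.
def _edof_sanity_check(n=50, p=5, seed=0):
    rng = np.random.RandomState(seed)
    B = rng.randn(n, p)
    BtB = B.T @ B
    edof = np.trace(BtB @ np.linalg.pinv(BtB))
    assert np.isclose(edof, p)
    return edof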
""" LEGACY CODE STARTS HERE """
def star_model(descr, X, y):
"""Fit a structured additive regression model using B-splines to the data in (X,y).
Parameters:
-----------
descr : tuple of tuples - Describes the model structure, e.g.
descr = ( ("s(1)", 100, "inc", 6000, "e"),
("t(1,2)", (12,10), ("none", "none"), (6000,6000), ("e", "e")), ),
describing a model using a P-spline with increasing constraint and 100
basis functions for dimension 1 and a tensor-product P-spline without
constraints using 12 and 10 basis functions for the respective dimension.
X : array - np.array of the input data, shape (n_samples, n_dim)
y : array - np.array of the target data, shape (n_samples, )
Returns:
--------
d : dict - Returns a dictionary with the following key-value pairs:
basis=B,
smoothness=S,
constraint=K,
opt_lambdas=optimal_lambdas,
coef_=coef_pls,
weights=weights
"""
BS, TS = Bspline(), Bspline()
coefs = []
basis, smoothness = [], []
constr, optimal_lambdas = [], []
weights, weights_compare = [], []
S, K = [], []
for e in descr:
type_, nr_splines, constraints, lam_c, knot_types = e[0], e[1], e[2], e[3], e[4]
print("Process ", type_)
if type_.startswith("s"):
dim = int(type_[2])
B = BS.basismatrix(X=X[:,dim-1], nr_splines=nr_splines, l=3, knot_type=knot_types)["basis"]
Ds = mm(nr_splines, constraint="smooth", dim=dim-1)
Dc = mm(nr_splines, constraint=constraints, dim=dim-1)
lam = BS.calc_GCV(X=X[:,dim-1], y=y, nr_splines=nr_splines, l=3, knot_type=knot_types, nr_lam=50)["best_lambda"]
coef_pls = BS.fit_Pspline(X=X[:,dim-1], y=y, nr_splines=nr_splines, l=3, knot_type=knot_types, lam=lam)["coef_"]
W = check_constraint(coef=coef_pls, constraint=constraints, y=y, B=B)
weights.append(W)
weights_compare += list(W)
elif type_.startswith("t"):
print("Constraint = ", constraints)
dim1, dim2 = int(type_[2]), int(type_[4])
B = TS.tensorproduct_basismatrix(X=X[:,[dim1-1, dim2-1]], nr_splines=nr_splines, l=(3,3), knot_type=knot_types)["basis"]
Ds1 = mm(nr_splines, constraint="smooth", dim=dim1-1)
Ds2 = mm(nr_splines, constraint="smooth", dim=dim2-1)
Dc1 = mm(nr_splines, constraint=constraints[0], dim=dim1-1)
Dc2 = mm(nr_splines, constraint=constraints[1], dim=dim2-1)
lam = TS.calc_GCV_2d(X=X[:,[dim1-1, dim2-1]], y=y, nr_splines=nr_splines, l=(3,3), knot_type=knot_types, nr_lam=50)["best_lambda"]
coef_pls = TS.fit_Pspline(X=X[:,[dim1-1, dim2-1]], y=y, nr_splines=nr_splines, l=(3,3), knot_type=knot_types, lam=lam)["coef_"]
W1 = check_constraint_dim1(coef_pls, nr_splines=nr_splines, constraint=constraints[0])
W2 = check_constraint_dim2(coef_pls, nr_splines=nr_splines, constraint=constraints[1])
weights.append((W1, W2))
weights_compare += list(W1) + list(W2)
Ds = (Ds1, Ds2)
Dc = (Dc1, Dc2)
else:
print("Only B-splines (s) and tensor-product B-splines (t) are supported!")
basis.append(B)
smoothness.append(Ds)
constr.append(Dc)
optimal_lambdas.append(lam)
coefs.append(coef_pls)
coef_pls = np.concatenate(coefs)
# create combined basis matrix
B = np.concatenate(basis, axis=1)
# create combined smoothness matrix
for i, s in enumerate(smoothness):
if len(s) == 2:
S.append(optimal_lambdas[i]*(s[0].T @ s[0] + s[1].T@s[1]))
else:
S.append(optimal_lambdas[i]*(s.T@s))
S = block_diag(*S)
# create combined constraint matrix
for i, c in enumerate(constr):
if len(c) == 2:
K.append(6000*(c[0].T @ np.diag(weights[i][0]) @ c[0]) + 6000*(c[1].T @np.diag(weights[i][1]) @ c[1]))
else:
K.append(6000* (c.T@ np.diag(weights[i])@c))
K = block_diag(*K)
weights_old = [0]*len(weights_compare)
iterIdx = 1
BtB = B.T @ B
Bty = B.T @ y
# Iterate till no change in weights
df = pd.DataFrame(data=dict(w0=np.ones(2*12*10+100+100-2)))
while not (weights_compare == weights_old):
weights_old = weights_compare
coef_pls = np.linalg.pinv(BtB + S + K) @ (Bty)
weights, weights_compare = check_constraint_full(coef_=coef_pls, descr=descr, basis=B, y=y)
print(f" Iteration {iterIdx} ".center(50, "="))
print(f"MSE = {mean_squared_error(y, B@coef_pls).round(7)}".center(50, "-"))
K = []
print("Calculate new constraint matrix K".center(50,"-"))
for i, c in enumerate(constr):
if len(c) == 2:
K.append(descr[i][3][0]*(c[0].T @ np.diag(weights[i][0]) @ c[0]) + descr[i][3][0]*(c[1].T @
|
np.diag(weights[i][1])
|
numpy.diag
|
"""
The ``block`` class represents a block of material in a simulation. Blocks are not meant to be
interacted with directly -- when using the ``problem`` class to set up a simulation, blocks are
automatically added or deleted as needed using the provided interfaces. This documentation
is provided for completeness, but should not need to be used in regular use of the code.
The class contains information on the number of grid points in the block, the location of the block
within the simulation, the material properties using the ``material`` class, the types of boundary
conditions at the block edges, and any complex geometries specified through the ``surface``
and ``curve`` classes.
"""
from __future__ import division, print_function
from os.path import join
from .surface import surface, curve
from .material import material
import numpy as np
class block(object):
'''
Class representing a block in a simulation
A block contains the following internal variables:
:ivar ndim: Number of dimensions (2 or 3)
:type ndim: int
:ivar mode: Rupture mode (2 or 3, relevant only for 2D problems)
:type mode: int
:ivar nx: Number of grid points (tuple of 3 positive integers)
:type nx: tuple
:ivar xm: Coordinates of lower left corner in simulation (tuple of 3 nonnegative integers)
:type xm: tuple
:ivar coords: Location of block within simulation domain (tuple of 3 nonnegative integers)
:type coords: tuple
:ivar lx: Block length in each spatial dimension (tuple of 3 positive floats, can be overridden
by setting a curve or surface to one of the edges)
:type lx: tuple
:param m: Material properties (see ``material`` class)
:type m: material
:param bounds: List of boundary conditions. Position indicates boundary location (0 = left,
1 = right, 2 = front, 3 = back, 4 = bottom, 5 = top). Possible strings for
boundary condition include ``'absorbing'`` (no incoming wave), ``'free'``
(traction free surface), ``'rigid'`` (no displacement), or ``'none'`` (boundary
conditions determined by interface conditions)
:type bounds: list
:param surfs: List of bounding surfaces. Position indicates boundary location (0 = left,
1 = right, 2 = front, 3 = back, 4 = bottom, 5 = top). For 2D problems,
you can only populate the list with curves, and 3D problems require
surfaces. If the default rectangular surface is to be used, use ``None``
for a particular surface.
:type surfs: list
'''
def __init__(self, ndim, mode, nx, mat):
'''
Initialize a new ``block`` instance
Creates a new block instance with the given dimensionality, rupture mode, number of
grid points, and material type. If the problem is 2d, the number of z grid points will
be automatically set to one. By default, the block length is unity in each direction,
the lower left coordinate is ``(0., 0., 0.)``, all boundary conditions are set to ``'none'``,
material properties take on their default values, and there are no irregular edge shapes.
All of these default properties can be modified using the provided interfaces.
:param ndim: Number of spatial dimensions (must be 2 or 3)
:type ndim: int
:param mode: Slip mode (2 or 3, only relevant if the problem is in 2D)
:type mode: int
:param nx: Tuple of length 3 with number of grid points ``(nx, ny, nz)``
:type nx: tuple or list
:param mat: Block material type (string, must be ``'elastic'`` or ``'plastic'``). The
method initializes a default set of material properties based on this type.
:type mat: str
:returns: New block instance
:rtype: block
'''
assert(ndim == 2 or ndim == 3), "ndim must be 2 or 3"
assert(mode == 2 or mode == 3), "mode must be 2 or 3"
assert len(nx) == 3, "nx must be a list or tuple of positive integers"
assert (nx[0] > 0 and nx[1] > 0 and nx[2] >0), "nx must be a list or tuple of positive integers"
assert (mat == "elastic" or mat == "plastic"), "material type must be elastic or plastic"
self.ndim = int(ndim)
self.mode = int(mode)
self.coords = (0, 0, 0)
if (self.ndim == 2):
self.nx = (int(nx[0]), int(nx[1]), 1)
else:
self.nx = (int(nx[0]), int(nx[1]), int(nx[2]))
self.xm = (0., 0., 0.)
self.lx = (1., 1., 1.)
if (self.ndim == 2):
self.lx = (1., 1., 0.)
self.m = material(mat)
self.bounds = 2*self.ndim*["none"]
self.surfs = 2*self.ndim*[None]
def get_mode(self):
"""
Returns rupture mode (2 or 3), only valid for 2D problems (stored at domain level)
:returns: Rupture mode
:rtype: int
"""
return self.mode
def set_mode(self,mode):
"""
Sets rupture mode
Rupture mode is only valid for 2D problems, and is either 2 or 3 (other values will
cause an error, and non-integer values will be converted to integers). For 3D problems,
entering a different value of the rupture mode will alter the rupture mode cosmetically
but will have no effect on the simulation.
:param mode: New value of rupture mode
:type mode: int
:returns: None
"""
assert(mode == 2 or mode == 3), "Rupture mode must be 2 or 3"
self.mode = int(mode)
def get_ndim(self):
"""
Returns Number of spatial dimensions
:returns: Number of spatial dimensions
:rtype: int
"""
return self.ndim
def set_ndim(self,ndim):
"""
Sets number of dimensions
The new number of spatial dimensions must be an integer, either 2 or 3. If a different
value is given, the code will raise an error. If a non-integer value is given that is acceptable,
the code will convert it to an integer.
**Note:** Converting a 3D problem into a 2D problem will automatically collapse the
number of grid points and the number of blocks in the $z$ direction to be 1. Any
modifications to these quantities that were done previously will be lost.
:param ndim: New value for ndim (must be 2 or 3)
:type ndim: int
:returns: None
"""
assert(ndim == 2 or ndim == 3), "Number of dimensions must be 2 or 3"
self.ndim = int(ndim)
if self.ndim == 2:
self.nx = (self.nx[0], self.nx[1], 1)
self.xm = (self.xm[0], self.xm[1], 0.)
self.lx = (self.lx[0], self.lx[1], 0.)
self.bounds = self.bounds[0:4]
self.surfs = self.surfs[0:4]
else:
if len(self.bounds) == 4:
self.bounds += 2*["none"]
if len(self.surfs) == 4:
self.surfs += 2*[None]
def get_nx(self):
"""
Returns number of grid points in (nx, ny, nz) format for the given block
:returns: Number of grid points (tuple of three integers)
:rtype: tuple
"""
return self.nx
def set_nx(self,nx):
"""
Sets number of grid points
Changes the number of grid points to the specified tuple/list of 3 nonnegative integers.
Bad values of ``nx`` will raise an error.
:param nx: New value of number of grid points (tuple of 3 positive integers)
:type nx: tuple or list
:returns: None
"""
assert len(nx) == 3, "nx must be a list or tuple of length 3 of positive integers"
for i in range(3):
assert nx[i] >= 0, "nx must be a list or tuple of length 3 of positive integers"
if (self.ndim == 2):
self.nx = (int(nx[0]), int(nx[1]), 1)
else:
self.nx = (int(nx[0]), int(nx[1]), int(nx[2]))
def get_xm(self):
"""
Returns coordinates of the block's lower left corner (tuple of 3 floats)
:returns: Coordinates of lower left corner (tuple of 3 floats)
:rtype: tuple
"""
return self.xm
def set_xm(self,xm):
"""
Sets block lower left coordinate
Changes lower left coordinate of a block to the provided tuple/list of integers.
:param xm: New value of lower left coordinate (list/tuple of integers)
:type xm: tuple or list
:returns: None
"""
assert len(xm) == 3 or (self.ndim == 2 and len(xm) == 2), "xm must be a list or tuple of length 3 of floats"
if self.ndim == 2:
self.xm = (float(xm[0]), float(xm[1]), 0.)
else:
self.xm = (float(xm[0]), float(xm[1]), float(xm[2]))
def get_lx(self):
"""
Returns block lengths as (lx, ly, lz) tuple
:returns: Block dimensions (tuple of 3 floats) in x, y, and z dimensions
:rtype: tuple
"""
return self.lx
def set_lx(self,lx):
"""
Sets block lengths
Changes block length to ``lx`` (tuple of 2 (2D only) or 3 floats) where the block length
in each dimension is given by ``(lx, ly, lz)``
:param lx: New value of block lengths (tuple of 2 (2D) or 3 floats)
:type lx: tuple or list
:returns: None
"""
assert (len(lx) == 3 or (len(lx) == 2 and self.ndim == 2)), "lx must be a list or tuple of length 3 of positive floats"
for l in lx:
assert l >= 0., "lx must be a list or tuple of length 3 of positive floats"
if self.ndim == 3:
self.lx = (float(lx[0]), float(lx[1]), float(lx[2]))
else:
self.lx = (float(lx[0]), float(lx[1]), 0.)
def get_coords(self):
"""
Returns block coordinates (tuple of integer indices in each coordinate direction)
:returns: Block coordinates (tuple of 3 integers)
:rtype: tuple
"""
return self.coords
def set_coords(self,coords):
"""
Sets block coordinates to a new value
Set block coordinates to ``coords``, a tuple of nonnegative integers denoting the location
of the block in the domain.
:param coords: New coordinates (tuple or list of nonnegative integers)
:type coords: tuple or list
:returns: None
"""
assert len(coords) == 3, "coords must be a list or tuple of length 3 of nonnegative integers"
for i in range(3):
assert coords[i] >= 0, "coords must be a list or tuple of length 3 of nonnegative integers"
self.coords = (int(coords[0]), int(coords[1]), int(coords[2]))
if self.ndim == 2:
self.coords = (int(coords[0]), int(coords[1]), 0)
def get_bounds(self, loc = None):
"""
Returns boundary types
If ``loc`` (int) is provided, the method returns a specific location (str). Otherwise it returns a list
of all boundaries, which will have length 4 for 2D problems and length 6 for 3D problems.
``loc`` serves effectively as an index into the list, and the indices correspond to the following:
0 = left, 1 = right, 2 = front, 3 = back, 4 = bottom, 5 = top. Note that the location must be
0 <= loc < 2*ndim
:param loc: Location of boundary that is desired (optional). If ``loc`` is not provided, returns
a list
:type loc: int or None
:returns: Boundary type (if ``loc`` provided, returns a string of the boundary type for the
desired location; if not, returns a list of strings indicating all boundary types)
:rtype: str or list
"""
if loc is None:
return self.bounds
elif loc >= 0 and loc < 2*self.ndim:
return self.bounds[loc]
else:
raise TypeError("loc must be None or an integer location")
def set_bounds(self, bounds, loc = None):
"""
Sets boundary types
Changes the type of boundary conditions on a block. Acceptable values are 'absorbing'
(incoming wave amplitude set to zero), 'free' (no traction on boundary), 'rigid' (no displacement
of boundary), or 'none' (boundary conditions set by imposing interface conditions).
There are two ways to use ``set_bounds``:
1. Set ``loc`` to be ``None`` (default) and provide a list of strings specifying boundary
type for ``bounds``. The length of ``bounds`` is 4 for a 2D simulation and 6 for 3D.
2. Set ``loc`` to be an integer denoting location and give ``bounds`` as a single string.
The possible locations correspond to the following: 0 = left, 1 = right, 2 = front, 3 = back,
4 = bottom, 5 = top. 4 and 5 are only applicable to 3D simulations (0 <= loc < 2*ndim).
:param bounds: New boundary condition type (string or list of strings)
:type bounds: str or list
:param loc: If provided, only change one type of boundary condition rather than all (optional,
loc serves as an index into the list if used)
:type loc: int or None
:returns: None
"""
if loc is None:
assert len(bounds) == 2*self.ndim, "Must give 2*ndim boundary types"
for i in range(2*self.ndim):
assert (bounds[i] == "none") or (bounds[i] == "absorbing") or (bounds[i] == "free") or (bounds[i] == "rigid"), "Boundary types must be none, absorbing, free, or rigid"
self.bounds = bounds
elif loc >=0 and loc < 2*self.ndim:
assert (bounds == "none") or (bounds == "absorbing") or (bounds == "free") or (bounds == "rigid"), "Boundary types must be none, absorbing, free, or rigid"
self.bounds[loc] = bounds
else:
raise TypeError("loc must either be None or an integer location")
def get_surf(self, loc):
"""
Returns block boundary surface for a block edge
Returns the surface assigned to a specific edge. ``loc`` determines the edge that is
returned (integer, corresponding to an index). Location indices correspond to the
following: 0 = left, 1 = right, 2 = front, 3 = back, 4 = bottom, 5 = top
Note that the location must be 0 <= loc < 2*ndim (for 2D problems, ``loc`` cannot be 4 or 5).
Returns either a curve (2D problems) or surface (3D problems) or None
If ``loc`` indices are out of bounds, the code will raise an error.
:param loc: Location of desired boundary (0 = left, 1 = right, 2 = front, 3 = back,
4 = bottom, 5 = top). For 2D problems, ``loc`` must be between 0 and 3.
:type loc: int
:returns: curve or surface corresponding to the selected location. If the
desired edge does not have a bounding surface, returns None.
:rtype: curve or surface or None
"""
assert type(loc) is int and (loc >= 0 and loc < 2*self.ndim), "location out of range"
return self.surfs[loc]
def set_surf(self, loc, surf):
"""
Sets boundary surface for a particular block edge
Changes the bounding surface of a particular block edge. Location is determined
by ``loc`` which is an integer that indexes into a list. Locations correspond to the
following: 0 = left, 1 = right, 2 = front, 3 = back, 4 = bottom, 5 = top. Note that the
location must be 0 <= loc < 2*ndim
For 2D problems, ``surf`` must be a curve. For 3D problems, ``surf`` must be a surface.
Other choices will raise an error. If ``loc`` is out of bounds, the code
will also signal an error.
:param loc: Location of desired boundary (0 = left, 1 = right, 2 = front, 3 = back,
4 = bottom, 5 = top). For 2D problems, ``loc`` must be between 0 and 3.
:type loc: int
:param surf: curve or surface corresponding to the selected block and location
:type surf: curve or surface
:returns: None
"""
assert type(loc) is int and (loc >= 0 and loc < 2*self.ndim), "location out of range"
if self.ndim == 3:
assert type(surf) is surface
if loc == 0 or loc == 1:
assert surf.get_direction() == 'x', "surface direction does not match location"
assert surf.get_n1() == self.nx[1] and surf.get_n2() == self.nx[2], "number of grid points does not match"
elif loc == 2 or loc == 3:
assert surf.get_direction() == 'y', "surface direction does not match location"
assert surf.get_n1() == self.nx[0] and surf.get_n2() == self.nx[2], "number of grid points does not match"
else:
assert surf.get_direction() == 'z', "surface direction does not match location"
assert surf.get_n1() == self.nx[0] and surf.get_n2() == self.nx[1], "number of grid points does not match"
else:
assert type(surf) is curve
if loc == 0 or loc == 1:
assert surf.get_direction() == 'x', "surface direction does not match location"
assert surf.get_n1() == self.nx[1], "number of grid points does not match"
else:
assert surf.get_direction() == 'y', "surface direction does not match location"
assert surf.get_n1() == self.nx[0], "number of grid points does not match"
self.surfs[loc] = surf
def delete_surf(self, loc):
"""
Removes boundary surface for a particular block edge
Removes the bounding surface of a particular block edge. Location is determined by
``loc`` which is an integer that indexes into a list. Locations correspond to the following:
0 = left, 1 = right, 2 = front, 3 = back, 4 = bottom, 5 = top. Note that the location must be
0 <= loc < 2*ndim
If ``loc`` is out of bounds, the code will also signal an error.
:param loc: Location of desired boundary to be removed (0 = left, 1 = right, 2 = front,
3 = back, 4 = bottom, 5 = top). For 2D problems, ``loc`` must be between 0 and 3.
:type loc: int
:returns: None
"""
assert type(loc) is int and (loc >= 0 and loc < 2*self.ndim), "location out of range"
self.surfs[loc] = None
def get_material(self):
"""
Returns material
Returns the material class associated with this block
:returns: Material class with properties for this block
:rtype: material
"""
return self.m
def set_mattype(self, mattype):
"""
Sets block material type ('elastic' or 'plastic')
Sets the material type for the block. Options are 'elastic' for an elastic simulation
and 'plastic' for a plastic simulation. Anything else besides these options will cause the
code to raise an error.
:param mattype: New material type ('elastic' or 'plastic')
:type mattype: str
:returns: None
"""
self.m.set_type(mattype)
def set_material(self,mat):
"""
Sets block material properties
Sets new material properties stored in an instance of the ``material`` class.
:param mat: New material properties
:type mat: material
:returns: None
"""
assert type(mat) is material
self.m = mat
def get_x(self, coord):
"""
Returns grid value for given spatial index
For a given problem set up, returns the location of a particular set of coordinate indices.
Note that since blocks are set up by setting values only on the edges, coordinates on
the interior are not specified *a priori* and instead determined using transfinite interpolation
to generate a regular grid on the block interiors. Calling ``get_x`` generates the interior grid
to find the coordinates of the desired point.
Within each call to ``get_x``, the grid is generated on the fly only for the relevant block
where the desired point is located. It is not stored. This helps reduce memory requirements
for large 3D problems (since the Python module does not run in parallel), but is slower.
Because the computational grid is regular, though, it can be done in a single step in closed
form.
Returns a numpy array of length 3 holding the spatial location (x, y, z).
:param coord: Spatial coordinate where grid values are desired (tuple or list of 3 integers
or 2 integers for 2D problems)
:type coord: tuple or list
:returns: (x, y, z) coordinates of spatial location
:rtype: ndarray
"""
if self.ndim == 2:
assert (len(coord) == 2 or len(coord) == 3), "Coordinates must have length 2 or 3"
coord = (coord[0], coord[1])
else:
assert len(coord) == 3, "Coordinates must have length 3"
for i in range(self.ndim):
assert (coord[i] >= 0 and coord[i] < self.nx[i]), "Coordinate value out of range"
# make temporary surfaces and check that edges match
tmpsurfs = self.make_tempsurfs()
self.checksurfs(tmpsurfs)
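# Transfinite interpolation: with normalized coordinates p, q (and r in 3D)
# in [0, 1], the interior point is a blend of the bounding edges/faces minus
# the corner contributions the blend counts more than once, e.g. in 2D:
#   x(p, q) = (1-p)*Left(q) + p*Right(q) + (1-q)*Bottom(p) + q*Top(p)
#             - bilinear combination of the four corner points.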
p = float(coord[0])/float(self.nx[0]-1)
q = float(coord[1])/float(self.nx[1]-1)
x = np.zeros(3)
if self.ndim == 2:
x[0] = ((1.-p)*tmpsurfs[0].get_x(coord[1])+p*tmpsurfs[1].get_x(coord[1])+
(1.-q)*tmpsurfs[2].get_x(coord[0])+q*tmpsurfs[3].get_x(coord[0])-
(1.-p)*(1.-q)*tmpsurfs[0].get_x(0)-(1.-q)*p*tmpsurfs[1].get_x(0)-
q*(1.-p)*tmpsurfs[0].get_x(-1)-q*p*tmpsurfs[1].get_x(-1))
x[1] = ((1.-p)*tmpsurfs[0].get_y(coord[1])+p*tmpsurfs[1].get_y(coord[1])+
(1.-q)*tmpsurfs[2].get_y(coord[0])+q*tmpsurfs[3].get_y(coord[0])-
(1.-p)*(1.-q)*tmpsurfs[0].get_y(0)-(1.-q)*p*tmpsurfs[1].get_y(0)-
q*(1.-p)*tmpsurfs[0].get_y(-1)-q*p*tmpsurfs[1].get_y(-1))
else:
r = float(coord[2])/float(self.nx[2]-1)
x[0] = ((1.-p)*tmpsurfs[0].get_x((coord[1], coord[2]))+p*tmpsurfs[1].get_x((coord[1], coord[2]))+
(1.-q)*tmpsurfs[2].get_x((coord[0], coord[2]))+q*tmpsurfs[3].get_x((coord[0], coord[2]))+
(1.-r)*tmpsurfs[4].get_x((coord[0], coord[1]))+r*tmpsurfs[5].get_x((coord[0], coord[1])))
x[1] = ((1.-p)*tmpsurfs[0].get_y((coord[1], coord[2]))+p*tmpsurfs[1].get_y((coord[1], coord[2]))+
(1.-q)*tmpsurfs[2].get_y((coord[0], coord[2]))+q*tmpsurfs[3].get_y((coord[0], coord[2]))+
(1.-r)*tmpsurfs[4].get_y((coord[0], coord[1]))+r*tmpsurfs[5].get_y((coord[0], coord[1])))
x[2] = ((1.-p)*tmpsurfs[0].get_z((coord[1], coord[2]))+p*tmpsurfs[1].get_z((coord[1], coord[2]))+
(1.-q)*tmpsurfs[2].get_z((coord[0], coord[2]))+q*tmpsurfs[3].get_z((coord[0], coord[2]))+
(1.-r)*tmpsurfs[4].get_z((coord[0], coord[1]))+r*tmpsurfs[5].get_z((coord[0], coord[1])))
x[0] -= ((1.-q)*(1.-p)*tmpsurfs[0].get_x((0, coord[2]))+(1.-q)*p*tmpsurfs[1].get_x((0, coord[2]))+
q*(1.-p)*tmpsurfs[0].get_x((-1,coord[2]))+q*p*tmpsurfs[1].get_x((-1,coord[2]))+
(1.-p)*(1.-r)*tmpsurfs[0].get_x((coord[1], 0))+p*(1.-r)*tmpsurfs[1].get_x((coord[1], 0))+
(1.-q)*(1.-r)*tmpsurfs[2].get_x((coord[0], 0))+q*(1.-r)*tmpsurfs[3].get_x((coord[0], 0))+
(1.-p)*r*tmpsurfs[0].get_x((coord[1], -1))+p*r*tmpsurfs[1].get_x((coord[1],-1))+
(1.-q)*r*tmpsurfs[2].get_x((coord[0], -1))+q*r*tmpsurfs[3].get_x((coord[0], -1)))
x[1] -= ((1.-q)*(1.-p)*tmpsurfs[0].get_y((0, coord[2]))+(1.-q)*p*tmpsurfs[1].get_y((0, coord[2]))+
q*(1.-p)*tmpsurfs[0].get_y((-1,coord[2]))+q*p*tmpsurfs[1].get_y((-1,coord[2]))+
(1.-p)*(1.-r)*tmpsurfs[0].get_y((coord[1], 0))+p*(1.-r)*tmpsurfs[1].get_y((coord[1], 0))+
(1.-q)*(1.-r)*tmpsurfs[2].get_y((coord[0], 0))+q*(1.-r)*tmpsurfs[3].get_y((coord[0], 0))+
(1.-p)*r*tmpsurfs[0].get_y((coord[1], -1))+p*r*tmpsurfs[1].get_y((coord[1],-1))+
(1.-q)*r*tmpsurfs[2].get_y((coord[0], -1))+q*r*tmpsurfs[3].get_y((coord[0], -1)))
x[2] -= ((1.-q)*(1.-p)*tmpsurfs[0].get_z((0, coord[2]))+(1.-q)*p*tmpsurfs[1].get_z((0, coord[2]))+
q*(1.-p)*tmpsurfs[0].get_z((-1,coord[2]))+q*p*tmpsurfs[1].get_z((-1,coord[2]))+
(1.-p)*(1.-r)*tmpsurfs[0].get_z((coord[1], 0))+p*(1.-r)*tmpsurfs[1].get_z((coord[1], 0))+
(1.-q)*(1.-r)*tmpsurfs[2].get_z((coord[0], 0))+q*(1.-r)*tmpsurfs[3].get_z((coord[0], 0))+
(1.-p)*r*tmpsurfs[0].get_z((coord[1], -1))+p*r*tmpsurfs[1].get_z((coord[1],-1))+
(1.-q)*r*tmpsurfs[2].get_z((coord[0], -1))+q*r*tmpsurfs[3].get_z((coord[0], -1)))
x[0] += ((1.-p)*(1.-q)*(1.-r)*tmpsurfs[0].get_x((0,0))+p*(1.-q)*(1.-r)*tmpsurfs[1].get_x((0,0))+
(1.-p)*q*(1.-r)*tmpsurfs[0].get_x((-1,0))+(1.-p)*(1.-q)*r*tmpsurfs[0].get_x((0,-1))+
p*q*(1.-r)*tmpsurfs[1].get_x((-1,0))+p*(1.-q)*r*tmpsurfs[1].get_x((0,-1))+
(1.-p)*q*r*tmpsurfs[0].get_x((-1,-1))+p*q*r*tmpsurfs[1].get_x((-1,-1)))
x[1] += ((1.-p)*(1.-q)*(1.-r)*tmpsurfs[0].get_y((0,0))+p*(1.-q)*(1.-r)*tmpsurfs[1].get_y((0,0))+
(1.-p)*q*(1.-r)*tmpsurfs[0].get_y((-1,0))+(1.-p)*(1.-q)*r*tmpsurfs[0].get_y((0,-1))+
p*q*(1.-r)*tmpsurfs[1].get_y((-1,0))+p*(1.-q)*r*tmpsurfs[1].get_y((0,-1))+
(1.-p)*q*r*tmpsurfs[0].get_y((-1,-1))+p*q*r*tmpsurfs[1].get_y((-1,-1)))
x[2] += ((1.-p)*(1.-q)*(1.-r)*tmpsurfs[0].get_z((0,0))+p*(1.-q)*(1.-r)*tmpsurfs[1].get_z((0,0))+
(1.-p)*q*(1.-r)*tmpsurfs[0].get_z((-1,0))+(1.-p)*(1.-q)*r*tmpsurfs[0].get_z((0,-1))+
p*q*(1.-r)*tmpsurfs[1].get_z((-1,0))+p*(1.-q)*r*tmpsurfs[1].get_z((0,-1))+
(1.-p)*q*r*tmpsurfs[0].get_z((-1,-1))+p*q*r*tmpsurfs[1].get_z((-1,-1)))
return np.array(x)
def make_tempsurfs(self):
"""
Create temporary surface list
This method generates all six (four in 2D) bounding surfaces (curves in 2D). Note that
these surfaces are not usually stored for rectangular block edges to save memory,
as they are trivial to create. The temporary surfaces can be used to check that the
edges of the surfaces/curves match or to use transfinite interpolation to generate the grid.
:returns: List of all bounding surfaces (not stored beyond the time they are needed)
:rtype: list
"""
tmpsurf = []
if self.ndim == 2:
for i in range(4):
if self.surfs[i] is None:
if i == 0:
tmpsurf.append(curve(self.nx[1], 'x', np.ones(self.nx[1])*self.xm[0],
np.linspace(self.xm[1], self.xm[1]+self.lx[1], self.nx[1])))
elif i == 1:
tmpsurf.append(curve(self.nx[1], 'x', np.ones(self.nx[1])*(self.xm[0]+self.lx[0]),
np.linspace(self.xm[1], self.xm[1]+self.lx[1], self.nx[1])))
elif i == 2:
tmpsurf.append(curve(self.nx[0], 'y', np.linspace(self.xm[0], self.xm[0]+self.lx[0], self.nx[0]),
np.ones(self.nx[0])*self.xm[1]))
else:
tmpsurf.append(curve(self.nx[0], 'y',np.linspace(self.xm[0], self.xm[0]+self.lx[0], self.nx[0]),
np.ones(self.nx[0])*(self.xm[1]+self.lx[1])))
else:
tmpsurf.append(self.surfs[i])
else:
for i in range(6):
if self.surfs[i] is None:
if i == 0:
tmpsurf.append(surface(self.nx[1], self.nx[2], 'x', np.ones((self.nx[1], self.nx[2]))*self.xm[0],
np.meshgrid(np.linspace(self.xm[1], self.xm[1]+self.lx[1], self.nx[1]),np.linspace(self.xm[2], self.xm[2]+self.lx[2], self.nx[2]), indexing='ij')[0],
np.meshgrid(np.linspace(self.xm[1], self.xm[1]+self.lx[1], self.nx[1]),np.linspace(self.xm[2], self.xm[2]+self.lx[2], self.nx[2]), indexing='ij')[1]))
elif i == 1:
tmpsurf.append(surface(self.nx[1], self.nx[2], 'x', np.ones((self.nx[1], self.nx[2]))*(self.xm[0]+self.lx[0]),
np.meshgrid(np.linspace(self.xm[1], self.xm[1]+self.lx[1], self.nx[1]),np.linspace(self.xm[2], self.xm[2]+self.lx[2], self.nx[2]), indexing='ij')[0],
np.meshgrid(
|
np.linspace(self.xm[1], self.xm[1]+self.lx[1], self.nx[1])
|
numpy.linspace
|
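# --- Hedged sketch: 2D transfinite interpolation on the unit square. ---
# The block above blends boundary-surface values with weights p, q, r; this
# standalone toy shows the same construction in 2D. The boundary curves and
# grid size are invented for illustration, not taken from the block class.
import numpy as np

def transfinite_2d(bottom, top, left, right, n):
    """bottom/top/left/right map a parameter in [0, 1] to an (x, y) point."""
    p, q = np.meshgrid(np.linspace(0., 1., n), np.linspace(0., 1., n), indexing='ij')

    def ev(curve_fn, s):
        pts = np.array([curve_fn(v) for v in np.ravel(s)])
        return pts[:, 0].reshape(s.shape), pts[:, 1].reshape(s.shape)

    bx, by = ev(bottom, p); tx, ty = ev(top, p)
    lx, ly = ev(left, q); rx, ry = ev(right, q)
    c00, c01 = np.array(bottom(0.)), np.array(top(0.))
    c10, c11 = np.array(bottom(1.)), np.array(top(1.))
    # edge blend minus the doubly counted corner contribution
    x = ((1.-q)*bx + q*tx + (1.-p)*lx + p*rx
         - ((1.-p)*(1.-q)*c00[0] + p*(1.-q)*c10[0] + (1.-p)*q*c01[0] + p*q*c11[0]))
    y = ((1.-q)*by + q*ty + (1.-p)*ly + p*ry
         - ((1.-p)*(1.-q)*c00[1] + p*(1.-q)*c10[1] + (1.-p)*q*c01[1] + p*q*c11[1]))
    return x, y

# straight edges reproduce a uniform grid exactly
X, Y = transfinite_2d(lambda s: (s, 0.), lambda s: (s, 1.),
                      lambda s: (0., s), lambda s: (1., s), 5)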
import numpy as np
from numpy.fft import fft2, ifft2
from scipy.signal import convolve2d
def deconv(y: np.ndarray, k: np.ndarray, lam: float,
beta=1, beta_rate=2*np.sqrt(2), beta_max=256) -> np.ndarray:
"""Perform non-blind deconvolution of a corrupted input image using
the given blur kernel. It is assumed that the exponent alpha=2/3.
Parameters
----------
y : np.ndarray
Corrupted input image
k : np.ndarray
Blur kernel for non-blind deconvolution
lam : float
Regularization parameter that trades off closeness to input image
vs. closeness to assumed gradient statistics
small => preserve L2 closeness to input image
large => preserve assumed hyper-Laplacian gradient statistics
beta : float
Initial value of beta parameter for half-quadratic splitting iteration
beta_rate : float
Multiplicative factor by which to increase beta each iteration
beta_max : float
Terminate iteration when beta exceeds this value
Returns
-------
np.ndarray
Deconvolved image
"""
# validate inputs
y = np.array(y)
k = np.array(k)
if y.ndim != 2:
raise ValueError('image y must be 2-dimensional')
if k.ndim != 2:
raise ValueError('kernel k must be 2-dimensional')
if lam < 0:
raise ValueError('regularization parameter lam must be >= 0')
if beta <= 0:
raise ValueError('beta must be > 0')
if beta_rate <= 1:
raise ValueError('beta_rate must be > 1')
k = pad_to_odd(k)
k = k / np.sum(k) # kernel should be normalized
alpha = 2/3 # TODO: allow any value of alpha
nomin1, denom1, denom2 = precompute_fft_terms(y, k)
xr = y
while beta < beta_max:
v1 = circ_diff_1(xr)
v2 = circ_diff_2(xr)
w1 = compute_w(v1, alpha, beta)
w2 = compute_w(v2, alpha, beta)
xr = compute_x(nomin1, denom1, denom2, w1, w2, lam, beta)
# translate to compensate for off-center kernel
xr = np.roll(xr, (k.shape[0]//2-1, k.shape[1]//2-1), axis=(0, 1))
beta *= beta_rate
return xr
def pad_to_shape(a: np.ndarray, shape: tuple) -> np.ndarray:
a = np.array(a)
b = np.zeros(shape)
ny, nx = a.shape
b[:ny, :nx] = a
return b
def pad_to_odd(k: np.ndarray) -> np.ndarray:
k_shape = list(k.shape)
if k_shape[0] % 2 == 0:
k_shape[0] += 1
if k_shape[1] % 2 == 0:
k_shape[1] += 1
k = pad_to_shape(k, k_shape)
return k
def circ_diff_1(a: np.ndarray) -> np.ndarray:
# return np.diff(np.hstack([a, a[:, 0, np.newaxis]]), axis=1)
return np.hstack([np.diff(a, axis=1), a[:, 0, np.newaxis] - a[:, -1, np.newaxis]])
def circ_diff_2(a: np.ndarray) -> np.ndarray:
# return np.diff(np.vstack([a, a[0, :]]), axis=0)
return np.vstack([np.diff(a, axis=0), a[0, :] - a[-1, :]])
def precompute_fft_terms(y: np.ndarray, k: np.ndarray) -> tuple:
kp = pad_to_shape(k, y.shape)
FK = fft2(kp)
nomin1 = np.conj(FK) * fft2(y)
denom1 = np.abs(FK)**2
FF1 = fft2(pad_to_shape([[1, -1]], y.shape))
FF2 = fft2(pad_to_shape([[1], [-1]], y.shape))
denom2 = np.abs(FF1)**2 + np.abs(FF2)**2
return (nomin1, denom1, denom2)
def compute_x(nomin1: np.ndarray, denom1: np.ndarray, denom2: np.ndarray,
w1: np.ndarray, w2: np.ndarray, lam: float, beta: float) -> np.ndarray:
gamma = beta / lam
denom = denom1 + gamma * denom2
w11 = -circ_diff_1(w1)
w22 = -circ_diff_2(w2)
nomin2 = w11 + w22
nomin = nomin1 + gamma * fft2(nomin2)
xr = np.real(ifft2(nomin / denom))
return xr
def compute_w(v: np.ndarray, alpha: np.ndarray, beta: float):
# TODO: extend this function to handle any value of alpha
if np.allclose(alpha, 2/3):
return compute_w23(v, beta)
raise ValueError('only alpha=2/3 is currently supported')
def compute_w23(v: np.ndarray, beta: float):
# direct analytic solution when alpha=2/3
# see Algorithm 3 in the source paper
eps = 1e-6
m = 8/(27*beta**3)
t1 = -9/8*v**2
t2 = v**3/4
t3 = -1/8*m*v**2
t4 = -t3/2 + np.sqrt(0j - m**3/27 + (m*v**2)**2/256)
t5 = np.power(t4, 1/3)
t6 = 2*(-5/18*t1 + t5 + m/(3*t5))
t7 = np.sqrt(t1/3 + t6)
r1 = 3*v/4 + np.sqrt(t7 + np.sqrt(0j-(t1+t6+t2/t7)))/2
r2 = 3*v/4 + np.sqrt(t7 - np.sqrt(0j-(t1+t6+t2/t7)))/2
r3 = 3*v/4 + np.sqrt(-t7 + np.sqrt(0j-(t1+t6-t2/t7)))/2
r4 = 3*v/4 + np.sqrt(-t7 - np.sqrt(0j-(t1+t6-t2/t7)))/2
r = [r1, r2, r3, r4]
c1 = np.abs(np.imag(r)) < eps
c2 = np.real(r)*np.sign(v) > np.abs(v)/2
c3 = np.real(r)*np.sign(v) < np.abs(v)
wstar = np.max((c1 & c2 & c3) * np.real(r)*
|
np.sign(v)
|
numpy.sign
|
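# --- Hedged usage sketch for the deconv() routine above. ---
# Blur a synthetic image with a box kernel, add a little noise, and ask for
# the restoration. The image, kernel, noise level, and lam value are all
# invented; circular boundary handling matches the FFT-based solver.
import numpy as np
from scipy.signal import convolve2d

rng = np.random.default_rng(0)
img = np.zeros((64, 64))
img[16:48, 16:48] = 1.0
kern = np.ones((5, 5)) / 25.0
blurred = convolve2d(img, kern, mode='same', boundary='wrap')
blurred += 0.01 * rng.standard_normal(blurred.shape)
restored = deconv(blurred, kern, lam=2000.0)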
import pylab as plt
import numpy as np
from axycolor import rgb2lab
from itertools import product
def middle_divisors(n):
for i in range(int(n ** 0.5), 1, -1):
if n % i == 0:
return i, n // i
return middle_divisors(n+1) # If prime number take next one
def resolution(N, numerator=16, divisor=9):
width = int(np.ceil(np.sqrt(float(numerator) / divisor * N)))
height = int(np.ceil(float(N)/width))
return width, height
def show_image(pixels, width, height, cmap=plt.cm.gray, scale=1, blocking=False):
pixels = np.asarray(pixels)
plt.imshow(scale*pixels.reshape((height, width)), interpolation="nearest", cmap=cmap)
plt.show(block=blocking)
def show_images2d(list_of_pixels, shape, cmap=plt.cm.gray, scale=1, blocking=False):
height, width = shape
ncols, nrows = resolution(len(list_of_pixels))
image = scale/2. * np.ones((nrows * (height+1) - 1, ncols * (width+1) - 1), dtype=float)
for pixels, (row, col) in zip(list_of_pixels, product(range(nrows), range(ncols))):
pixels = np.asarray(pixels).reshape(height, width)
image[row*(height+1):(row+1)*(height+1)-1, col*(width+1):(col+1)*(width+1)-1] = pixels
plt.imshow(scale*image, interpolation="nearest", cmap=cmap)
plt.subplots_adjust(left=0., right=1., top=0.95, bottom=0.05)
plt.show(block=blocking)
def show_images3d(list_of_pixels, shape, cmap=plt.cm.gray, scale=1, blocking=False):
height, width, depth = shape
ncols, nrows = resolution(len(list_of_pixels))
for d in range(depth):
plt.figure(d)
image = scale/2. * np.ones((nrows * (height+1) - 1, ncols * (width+1) - 1), dtype=float)
for pixels, (row, col) in zip(list_of_pixels, product(range(nrows), range(ncols))):
pixels = np.asarray(pixels).reshape(height, width, depth)[:, :, d]
image[row*(height+1):(row+1)*(height+1)-1, col*(width+1):(col+1)*(width+1)-1] = pixels
plt.imshow(scale*image, interpolation="nearest", cmap=cmap)
plt.subplots_adjust(left=0., right=1., top=0.95, bottom=0.05)
plt.show(block=blocking)
def cmap(bg=(0, 0, 0), exclude=[], n_colors=30):
# Generate a sizable number of RGB triples. This represents our space of
# possible choices. By starting in RGB space, we ensure that all of the
# colors can be generated by the monitor.
bg = np.array([bg] + exclude)
x = np.linspace(0, 1, n_colors) # divisions along each axis in RGB space
R, G, B = np.meshgrid(x, x, x)
rgb = np.c_[R.flatten(), G.flatten(), B.flatten()]
lab = rgb2lab(rgb)
bglab = rgb2lab(bg)
lastlab = bglab[0]
mindist2 = np.ones(len(rgb)) * np.inf
for bglab_i in bglab[1:]:
dist2 =
|
np.sum((lab-bglab_i)**2, axis=1)
|
numpy.sum
|
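# --- Hedged sketch: greedy farthest-point color selection. ---
# The mindist2 bookkeeping above supports picking candidates whose nearest
# already-chosen color is as far away as possible; this toy does the same in
# plain Euclidean space (standing in for Lab). Data are illustrative.
import numpy as np

def greedy_palette(candidates, start, n):
    chosen = [np.asarray(start, dtype=float)]
    mindist2 = np.full(len(candidates), np.inf)
    for _ in range(n):
        mindist2 = np.minimum(mindist2, np.sum((candidates - chosen[-1])**2, axis=1))
        chosen.append(candidates[np.argmax(mindist2)])
    return np.array(chosen[1:])

cands = np.random.default_rng(1).random((1000, 3))
palette = greedy_palette(cands, start=[0., 0., 0.], n=8)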
# -*- coding:utf-8 -*-
import math
import phate
import anndata
import shutil
import warnings
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.spatial.distance import cdist
from scipy.stats import wilcoxon, pearsonr
from scipy.spatial import distance_matrix
from sklearn.decomposition import PCA
# from python_codes.train.train import train
from python_codes.train.clustering import clustering
from python_codes.train.pseudotime import pseudotime
from python_codes.util.util import load_breast_cancer_data, preprocessing_data, save_features
from python_codes.util.exchangeable_loom import write_exchangeable_loom
warnings.filterwarnings("ignore")
from python_codes.util.util import *
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial','Roboto']
rcParams['savefig.dpi'] = 300
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable, inset_locator
title_sz = 16
####################################
#----------Get Annotations---------#
####################################
def get_adata_from_embeddings(args, sample_name, dataset="breast_cancer"):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
feature_fp = os.path.join(output_dir, "features.tsv")
adata = sc.read_csv(feature_fp, delimiter="\t", first_column_names=None)
return adata
def get_clusters(args, sample_name, method="leiden", dataset="breast_cancer"):
original_spatial = args.spatial
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
pred_clusters = pd.read_csv(f"{output_dir}/{method}.tsv", header=None).values.flatten().astype(str)
args.spatial = original_spatial
cluster_color_dict = get_cluster_colors(args, sample_name)
unique_cluster_dict = {cluster:cluster_color_dict[cluster]["abbr"] for cluster in cluster_color_dict.keys()}
uniq_pred = np.unique(pred_clusters)
for cid, cluster in enumerate(uniq_pred):
pred_clusters[pred_clusters == cluster] = unique_cluster_dict[int(cluster)]
return pred_clusters
def get_cluster_colors_and_labels_original():
ann_dict = {
0: "Cancer 1",
1: "Immune:B/plasma",
2: "Adipose",
3: "Immune:APC/B/T cells",
4: "Cancer:Immune rich",
5: "Cancer 2",
6: "Cancer Connective"
}
color_dict = {
0: "#771122",
1: "#AA4488",
2: "#05C1BA",
3: "#F7E54A",
4: "#D55802",
5: "#137777",
6: "#124477"
}
return ann_dict, color_dict
def get_cluster_colors(args, sample_name):
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
clusters = df["Cluster ID"].values.astype(int)
annotations = df["Annotations"].values.astype(str)
colors = df["Color"].values.astype(str)
abbrs = df["Abbr"].values.astype(str)
cur_dict = {}
for cid, cluster in enumerate(clusters):
cur_dict[cluster] = {
"annotation" : annotations[cid],
"color" : colors[cid],
"abbr" : abbrs[cid]
}
return cur_dict
def get_top_n_cluster_specific_genes(args, sample_name, method, dataset="breast_cancer", top_n=3):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t")
df = df.loc[:top_n-1, df.columns.str.endswith("_n")]
cluster_specific_genes_dict = {}
for cluster_abbr in df.columns:
# strip("_n") would also remove leading/trailing 'n' characters; drop only the "_n" suffix
cluster_specific_genes_dict[cluster_abbr[:-len("_n")]] = df[cluster_abbr].values.astype(str)
return cluster_specific_genes_dict
def save_cluster_specific_genes(args, adata, sample_name, method, dataset="breast_cancer", qval=0.05):
args.spatial = True
output_dir = f'{args.output_dir}/{get_target_fp(args, dataset, sample_name)}'
fp = f'{args.dataset_dir}/Visium/Breast_Cancer/putative_cell_type_colors/{sample_name}.csv'
df = pd.read_csv(fp)
abbrs = np.array(np.unique(df["Abbr"].values.astype(str)))
cluster_marker_genes_fp = f'{output_dir}/marker_genes_pval_gby_{method}.tsv'
df = pd.read_csv(cluster_marker_genes_fp, sep="\t", header=0)
for cid, cluster_name in enumerate(abbrs):
sub_df = df.loc[df.loc[:, f"{cluster_name}_p"] <= qval, f"{cluster_name}_n"]
genes = np.array(np.unique(sub_df.values.flatten().astype(str)))
output_fp = f'{output_dir}/cluster_specific_marker_genes/{cluster_name}.tsv'
mkdir(os.path.dirname(output_fp))
np.savetxt(output_fp, genes[:], delimiter="\n", fmt="%s")
print(f"Saved at {output_fp}")
all_genes = np.array(list(adata.var_names))
output_fp = f'{output_dir}/cluster_specific_marker_genes/background_genes.tsv'
mkdir(os.path.dirname(output_fp))
np.savetxt(output_fp, all_genes[:], delimiter="\n", fmt="%s")
print(f"Saved at {output_fp}")
def get_GO_term_dict(args):
base_dir = f"{args.dataset_dir}/Visium/Breast_Cancer/analysis"
genes_with_go_ids_fp = f'{base_dir}/genes_with_go_ids.csv'
go_id_to_genes_dict_pkl_fp = f"{base_dir}/go_id_to_genes_dict.pkl"
if os.path.exists(go_id_to_genes_dict_pkl_fp):
with open(go_id_to_genes_dict_pkl_fp, 'rb') as f:
go_terms_dict = pickle.load(f)
return go_terms_dict
else:
df = pd.read_csv(genes_with_go_ids_fp).values.astype(str)
go_terms = np.array(np.unique(df[:, 1]))
go_terms_dict = {go_id : df[df[:, 1] == go_id, 0] for go_id in go_terms}
with open(go_id_to_genes_dict_pkl_fp, 'wb') as f:
pickle.dump(go_terms_dict, f, -1)
print(f"Saved at {go_id_to_genes_dict_pkl_fp}")
return go_terms_dict
def get_GO_terms_with_spatial_coherent_expr(args, adata, sample_name, go_term_dict, dataset="breast_cancer"):
coords = adata.obsm["spatial"]
index = np.arange(coords.shape[0])
genes = np.array(adata.var_names)
GO_high_expressed = {}
GO_high_expressed_pvals = {}
n_go_terms = len(go_term_dict)
for gid, (go_id, go_genes) in enumerate(go_term_dict.items()):
if (gid + 1) % 500 == 0:
print(f"Processed {gid + 1}/{n_go_terms}: {100. * (gid + 1)/n_go_terms}% GO terms")
expr = adata.X[:, np.isin(genes, go_genes)].mean(axis=1)
avg_expr = expr.mean()
std_expr = expr.std()
outlier_val = avg_expr + std_expr
ind = np.array(np.where(expr > outlier_val)).flatten()
if ind.size > 5:
sub_coords = coords[ind, :]
sub_dists = cdist(sub_coords, sub_coords, 'euclidean')
rand_index =
|
np.random.choice(index, size=ind.size)
|
numpy.random.choice
|
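# --- Hedged sketch: the spatial-coherence test suggested above. ---
# Compare the mean pairwise distance among high-expression spots against
# random subsets of the same size; a clearly smaller observed mean points to
# spatial clustering. Coordinates and the "high" set are synthetic.
import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
coords = rng.random((500, 2))
high = np.arange(20)
obs = cdist(coords[high], coords[high]).mean()
null = []
for _ in range(200):
    sub = coords[rng.choice(len(coords), size=high.size)]
    null.append(cdist(sub, sub).mean())
pval = (np.sum(np.array(null) <= obs) + 1) / (len(null) + 1)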
import cv2
import numpy as np
import pandas as pd
import random
def create_windows(size_out, input_shape, overlap):
# overlap is the overlap factor: 2 gives adjacent windows 50% overlap
row_st = 0
row_ed = size_out[0]
gfrcrowst = []
gfrcrowed = []
while row_ed < input_shape[0]:
gfrcrowst.append(row_st)
gfrcrowed.append(row_ed)
row_st = int(row_st + size_out[0] / overlap)
row_ed = int(row_st + size_out[0])
row_ed = input_shape[0]
row_st = row_ed - size_out[0]
gfrcrowst.append(row_st)
gfrcrowed.append(row_ed)
col_st = 0
col_ed = size_out[1]
gfrccolst = []
gfrccoled = []
while col_ed < input_shape[1]:
gfrccolst.append(col_st)
gfrccoled.append(col_ed)
col_st = int(col_st + size_out[1] / overlap)
col_ed = int(col_st + size_out[1])
col_ed = input_shape[1]
col_st = col_ed - size_out[1]
gfrccolst.append(col_st)
gfrccoled.append(col_ed)
nrow = len(gfrcrowst)
ncol = len(gfrccolst)
gfrcrowst = np.reshape(np.tile(gfrcrowst, ncol), (nrow * ncol, 1))
gfrcrowed = np.reshape(np.tile(gfrcrowed, ncol), (nrow * ncol, 1))
gfrccolst = np.reshape(np.repeat(gfrccolst, nrow), (nrow * ncol, 1))
gfrccoled = np.reshape(np.repeat(gfrccoled, nrow), (nrow * ncol, 1))
gfrcwindz = np.hstack((gfrcrowst, gfrccolst, gfrcrowed, gfrccoled))
gfrcwindz = np.array(gfrcwindz, dtype=int)
return gfrcwindz
def get_detect_in_wind(file_boxes, dict_in):
xmin = dict_in["xmin"]
xmax = dict_in["xmax"]
ymin = dict_in["ymin"]
ymax = dict_in["ymax"]
min_size_x = dict_in["min_size_x"]
min_size_y = dict_in["min_size_y"]
min_pct = dict_in["min_pct"]
out_string = ""
out_list = []
n_out = 0
for ln in range(file_boxes.shape[0]):
line = file_boxes.iloc[ln]
# check if detection is in window
if line.xmax >= xmin and line.xmin < xmax and line.ymax >= ymin and line.ymin < ymax:
# get original size of box
orig_wid = line.xmax - line.xmin
orig_hei = line.ymax - line.ymin
# find new position of bbox
line.xmax = np.minimum(line.xmax, xmax)
line.xmin = np.maximum(line.xmin, xmin)
line.ymax = np.minimum(line.ymax, ymax)
line.ymin = np.maximum(line.ymin, ymin)
# get new width and height
line.wid = line.xmax - line.xmin
# if this makes the box too thin skip to next detection
if line.wid < min_size_x:
continue
if line.wid < (orig_wid / min_pct):
continue
line.height = line.ymax - line.ymin
# if this makes the box too short skip to next detection
if line.height < min_size_y:
continue
if line.height < (orig_hei / min_pct):
continue
line.wid_half = np.divide(line.wid, 2)
line.hei_half = np.divide(line.height, 2)
line.xc = np.add(line.xmin, line.wid_half)
line.yc = np.add(line.ymin, line.hei_half)
# convert position in image to position in window
line.xc = (line.xc - xmin) / (xmax - xmin)
line.yc = (line.yc - ymin) / (ymax - ymin)
line.wid = line.wid / (xmax - xmin)
line.height = line.height / (ymax - ymin)
line_out = [line.oc, line.xc, line.yc, line.wid, line.height]
out_list.extend(line_out)
n_out += 1
# output position in window
out_string = out_string + str(line.oc) + ' ' + str(line.xc) + ' ' + str(line.yc) + ' '
out_string = out_string + str(line.wid) + ' ' + str(line.height) + '\n'
out_array = np.array(out_list)
out_array = np.reshape(out_array, (n_out, 5))
return out_string, out_array
def get_tile_image(image_in, options):
xmin = options["xmin"]
xmax = options["xmax"]
ymin = options["ymin"]
ymax = options["ymax"]
# get just this window from image and write it out
image_out = image_in[ymin:ymax, xmin:xmax]
fsize = options["final_size"]
osize = options["size_out"]
# resize only when the extracted window size differs from the requested final size
resize = fsize[0] != osize[0] or fsize[1] != osize[1]
if resize:
    image_out = cv2.resize(image_out, fsize)
return image_out
def get_tile_name(wnd, options):
file_out = options["file_out"]
out_path = options["out_path"]
# if filename contains a directory split out
split_filename = file_out.split("/")
if len(split_filename) > 1:
file_out = split_filename[0] + '_'
for splt in range(1, len(split_filename)):
file_out = file_out + split_filename[splt] + '_'
# remove unnecessary extra underscore
file_out = file_out[:-1]
# create text file name and write output to text file
out_name = file_out + '_' + str(wnd)
# get just this window from image and write it out
out_name = out_path + out_name
return out_name
def write_txt_file(out_string, out_name):
# get rid of final line separator
out_string = out_string[:-1]
txt_path = out_name + '.txt'
with open(txt_path, "w") as text_file:
text_file.write(out_string)
def blank_txt_file(out_name):
txt_path = out_name + '.txt'
with open(txt_path, "w") as text_file:
text_file.write("")
def create_tiled_data(filez, fl, windowz, options, keep_blanks=False, pblank=1.0):
base_dir = options["base_dir"]
data_list = options["data_list"]
out_path = options["out_path"]
image_shape = options["image_shape"]
size_out = options["size_out"]
final_size = options["final_size"]
cols = final_size[1]
rows = final_size[0]
file_name = filez[fl]
print(file_name)
# get root of file name
file_out = file_name[:-4]
# get list of detections in this image
keep_list = data_list.file_loc == file_name
file_boxes = data_list[keep_list]
# get min and max positions for boxes
file_boxes['wid_half'] = np.divide(file_boxes.wid, 2)
file_boxes['hei_half'] = np.divide(file_boxes.height, 2)
file_boxes['xmin'] = np.subtract(file_boxes.xc, file_boxes.wid_half)
file_boxes['xmax'] =
|
np.add(file_boxes.xc, file_boxes.wid_half)
|
numpy.add
|
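# --- Small usage sketch for create_windows() above. ---
# Tile a 384x512 image into 256x256 windows with a 50% overlap factor; the
# image and window sizes are arbitrary illustration values.
windz = create_windows(size_out=(256, 256), input_shape=(384, 512), overlap=2)
# each row is [row_start, col_start, row_end, col_end]
print(windz.shape, windz[0])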
#########################################################
# #
# HYBRID GENETIC ALGORITHM (24.05.2016) #
# #
# <NAME> #
# #
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY #
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #
# THE SOFTWARE CAN BE USED BY ANYONE SOLELY FOR THE #
# PURPOSES OF EDUCATION AND RESEARCH. #
# #
#########################################################
import math
import random
import numpy as np
import matplotlib.pyplot as plt
#########################################################
# ALGORITHM PARAMETERS #
#########################################################
N=50 # Define here the population size
Genome=4 # Define here the chromosome length
generation_max= 650 # Define here the maximum number of
# generations/iterations
#########################################################
# VARIABLES ALGORITHM #
#########################################################
popSize=N+1
genomeLength=Genome+1
top_bottom=3
QuBitZero = np.array([[1], [0]])
QuBitOne = np.array([[0], [1]])
AlphaBeta = np.empty([top_bottom])
fitness = np.empty([popSize])
probability = np.empty([popSize])
# qpv: quantum chromosome (or population vector, QPV)
qpv = np.empty([popSize, genomeLength, top_bottom])
nqpv = np.empty([popSize, genomeLength, top_bottom])
# chromosome: classical chromosome
chromosome = np.empty([popSize, genomeLength], dtype=int)
child1 = np.empty([popSize, genomeLength, top_bottom])
child2 =
|
np.empty([popSize, genomeLength, top_bottom])
|
numpy.empty
|
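# --- Hedged sketch: the usual initialization step of a quantum-inspired GA. ---
# Each qubit starts in the equal superposition |alpha|^2 = |beta|^2 = 1/2, and a
# classical chromosome is obtained by "measuring" every qubit. This mirrors the
# qpv/chromosome arrays above but is a standalone toy, not the author's routine.
import numpy as np

rng = np.random.default_rng(0)
pop, length = 5, 4
alpha = np.full((pop, length), 1.0 / np.sqrt(2.0))  # amplitude of |0>
beta = np.full((pop, length), 1.0 / np.sqrt(2.0))   # amplitude of |1>
measured = (rng.random((pop, length)) < beta**2).astype(int)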
import itertools
from operator import attrgetter
import numpy as np
from glypy.structure.glycan_composition import (
FrozenMonosaccharideResidue,
Composition, from_iupac_lite)
from glycopeptidepy.structure.glycan import GlycanCompositionProxy
from glycan_profiling.structure import SpectrumGraph
from glycan_profiling.tandem.oxonium_ions import SignatureSpecification, single_signatures, compound_signatures
from .base import GlycopeptideSpectrumMatcherBase
_WATER = Composition("H2O")
keyfn = attrgetter("intensity")
def base_peak_tuple(peaks):
if peaks:
return max(peaks, key=keyfn)
else:
return None
try:
from glycan_profiling._c.tandem.tandem_scoring_helpers import base_peak_tuple
except ImportError:
pass
class GlycanCompositionSignatureMatcher(GlycopeptideSpectrumMatcherBase):
minimum_intensity_ratio_threshold = 0.025
def __init__(self, scan, target, mass_shift=None):
super(GlycanCompositionSignatureMatcher, self).__init__(scan, target, mass_shift)
self._init_signature_matcher()
def _init_signature_matcher(self):
self.glycan_composition = self._copy_glycan_composition()
self.expected_matches = dict()
self.unexpected_matches = dict()
self.maximum_intensity = float('inf')
def _copy_glycan_composition(self):
return GlycanCompositionProxy(self.target.glycan_composition)
signatures = single_signatures
compound_signatures = compound_signatures
all_signatures = single_signatures.copy()
all_signatures.update(compound_signatures)
def match(self, error_tolerance=2e-5, rare_signatures=False, *args, **kwargs):
if len(self.spectrum) == 0:
return
self.maximum_intensity = self.base_peak()
water = _WATER
spectrum = self.spectrum
for mono in self.signatures:
is_expected = mono.is_expected(self.glycan_composition)
peak = ()
for mass in mono.masses:
peak += spectrum.all_peaks_for(mass, error_tolerance)
if peak:
peak = base_peak_tuple(peak)
else:
if is_expected:
self.expected_matches[mono] = None
continue
if is_expected:
self.expected_matches[mono] = peak
else:
self.unexpected_matches[mono] = peak
if rare_signatures:
for compound in self.compound_signatures:
is_expected = compound.is_expected(self.glycan_composition)
peak = ()
for mass in compound.masses:
peak += spectrum.all_peaks_for(mass, error_tolerance)
if peak:
peak = base_peak_tuple(peak)
else:
if is_expected:
self.expected_matches[compound] = None
continue
if is_expected:
self.expected_matches[compound] = peak
else:
self.unexpected_matches[compound] = peak
def _find_peak_pairs(self, error_tolerance=2e-5, include_compound=False, *args, **kwargs):
peak_set = self.spectrum
if len(peak_set) == 0:
return []
pairs = SpectrumGraph()
minimum_intensity_threshold = 0.01
blocks = [(part, part.mass()) for part in self.glycan_composition]
if include_compound:
compound_blocks = list(itertools.combinations(self.target, 2))
compound_blocks = [(block, sum(part.mass() for part in block))
for block in compound_blocks]
blocks.extend(compound_blocks)
max_peak = self.maximum_intensity
threshold = max_peak * minimum_intensity_threshold
for peak in peak_set:
if peak.intensity < threshold or peak.neutral_mass < 150:
continue
for block, mass in blocks:
for other in peak_set.all_peaks_for(peak.neutral_mass + mass, error_tolerance):
if other.intensity < threshold:
continue
pairs.add(peak, other, block)
self.spectrum_graph = pairs
return pairs
def estimate_missing_ion_importance(self, key):
count = key.count_of(self.glycan_composition)
weight = self.all_signatures[key]
return min(weight * count, 0.99)
def _signature_ion_score(self, error_tolerance=2e-5):
penalty = 0
for key, peak in self.unexpected_matches.items():
ratio = peak.intensity / self.maximum_intensity
if ratio < self.minimum_intensity_ratio_threshold:
continue
x = 1 - ratio
if x <= 0:
component = 20
else:
component = -10 *
|
np.log10(x)
|
numpy.log10
|
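# --- Numeric trace of the signature-ion penalty curve above. ---
# The cost of an unexpected signature ion grows with its intensity ratio and
# is capped at 20 once the ratio reaches 1.
import numpy as np

for ratio in (0.05, 0.5, 0.9, 1.0):
    x = 1 - ratio
    component = 20 if x <= 0 else -10 * np.log10(x)
    print(ratio, round(float(component), 3))  # ~0.223, ~3.01, 10.0, 20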
from src.agent.agent import Agent
from src.config.config import Config
from config.key import CONFIG_KEY
import numpy as np
from src.util.sampler import Sampler
import easy_tf_log
from src.core import Basic
from src.util.noiseAdder import noise_adder
class TargetAgent(Agent):
key_list = Config.load_json(file_path=CONFIG_KEY + '/ddpgAgentKey.json')
def __init__(self, config, real_env, cyber_env, model, sampler=Sampler()):
super(TargetAgent, self).__init__(config=config,
env=real_env,
model=model,
sampler=sampler)
self.real_env = real_env
self.cyber_env = cyber_env
self.env = None
self._env_status = self.config.config_dict['REAL_ENVIRONMENT_STATUS']
self._real_env_sample_count = 0
self._cyber_env_sample_count = 0
self.SamplerTraingCount=0
@property
def env_sample_count(self):
if self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
return self._real_env_sample_count
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
return self._cyber_env_sample_count
@env_sample_count.setter
def env_sample_count(self, new_value):
if self.status == self.status_key['TEST']:
return
if self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
self._real_env_sample_count = new_value
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
self._cyber_env_sample_count = new_value
@property
def status(self):
return self._status
@status.setter
def status(self, new_value):
if new_value != Basic.status_key['TRAIN'] and new_value != Basic.status_key['TEST']:
raise KeyError('New status %d does not exist' % new_value)
if new_value == Basic.status_key['TEST'] and self.env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
self.sampler.env_status = self.sampler.config.config_dict['TEST_ENVIRONMENT_STATUS']
if self._status == new_value:
return
self._status = new_value
self.model.status = new_value
@property
def env_status(self):
return self._env_status
@env_status.setter
def env_status(self, new_sta):
self._env_status = new_sta
if self._env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
self.env = self.real_env
self.model.env_status = self.model.config.config_dict['REAL_ENVIRONMENT_STATUS']
if self.status == self.status_key['TEST']:
self.sampler.env_status = self.sampler.config.config_dict['TEST_ENVIRONMENT_STATUS']
else:
self.sampler.env_status = self.sampler.config.config_dict['REAL_ENVIRONMENT_STATUS']
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
self.env = self.cyber_env
self.model.env_status = self.model.config.config_dict['CYBER_ENVIRONMENT_STATUS']
self.sampler.env_status = self.sampler.config.config_dict['CYBER_ENVIRONMENT_STATUS']
else:
raise ValueError('Wrong agent environment status: %d' % new_sta)
@property
def current_env_status(self):
if self._env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
return 'REAL_ENVIRONMENT_STATUS'
elif self._env_status == self.config.config_dict['CYBER_ENVIRONMENT_STATUS']:
return 'CYBER_ENVIRONMENT_STATUS'
def predict(self, state, *args, **kwargs):
state = np.reshape(state, [-1])
count = self._real_env_sample_count
eps = 1.0 - (self.config.config_dict['EPS'] - self.config.config_dict['EPS_GREEDY_FINAL_VALUE']) * \
(count / self.config.config_dict['EPS_ZERO_FLAG'])
if eps < 0:
eps = 0.0
rand_eps = np.random.rand(1)
if self.config.config_dict['EPS_GREEDY_FLAG'] == 1 and rand_eps < eps and self.status == self.status_key['TRAIN']:
res = self.env.action_space.sample()
else:
res = np.array(self.model.predict(state))
if self.config.config_dict['NOISE_FLAG'] > 0 and self.status == self.status_key['TRAIN']:
res, noise = noise_adder(action=res, agent=self)
for i in range(len(noise)):
easy_tf_log.tflog(key=self.name + '_ACTION_NOISE_DIM_' + str(i), value=noise[i])
return np.reshape(res, [-1])
def sample(self, env, sample_count, store_flag=False, agent_print_log_flag=False, resetNoise_Flag=False):
if self.status == self.status_key['TEST']:
self.sampler.reset(env=env, agent=self)
if self.model.config.config_dict['NOISE_FLAG']==2:
resetNoise_Flag = True
else:
resetNoise_Flag = False
return super().sample(env, sample_count, store_flag, agent_print_log_flag, resetNoise_Flag)
def train(self, sampler_train_flag=0):
if self.model.memory_length >= self.model.config.config_dict['BATCH_SIZE']:
res_dict = self.model.update()
else:
res_dict = None
# TODO add the train process of sampler
# if sampler_train_flag>0 and self._env_status == self.config.config_dict['REAL_ENVIRONMENT_STATUS']:
# self.SamplerTraingCount +=1.0
# ####do the training
# ###get the ne_data from memory
# self.sampler.count_new_real_samples =sampler_train_flag
# new_idx = np.arange(self.model.real_data_memory.observations0.length-self.sampler.count_new_real_samples, self.model.real_data_memory.observations0.length)
# new_data_states = self.model.real_data_memory.observations0.get_batch(new_idx)
# ###get all data from memory
# all_idx = new_idx #np.arange(self.model.real_data_memory.observations0.length)
# all_data_states = self.model.real_data_memory.observations0.get_batch(all_idx)
# all_data_actions = self.model.real_data_memory.actions.get_batch(all_idx)
# all_data_nstates = self.model.real_data_memory.observations1.get_batch(all_idx)
# ####predcit the states
# state_est_input = new_data_states
# state_est_label = self.SamplerTraingCount*np.ones([new_data_states.shape[0],1])
# dyn_error_est_input = all_data_states
# prd_nstates = self.cyber_env.model.predict(sess=self.cyber_env.sess,
# state_input=all_data_states,
# action_input=all_data_actions)
# ####get the error for each sample
# dyn_error_est_label = np.sum((all_data_nstates-prd_nstates)**2,1)
# ####normalize the error into range [0,1]
# # dyn_error_est_label = (dyn_error_est_label-np.min(dyn_error_est_label))/(np.max(dyn_error_est_label)-np.min(dyn_error_est_label))
# # print("dyn_error_est_label=", dyn_error_est_label)
# dyn_error_est_label = dyn_error_est_label.reshape([-1,1])
# print("state_est_input.shape=", state_est_input.shape)
# print("dyn_error_est_input.shape=", dyn_error_est_input.shape)
# self.sampler.train(state_est_input, state_est_label, dyn_error_est_input, dyn_error_est_label)
return res_dict
def store_one_sample(self, state, next_state, action, reward, done):
self.model.store_one_sample(state=state,
next_state=next_state,
action=action,
reward=reward,
done=done)
def init(self):
self.model.init()
self.model.reset()
super().init()
def print_log_queue(self, status):
self.status = status
reward_list = []
while self.log_queue.qsize() > 0:
reward_list.append(self.log_queue.get()[self.name + '_SAMPLE_REWARD'])
if len(reward_list) > 0:
reward_list =
|
np.array(reward_list)
|
numpy.array
|
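# --- Standalone trace of the epsilon decay arithmetic in predict() above. ---
# Epsilon falls linearly from 1.0 with the real-environment sample count and is
# clamped at 0; the config values below are invented for illustration.
EPS, EPS_GREEDY_FINAL_VALUE, EPS_ZERO_FLAG = 1.0, 0.1, 10000
for count in (0, 5000, 10000, 20000):
    eps = 1.0 - (EPS - EPS_GREEDY_FINAL_VALUE) * (count / EPS_ZERO_FLAG)
    eps = max(eps, 0.0)
    print(count, round(eps, 3))  # 1.0, 0.55, 0.1, 0.0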
# import cv2
import numpy as np
import os
from PIL import Image
from wavedata.tools.obj_detection import obj_panoptic_utils
from wavedata.tools.obj_detection import evaluation
from pplp.core import box_3d_panoptic_encoder, anchor_panoptic_projector
from pplp.core import anchor_encoder
from pplp.core import anchor_filter
from pplp.core.anchor_generators import grid_anchor_3d_generator
# import for MRCNN
# Add this block for ROS python conflict
import sys
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
sys.path.remove('$HOME/segway_kinetic_ws/devel/lib/python2.7/dist-packages')
except ValueError:
pass
import cv2
# import random
# import math
# import numpy as np
# import skimage.io
# import matplotlib
# import matplotlib.pyplot as plt
from pplp.core.maskrcnn import coco
from pplp.core.maskrcnn import maskrcnn_utils
from pplp.core.models import maskrcnn_model as modellib
from pplp.core.maskrcnn import visualize
# from pplp.core.models.maskrcnn_model import log
class MiniBatchPreprocessor(object):
def __init__(self,
dataset,
mini_batch_dir,
anchor_strides,
density_threshold,
neg_iou_3d_range,
pos_iou_3d_range):
"""Preprocesses anchors and saves info to files for RPN training
Args:
dataset: Dataset object
mini_batch_dir: directory to save the info
anchor_strides: anchor strides for generating anchors (per class)
density_threshold: minimum number of points required to keep an
anchor
neg_iou_3d_range: 3D iou range for an anchor to be negative
pos_iou_3d_range: 3D iou range for an anchor to be positive
"""
self._dataset = dataset
self.mini_batch_panoptic_3d_utils = self._dataset.panoptic_3d_utils.mini_batch_panoptic_3d_utils
self._mini_batch_dir = mini_batch_dir
self._area_extents = self._dataset.panoptic_3d_utils.area_extents
self._anchor_strides = anchor_strides
self._density_threshold = density_threshold
self._negative_iou_range = neg_iou_3d_range
self._positive_iou_range = pos_iou_3d_range
def _calculate_anchors_info(self,
all_anchor_boxes_3d,
empty_anchor_filter,
gt_labels):
"""Calculates the list of anchor information in the format:
N x 8 [anchor_indices, max_gt_iou(2d or 3d), (6 x offsets), class_index]
max_gt_iou - highest 3D iou with any ground truth box
offsets - encoded offsets [dx, dy, dz, d_dimx, d_dimy, d_dimz]
class_index - the anchor's class as an index
(e.g. 0 or 1, for "Background" or "Car")
Args:
all_anchor_boxes_3d: list of anchors in box_3d format
N x [x, y, z, l, w, h, ry]
empty_anchor_filter: boolean mask of which anchors are non empty
gt_labels: list of Object Label data format containing ground truth
labels to generate positives/negatives from.
Returns:
list of anchor info
"""
# Check for ground truth objects
if len(gt_labels) == 0:
raise Warning("No valid ground truth label to generate anchors.")
panoptic_utils = self._dataset.panoptic_3d_utils
# Filter empty anchors
anchor_indices = np.where(empty_anchor_filter)[0]
anchor_boxes_3d = all_anchor_boxes_3d[empty_anchor_filter]
# Convert anchor_boxes_3d to anchor format
anchors = box_3d_panoptic_encoder.box_3d_to_anchor(anchor_boxes_3d)
# Convert gt to boxes_3d -> anchors -> iou format
# print('mini_batch_panoptic_preprocessor.py :')
# print('gt_labels = ', gt_labels)
gt_boxes_3d = np.asarray(
[box_3d_panoptic_encoder.object_label_to_box_3d(gt_obj)
for gt_obj in gt_labels])
gt_anchors = box_3d_panoptic_encoder.box_3d_to_anchor(gt_boxes_3d,
ortho_rotate=True)
rpn_iou_type = self.mini_batch_panoptic_3d_utils.rpn_iou_type
if rpn_iou_type == '2d':
# Convert anchors to 2d iou format
anchors_for_2d_iou, _ = np.asarray(anchor_panoptic_projector.project_to_bev(
anchors, panoptic_utils.bev_extents))
gt_boxes_for_2d_iou, _ = anchor_panoptic_projector.project_to_bev(
gt_anchors, panoptic_utils.bev_extents)
elif rpn_iou_type == '3d':
# Convert anchors to 3d iou format for calculation
anchors_for_3d_iou = box_3d_panoptic_encoder.box_3d_to_3d_iou_format(
anchor_boxes_3d)
gt_boxes_for_3d_iou = \
box_3d_panoptic_encoder.box_3d_to_3d_iou_format(gt_boxes_3d)
else:
raise ValueError('Invalid rpn_iou_type {}'.format(rpn_iou_type))
# Initialize sample and offset lists
num_anchors = len(anchor_boxes_3d)
all_info = np.zeros((num_anchors,
self.mini_batch_panoptic_3d_utils.col_length))
# Update anchor indices
all_info[:, self.mini_batch_panoptic_3d_utils.col_anchor_indices] = anchor_indices
# For each of the labels, generate samples
# print('gt_boxes_for_2d_iou = ', gt_boxes_for_2d_iou)
# print('anchors_for_2d_iou = ', anchors_for_2d_iou)
for gt_idx in range(len(gt_labels)):
gt_obj = gt_labels[gt_idx]
gt_box_3d = gt_boxes_3d[gt_idx]
# Get 2D or 3D IoU for every anchor
# Default setting 2D
if self.mini_batch_panoptic_3d_utils.rpn_iou_type == '2d':
gt_box_for_2d_iou = gt_boxes_for_2d_iou[gt_idx]
ious = evaluation.two_d_iou(gt_box_for_2d_iou,
anchors_for_2d_iou)
elif self.mini_batch_panoptic_3d_utils.rpn_iou_type == '3d':
gt_box_for_3d_iou = gt_boxes_for_3d_iou[gt_idx]
ious = evaluation.three_d_iou(gt_box_for_3d_iou,
anchors_for_3d_iou)
# Only update indices with a higher iou than before
update_indices = np.greater(
ious, all_info[:, self.mini_batch_panoptic_3d_utils.col_ious])
# Get ious to update
ious_to_update = ious[update_indices]
# Calculate offsets, use 3D iou to get highest iou
anchors_to_update = anchors[update_indices]
gt_anchor = box_3d_panoptic_encoder.box_3d_to_anchor(gt_box_3d,
ortho_rotate=True)
offsets = anchor_encoder.anchor_to_offset(anchors_to_update,
gt_anchor)
# Convert gt type to index
class_idx = panoptic_utils.class_str_to_index(gt_obj.type)
# Update anchors info (indices already updated)
# [index, iou, (offsets), class_index]
all_info[update_indices,
self.mini_batch_panoptic_3d_utils.col_ious] = ious_to_update
all_info[update_indices,
self.mini_batch_panoptic_3d_utils.col_offsets_lo:
self.mini_batch_panoptic_3d_utils.col_offsets_hi] = offsets
all_info[update_indices,
self.mini_batch_panoptic_3d_utils.col_class_idx] = class_idx
return all_info
def preprocess(self, indices):
"""Preprocesses anchor info and saves info to files
Args:
indices (int array): sample indices to process.
If None, processes all samples
"""
# Get anchor stride for class
anchor_strides = self._anchor_strides
dataset = self._dataset
dataset_utils = self._dataset.panoptic_3d_utils
classes_name = dataset.classes_name
# Make folder if it doesn't exist yet
output_dir = self.mini_batch_panoptic_3d_utils.get_file_path(classes_name,
anchor_strides,
sample_name=None)
os.makedirs(output_dir, exist_ok=True)
# Get clusters for class
all_clusters_sizes, _ = dataset.get_cluster_info()
anchor_generator = grid_anchor_3d_generator.GridAnchor3dGenerator()
# Load indices of data_split
all_samples = dataset.sample_list
if indices is None:
indices = np.arange(len(all_samples))
num_samples = len(indices)
# For each image in the dataset, save info on the anchors
for sample_idx in indices:
# Get image name for given cluster
sample_name = all_samples[sample_idx].name
img_idx = int(sample_name)
# Check for existing files and skip to the next
if self._check_for_existing(classes_name, anchor_strides,
sample_name):
print("{} / {}: Sample already preprocessed".format(
sample_idx + 1, num_samples, sample_name))
continue
# Get ground truth and filter based on difficulty
ground_truth_list = obj_panoptic_utils.read_labels(dataset.label_dir,
img_idx)
# Filter objects to dataset classes
# print('mini_batch_panoptic_preprocessor.py : ')
# print('ground_truth_list = ', ground_truth_list)
# If no valid ground truth, skip this image
if not ground_truth_list:
print("{} / {} No {}s for sample {} "
"(Ground Truth Filter)".format(
sample_idx + 1, num_samples,
classes_name, sample_name))
# Output an empty file and move on to the next image.
self._save_to_file(classes_name, anchor_strides, sample_name)
continue
filtered_gt_list = dataset_utils.filter_labels(ground_truth_list)
filtered_gt_list = np.asarray(filtered_gt_list)
# Filtering by class has no valid ground truth, skip this image
if len(filtered_gt_list) == 0:
print("{} / {} No {}s for sample {} "
"(Ground Truth Filter)".format(
sample_idx + 1, num_samples,
classes_name, sample_name))
# Output an empty file and move on to the next image.
self._save_to_file(classes_name, anchor_strides, sample_name)
continue
# Get ground plane
ground_plane = obj_panoptic_utils.get_road_plane(img_idx,
dataset.planes_dir)
image = Image.open(dataset.get_rgb_image_path(sample_name))
image_shape = [image.size[1], image.size[0]]
# Generate sliced 2D voxel grid for filtering
# print('******** Generate sliced 2D voxel grid for filtering *********')
# print('sample_name = ', sample_name)
# print('dataset.bev_source = ', dataset.bev_source)
# print('image_shape = ', image_shape)
# If run with density filter:
vx_grid_2d = dataset_utils.create_sliced_voxel_grid_2d(
sample_name,
source=dataset.bev_source,
image_shape=image_shape)
# List for merging all anchors
all_anchor_boxes_3d = []
# Create anchors for each class
# print('mini_batch_panoptic_preprocessor.py : Create anchors for each class')
# print('mini_batch_panoptic_preprocessor.py : dataset.classes = ', dataset.classes)
for class_idx in range(len(dataset.classes)):
# Generate anchors for all classes
# print('class_idx = ', class_idx)
# print('len(dataset.classes) = ', len(dataset.classes))
# print('self._area_extents = ', self._area_extents)
# print('all_clusters_sizes = ', all_clusters_sizes)
# print('self._anchor_strides = ', self._anchor_strides)
# print('ground_plane = ', ground_plane)
grid_anchor_boxes_3d = anchor_generator.generate(
area_3d=self._area_extents,
anchor_3d_sizes=all_clusters_sizes[class_idx],
anchor_stride=self._anchor_strides[class_idx],
ground_plane=ground_plane)
all_anchor_boxes_3d.extend(grid_anchor_boxes_3d)
# Filter empty anchors
all_anchor_boxes_3d =
|
np.asarray(all_anchor_boxes_3d)
|
numpy.asarray
|
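# --- Hedged sketch: best-IoU bookkeeping as in _calculate_anchors_info(). ---
# For each ground-truth box, overwrite an anchor's stored IoU and label only
# where the new IoU is strictly higher. The IoU values below are made up.
import numpy as np

best_iou = np.zeros(4)
best_gt = np.full(4, -1)
for gt_idx, ious in enumerate([np.array([0.1, 0.6, 0.3, 0.0]),
                               np.array([0.4, 0.5, 0.7, 0.2])]):
    update = np.greater(ious, best_iou)
    best_iou[update] = ious[update]
    best_gt[update] = gt_idx
# best_iou -> [0.4, 0.6, 0.7, 0.2]; best_gt -> [1, 0, 1, 1]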
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This routine determines the surface pressure (P, hPa), surface air temperature (T, K), relative humidity (RH, %)
weighed mean temperature (Tm, K), zenith hydrostatic delay (ZHD, m), zenith wet delay (ZWD, m), and
precipitable water vapor (PWV, m) from binary coefficient files
As available from:
https://github.com/pjmateus/hgpt_model (release v2.0)
press_grid.bin; temp_grid.bin; tm_grid.bin; and rh_grid.bin
It is admitted that the binary files with the coefficients are in the same directory as this script.
In alternative you can define the "coeffiles" variable
The epoch can be an array of size 1, and in this case is the Modified Julian Date (MJD)
or can be an array of size 6, with the Gregorian Calendar in the following format (year, month, day, hour, min, sec)
All parameters are bilinear interpolated to the input ellipsoidal longitude and latitude
Reference for HGPT and HGPT2:
<NAME>.; <NAME>.; <NAME>.; <NAME>. An ERA5-Based Hourly Global Pressure and Temperature (HGPT) Model.
Remote Sens. 2020, 12, 1098. https://doi.org/10.3390/rs12071098
HGPT2: an ERA5-based global model to estimate relative humidity (Remote Sensing, MDPI)
INPUT:
dt : if size(dt)=1 => modified julian date
if size(dt)=6 => year, month, day, hour, min, sec
x0 : ellipsoidal longitude (degrees)
y0 : ellipsoidal latitude (degrees)
z0 : height (m)
z0_type : 'orth' for orthometric height or 'elli' for ellipsoidal height
OUTPUT:
P : surface pressure valid at (x0, y0, z0), in hPa
T : surface air temperature valid at (x0, y0, z0), in Kelvins
RH : relative humidity valid at (x0, y0, z0), in %
Tm : weighed mean temperature valid at (x0, y0, z0), in Kelvins
ZHD : zenith hydrostatic delay, valid at (x0, y0, z0), in meters
ZWD : zenith wet delay, valid at (x0, y0, z0), in meters
PWV : precipitable water vapor, valid at (x0, y0, z0), in meters
--------------------------------------------------------------------------
Example:
y0 = 38.5519
x0 = -9.0147
z0 = 25
dt = 58119.5 or dt = np.array([2018, 1, 1, 12, 0, 0])
P, T, RH, Tm, ZHD, ZWD, PWV = hgpt2(dt, x0, y0, z0, 'orth')
(1020.262,
286.589,
74.908,
277.236,
2.324,
0.117,
0.018)
--------------------------------------------------------------------------
written by <NAME> (2021/05/15)
Instituto Dom Luiz (IDL), Faculdade de Ciências, Universidade de Lisboa, 1749-016 Lisboa, Portugal
<EMAIL>
Dependencies:
https://pypi.org/project/julian/
pip install julian
"""
import numpy as np
import julian
from datetime import datetime
def es_wexler(T, P):
''' Saturation Water Vapor Pressure (es) using Wexler formulation with new coefficients (adjusted for ITS-90)
INPUT : T = Temperature (in Kelvins)
P = atmospheric Pressure (in hPa)
OUTPUT: es= saturation water vapor pressure (in hPa)
References:
1) <NAME>. Vapor Pressure Formulation for Water in Range 0 to 100 Degrees C. A Revision.
   J. Res. Natl. Bur. Stand. 1976, 80A, 775–785.
2) Wexler, A. Vapor Pressure Formulation for Ice. J. Res. Natl. Bur. Stand. 1977, 81A, 5–20.
3) <NAME>. ITS-90 Formulations for Water Vapor Pressure, Frostpoint Temperature, Dewpoint
   Temperature, and Enhancement Factors in Range -100 to +100 C. In Proceedings of the Third
   International Symposium on Humidity and Moisture; UK National Physical Laboratory (NPL):
   Teddington, UK, April 6 1998; pp. 1–8.'''
if T >= 273.15:
# Saturation Vapor Pressure over Water
g0 =-2.8365744*10**3
g1 =-6.028076559*10**3
g2 = 1.954263612*10**1
g3 =-2.737830188*10**-2
g4 = 1.6261698*10**-5
g5 = 7.0229056*10**-10
g6 =-1.8680009*10**-13
g7 = 2.7150305
es = 0.01 * np.exp(g0*T**-2 + g1*T**-1 + g2 + g3*T + g4*T**2 + g5*T**3 + g6*T**4 + g7*np.log(T))
# Enhancement Factor coefficients for Water 0 to 100°C
A0 =-1.6302041*10**-1
A1 = 1.8071570*10**-3
A2 =-6.7703064*10**-6
A3 = 8.5813609*10**-9
B0 =-5.9890467*10**1
B1 = 3.4378043*10**-1
B2 =-7.7326396*10**-4
B3 = 6.3405286*10**-7
else:
# Saturation Vapor Pressure over Ice
k0 =-5.8666426*10**3
k1 = 2.232870244*10**1
k2 = 1.39387003*10**-2
k3 =-3.4262402*10**-5
k4 = 2.7040955*10**-8
k5 = 6.7063522*10**-1
es = 0.01 * np.exp(k0*T**-1 + k1 + k2*T + k3*T**2 + k4*T**3 + k5*np.log(T))
# Enhancement Factor coefficients for Ice –100 to 0°C
A0 =-6.0190570*10**-2
A1 = 7.3984060*10**-4
A2 =-3.0897838*10**-6
A3 = 4.3669918*10**-9
B0 =-9.4868712*10**1
B1 = 7.2392075*10**-1
B2 =-2.1963437*10**-3
B3 = 2.4668279*10**-6
# Enhancement Factor
alpha = A0 + A1*T + A2*T**2 + A3*T**3
beta = np.exp(B0 + B1*T + B2*T**2 + B3*T**3)
f = np.exp( alpha*(1-es/P) + beta*(P/es-1) )
return es * f
def hgpt2(dt, x0, y0, z0, z0_type):
# Grid files location
coeffiles='' # put '/' or '\' at the end
# Constants
row = 721
col = 1440
p1 = 365.250
p2 = 182.625
p3 = 91.3125
# Geographic coordinates ( equal to ERA5 )
lon = np.linspace(-180, 179.75, col)
lat = np.linspace(-90, 90, row)
# Modified Julian date
if np.size(dt) == 6:
# Input: Gregorian calendar
mjd = julian.to_jd(datetime(int(dt[0]), int(dt[1]), int(dt[2]), \
    int(dt[3]), int(dt[4]), int(dt[5])), fmt='mjd')
hour = int(dt[3])
elif np.size(dt) == 1:
# Input: Modified Julian date
gre = julian.from_jd(dt, fmt='mjd')
mjd = dt
hour = int(np.around(gre.hour))
else:
raise NameError('Use 1) Modified Julian Date (MJD) or 2) Gregorian date (y,m,d,HH,MM,SS).')
# Finding indexes for bilinear interpolation
# x-location
indx = np.argsort( np.sqrt( (lon-x0)**2 ) )
ix1 = indx[ 0 ]; ix2 = indx[ 1 ]
x1 = lon[ ix1 ]; x2 = lon[ ix2 ]
x = [ix1, ix1, ix2, ix2]
# y-location
indy = np.argsort( np.sqrt( (lat-y0)**2 ) )
jy1 = indy[ 0 ]; jy2 = indy[ 1 ]
y1 = lat[ jy1 ]; y2 = lat[ jy2 ]
y = [jy1, jy2, jy1, jy2]
# xy-distances (weights)
dx1y1= 1/np.sqrt( (x1 - x0)**2 + (y1 - y0)**2 )
dx1y2= 1/np.sqrt( (x1 - x0)**2 + (y2 - y0)**2 )
dx2y1= 1/np.sqrt( (x2 - x0)**2 + (y1 - y0)**2 )
dx2y2= 1/np.sqrt( (x2 - x0)**2 + (y2 - y0)**2 )
dxy = np.array([dx1y1, dx1y2, dx2y1, dx2y2], dtype=np.float64)
if np.any(np.isinf(dxy)):
# Exact point grid
dxy = np.array([1,0,0,0], dtype=np.float64)
# ******************************************************************
# Open and read the surface air temperature coefficients file
# ******************************************************************
fid = open(coeffiles+'temp_grid.bin', 'rb')
fid.seek((row*col*26)*hour, 0)
a0 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
b0 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
a1 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f1 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
a2 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f2 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
a3 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f3 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
fid.close()
# Surface air temperature model
fun_t = lambda a0, b0, a1, f1, a2, f2, a3, f3: a0 + b0*(mjd - 51178) + a1*np.cos(2*np.pi*(mjd - 51178)/p1+f1) + \
a2*np.cos(2*np.pi*(mjd - 51178)/p2+f2) + a3*np.cos(2*np.pi*(mjd - 51178)/p3+f3)
# Applying the bilinear interpolation
tij = np.array([0,0,0,0], dtype=np.float64)
for j in range(0, len(x)):
tij[j] = fun_t(a0[y[j],x[j]], b0[y[j],x[j]], a1[y[j],x[j]], f1[y[j],x[j]], \
a2[y[j],x[j]], f2[y[j],x[j]], a3[y[j],x[j]], f3[y[j],x[j]])
T = np.sum(tij*dxy)/np.sum(dxy)
# ******************************************************************
# Open and read the surface pressure coefficients file
# ******************************************************************
fid = open(coeffiles+'press_grid.bin', 'rb')
fid.seek((row*col*20)*hour, 0)
a0 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
b0 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
a1 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f1 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
a2 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f2 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
fid.close()
# Surface pressure model
fun_p = lambda a0, b0, a1, f1, a2, f2: a0 + b0*(mjd - 51178) + a1*np.cos(2*np.pi*(mjd - 51178)/p1+f1) + \
a2*np.cos(2*np.pi*(mjd - 51178)/p2+f2)
# Applying the bilinear interpolation
pij = np.array([0,0,0,0], dtype=np.float64)
for j in range(0, len(x)):
pij[j] = fun_p(a0[y[j],x[j]], b0[y[j],x[j]], a1[y[j],x[j]], f1[y[j],x[j]], a2[y[j],x[j]], f2[y[j],x[j]])
P = np.sum(pij*dxy)/np.sum(dxy)
# ******************************************************************
# Open and read the surface relative humidity coefficients file
# ******************************************************************
fid = open(coeffiles+'rh_grid.bin', 'rb')
fid.seek((row*col*22)*hour, 0)
a0 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
a1 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f1 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
a2 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f2 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
a3 = np.fromfile(fid, dtype=np.float32, count=row*col).reshape((row, col), order='F')
f3 = np.fromfile(fid, dtype=np.int16, count=row*col).reshape((row, col), order='F')/10000.0
fid.close()
# Surface relative humidity model
fun_rh = lambda a0, a1, f1, a2, f2, a3, f3: a0 + a1*np.cos(2*np.pi*(mjd - 51178)/p1+f1) + \
a2*
|
np.cos(2*np.pi*(mjd - 51178)/p2+f2)
|
numpy.cos
|
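# --- Hedged sketch: the inverse-distance weighting used by hgpt2() above. ---
# A value at (x0, y0) is estimated from the four surrounding grid nodes with
# weights 1/distance, falling back to the exact node when the query sits on
# one. Node coordinates and values are invented.
import numpy as np

nodes = np.array([[-9.25, 38.5], [-9.25, 38.75], [-9.0, 38.5], [-9.0, 38.75]])
vals = np.array([1020.0, 1019.5, 1021.0, 1020.4])
x0, y0 = -9.0147, 38.5519
d = 1.0 / np.sqrt(np.sum((nodes - [x0, y0])**2, axis=1))
if np.any(np.isinf(d)):  # query falls exactly on a grid node
    d = np.isinf(d).astype(float)
estimate = np.sum(vals * d) / np.sum(d)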