prompt (string, lengths 15 to 655k) | completion (string, lengths 3 to 32.4k) | api (string, lengths 8 to 52)
---|---|---|
import numpy as np
from collections import defaultdict
import random
import os
import pdb
'''
code book:
box: 0
sphere: 1
cylinder: 2
ellipsoid: 3
pyramid: 4
cone: 5
blue: 6
red: 7
yellow: 8
green: 9
cyan: 10
magenta: 11
small: 12
large: 13
upper_left: 14
upper_right: 15
lower_left: 16
lower_right: 17
'''
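# Illustrative example (not from the original source): under this code book an
# object described as (sphere, red, small, upper_left) has attribute indices
# (1, 7, 12, 14), and its row in the dataset attribute matrix is the 18-dim
# binary vector with ones at exactly those four positions.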
def get_combination(samples, num):
if num == 0:
return [[]]
else:
combinations = []
while len(samples) > 0:
s = samples[0]
samples = samples[1:]
sub_combinations = get_combination(samples, num - 1)
combinations.extend([sc + [s] for sc in sub_combinations])
return combinations
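# Example (illustrative, not in the original source):
#   >>> get_combination([1, 2, 3], 2)
#   [[2, 1], [3, 1], [3, 2]]
# i.e. every 2-element subset of the input, in the order the recursion emits them.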
class Concept:
def __init__(self, game_config, is_train):
self.attributes = game_config.attributes
assert(len(self.attributes) == 18)
self.attributes_np = np.array(self.attributes)
self.shapes = list(range(6))
self.colors = list(range(6, 12))
self.sizes = list(range(12, 14))
self.positions = list(range(14, 18))
self.num_distractors = game_config.num_distractors
self.num_distractors_range_np = np.array(range(self.num_distractors))
self.concept_size = 4 # color, shape, size, position
self.dataset_attributes_path = game_config.dataset_attributes_path
self.img_h = game_config.img_h
self.img_w = game_config.img_w
self.save_dir = game_config.save_dir
if not os.path.exists(self.dataset_attributes_path):
print(self.dataset_attributes_path)
raise Exception('dataset_attributes_path does not exist')
self.dataset_attributes = np.load(self.dataset_attributes_path)
assert(self.dataset_attributes.shape[1] == len(self.attributes))
self.data_count = self.dataset_attributes.shape[0]
if 'images_path' in game_config and game_config.images_path is not None:
self.img_as_data = True
self.images_path = game_config.images_path
if not os.path.exists(self.images_path):
print(self.images_path)
raise Exception('images_path does not exist')
self.images = np.load(self.images_path)
assert(self.images.shape[1] == self.img_h and self.images.shape[2] == self.img_w)
assert(self.images.shape[0] == self.data_count)
else:
if 'feat_dir' in game_config and game_config.feat_dir is not None:
load_dir = game_config.feat_dir
else:
load_dir = self.save_dir
self.img_as_data = False
self.teacher_feat_path = os.path.join(load_dir, 'Geometry3D_4_Teacher_features_%d_distractors.npy' % self.num_distractors)
self.student_feat_path = os.path.join(load_dir, 'Geometry3D_4_Student_features_%d_distractors.npy' % self.num_distractors)
if not os.path.exists(self.teacher_feat_path) or not os.path.exists(self.student_feat_path):
print(self.teacher_feat_path, self.student_feat_path)
raise Exception('teacher_feat_path or student_feat_path does not exist')
self.teacher_features = np.load(self.teacher_feat_path)
self.student_features = np.load(self.student_feat_path)
assert(self.teacher_features.shape == self.student_features.shape)
assert(self.teacher_features.shape[0] == self.data_count)
self.permute = game_config.permutation_train if is_train else game_config.permutation_test
if game_config.generated_dataset:
self.generated_dataset = np.load(game_config.generated_dataset_path_train, allow_pickle = True).item() if is_train\
else np.load(game_config.generated_dataset_path_test, allow_pickle = True).item()
self.dataset_size = len(self.generated_dataset['discrete_concepts'])
self.fetch_idx = np.arange(self.dataset_size)
self.num_used = np.random.randint(self.dataset_size)
else:
self.generated_dataset = None
def store_features(self, teacher, student):
def helper(agent):
save_path = os.path.join(self.save_dir, 'Geometry3D_4_%s_features_%d_distractors.npy' % (agent.role_, self.num_distractors))
mod = self.images.shape[0] % self.num_distractors
if mod == 0:
cnn_input = self.images.reshape([-1, self.num_distractors, *self.images.shape[1:]])
else:
dim_to_append = self.num_distractors - mod
padding = np.zeros([dim_to_append, *self.images.shape[1:]])
cnn_input = np.concatenate([self.images, padding], axis = 0).reshape([-1, self.num_distractors, *self.images.shape[1:]])
cnn_output = agent.sess_.run(agent.perception_core_.visual_features_, feed_dict = {agent.distractors_: cnn_input})
if mod == 0:
features = cnn_output.reshape([self.images.shape[0], cnn_output.shape[-1]])
else:
features = cnn_output.reshape([self.images.shape[0] + dim_to_append, cnn_output.shape[-1]])[:self.images.shape[0]]
np.save(save_path, features)
helper(teacher)
helper(student)
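    # Illustrative arithmetic (hypothetical numbers, not from the original source):
    # with 290 images and num_distractors = 7, mod = 3, so 4 zero-padded images are
    # appended to reach 294 = 42 * 7 CNN batches; the resulting features are then
    # trimmed back to 290 rows before saving.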
def rd_generate_concept(self):
if self.generated_dataset is not None:
if self.num_used >= self.dataset_size:
# np.random.shuffle(self.fetch_idx)
self.num_used = 0
chosen_idx = self.generated_dataset['discrete_concepts'][self.num_used]
else:
chosen_idx = np.random.choice(self.data_count, self.num_distractors, replace = False)
concept_embed = self.dataset_attributes[chosen_idx]
concepts = []
included_attributes = set()
for embed in concept_embed:
concept = np.where(embed)[0].tolist()
concepts.append(tuple(concept))
included_attributes.update(concept)
if self.generated_dataset is not None:
stu_concept_idx = self.generated_dataset['stu_concept_idx'][self.num_used]
else:
stu_concept_idx = np.random.permutation(self.num_distractors)
if self.permute:
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
if self.generated_dataset is not None:
shuffle_mapping = self.generated_dataset['shuffle_mapping'][self.num_used]
else:
shuffle_mapping = np.concatenate([shape_permu, color_permu, size_permu, position_permu])
reverse_shuffle_mapping = [p[0] for p in sorted(zip(self.attributes_np, shuffle_mapping), key = lambda x: x[1])]
stu_concept_embed = concept_embed[stu_concept_idx, :][:, reverse_shuffle_mapping]
stu_concepts = []
stu_included_attributes = set()
stu_chosen_idx = []
for embed in stu_concept_embed:
concept = np.where(embed)[0].tolist()
stu_concepts.append(tuple(concept))
concept_idx = concept[0] * 48 + (concept[1] - 6) * 8 + (concept[2] - 12) * 4 + (concept[3] - 14)
stu_chosen_idx.append(concept_idx)
stu_included_attributes.update(concept)
else:
stu_concepts = [concepts[i] for i in stu_concept_idx]
stu_included_attributes = included_attributes
stu_chosen_idx = chosen_idx[stu_concept_idx]
shuffle_mapping = self.attributes_np
if self.img_as_data:
distractors = self.images[chosen_idx]
stu_distractors = self.images[stu_chosen_idx]
else:
distractors = self.teacher_features[chosen_idx]
stu_distractors = self.student_features[stu_chosen_idx]
if self.generated_dataset is not None:
target_idx = self.generated_dataset['target_idx'][self.num_used]
self.num_used += 1
else:
target_idx = np.random.randint(self.num_distractors)
return (concepts, stu_concepts), (list(included_attributes), list(stu_included_attributes)), \
distractors, stu_distractors, shuffle_mapping, stu_concept_idx, target_idx
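    # Illustrative summary (not part of the original source): the tuple returned above
    # packs the teacher/student concept tuples, the attribute sets they cover, the
    # teacher/student distractor images (or precomputed features), the attribute
    # shuffle mapping (identity when permutation is off), the teacher-to-student
    # index permutation, and the index of the target distractor in the teacher's order.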
def teaching_dim(self, concepts, included_attrs):
td_dict = {}
teaching_sample = defaultdict(list)
sample_size = 1
smallest_sample_size = self.concept_size
for i in range(len(concepts)):
for j in range(len(concepts)):
if set(concepts[i]).issubset(set(concepts[j])) and i != j:
td_dict[tuple(concepts[i])] = (self.concept_size, tuple(concepts[i]))
while len(td_dict) < len(concepts):
all_teaching_samples = get_combination(included_attrs, sample_size)
for ts in all_teaching_samples:
for concept in concepts:
if set(ts).issubset(set(concept)):
teaching_sample[tuple(ts)].append(concept)
for ts in teaching_sample:
if len(teaching_sample[ts]) == 1:
concept = teaching_sample[ts][0]
if td_dict.get(tuple(concept)) is None:
td_dict[tuple(concept)] = (sample_size, ts)
smallest_sample_size = min(smallest_sample_size, sample_size)
###
# if len(td_dict) == len(concepts):
# return True
# else:
# return False
###
sample_size += 1
###
# return False
###
return td_dict, smallest_sample_size
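    # Illustrative example (hypothetical concepts): for (0, 6, 12, 14) = small blue box,
    # upper_left and (1, 6, 12, 14) = small blue sphere, upper_left, the single
    # attribute 0 (box) already singles out the first concept, so its entry in
    # td_dict is (1, (0,)) and smallest_sample_size is 1.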
def recursive_teaching_dim(self, concepts, current_most = 0):
if len(concepts) == 0:
return current_most
included_attributes = []
for c in concepts:
for e in c:
included_attributes.append(e)
included_attributes = list(set(included_attributes))
td_dict, smallest_sample_size = self.teaching_dim(concepts, included_attributes)
new_concepts = [c for c in concepts if td_dict[tuple(c)][0] > smallest_sample_size]
return self.recursive_teaching_dim(new_concepts, max(smallest_sample_size, current_most))
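    # Illustrative example (hypothetical concepts): for the four concepts
    # {box, sphere} x {blue, red}, all small and upper_left, no single attribute is
    # unique to one concept but every (shape, color) pair is, so each concept has
    # teaching dimension 2 and this recursion returns 2.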
def teaching_level_for_each_concept(self, concepts):
levels = np.ones(self.num_distractors).astype(int)
non_ui_indicator = np.ones(self.num_distractors).astype(int)
check_ui = np.zeros(self.num_distractors)
if self.recursive_teaching_dim(concepts) > 1:
levels = np.zeros(self.num_distractors).astype(int)
else:
while len([x for y in concepts for x in y]) > 0:
for idx in range(self.num_distractors):
included_attributes = []
for i in range(len(concepts)):
if i != idx:
for e in concepts[i]:
included_attributes.append(e)
set_other = (set(included_attributes))
set_all = (set([x for y in concepts for x in y ]))
check_ui[idx] = (set_other == set_all)
non_ui_indicator = np.multiply(non_ui_indicator, check_ui).astype(int)
levels += non_ui_indicator
new_concepts = []
                for i in range(self.num_distractors):
if non_ui_indicator[i] == 0:
new_concepts.append(())
else:
new_concepts.append(concepts[i])
concepts = new_concepts
#concepts = [c for c in (concepts * non_ui_indicator)]
#pdb.set_trace()
return levels
def bayesian_update(self, old_belief, concepts, info):
likelihood = []
for concept in concepts:
prob = 1.0 * (info in concept) / len(concept)
likelihood.append(prob)
new_belief = old_belief * np.array(likelihood)
new_belief /= np.sum(new_belief) + 1e-9
return new_belief
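    # Worked example (hypothetical values): with old_belief = [0.5, 0.5], concepts
    # [(0, 6, 12, 14), (1, 7, 12, 14)] and info = 6, the likelihoods are [0.25, 0.0],
    # so the normalized new belief is approximately [1.0, 0.0].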
def average_instance_share_check(self):
from tqdm import tqdm
self.num_distractors = 7
counts = []
        # allow_pickle is required to load these pickled dict datasets (as in __init__)
        training_set = np.load('/home/luyao/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Train_1.npy' % self.num_distractors, allow_pickle = True).item()
        test_set = np.load('/home/luyao/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Test_1.npy' % self.num_distractors, allow_pickle = True).item()
training_concept_class = training_set['discrete_concepts']
training_concept_class_set = []
for cc in training_concept_class:
training_concept_class_set.append(set(cc))
test_concept_class = test_set['discrete_concepts'][:10000]
for cc in tqdm(test_concept_class):
cc_counts = []
for cc_set_train in training_concept_class_set:
count = 0
for c in cc:
if c in cc_set_train:
count += 1
cc_counts.append(count)
counts.append(np.mean(cc_counts))
print('mean count: ', np.mean(counts))
def generate_training_dataset(self, size, novel_ratio = None, novel_concept = False, random_novel_concept = False):
self.permute = True
if not novel_concept:
save_path = '/home/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Train_2.npy' % self.num_distractors
data = {'target_idx': [], 'shuffle_mapping': [], 'discrete_concepts': [], 'stu_concept_idx': []}
for i in range(size):
chosen_idx = np.random.choice(self.data_count, self.num_distractors, replace = False)
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
shuffle_mapping = np.concatenate([shape_permu, color_permu, size_permu, position_permu])
stu_concept_idx = np.random.permutation(self.num_distractors)
target_idx = np.random.randint(self.num_distractors)
data['discrete_concepts'].append(chosen_idx)
data['target_idx'].append(target_idx)
data['shuffle_mapping'].append(shuffle_mapping)
data['stu_concept_idx'].append(stu_concept_idx)
for k in data:
data[k] = np.array(data[k])
np.save(save_path, data)
return data
else:
if not random_novel_concept:
separated_concepts = np.array([[0, 11, 13, 14],
[1, 10, 13, 14],
[2, 9, 13, 14],
[3, 8, 13, 14],
[4, 7, 13, 14],
[5, 6, 13, 14],
[0, 6, 12, 15],
[1, 7, 12, 15],
[2, 8, 12, 15],
[3, 9, 12, 15],
[4, 10, 12, 15],
[5, 11, 12, 15],
[0, 11, 12, 16],
[1, 10, 12, 16],
[2, 9, 12, 16],
[3, 8, 12, 16],
[4, 7, 12, 16],
[5, 6, 12, 16],
[0, 6, 13, 17],
[1, 7, 13, 17],
[2, 8, 13, 17],
[3, 9, 13, 17],
[4, 10, 13, 17],
[5, 11, 13, 17]])
embeds = np.zeros((len(separated_concepts), len(self.attributes)))
for row_i, col_j in enumerate(separated_concepts):
embeds[row_i, col_j] = 1
np.set_printoptions(threshold=np.inf, suppress=True, precision=3)
separated_idx = []
for embed in embeds:
np_find = np.where(np.all(self.dataset_attributes == embed, axis = 1))
assert(len(np_find) == 1 and len(np_find[0]) == 1)
separated_idx.append(np_find[0][0])
separated_idx_set = set(separated_idx)
assert(len(separated_idx_set) == len(separated_idx))
else:
# continued = True
# while continued:
separated_idx = np.random.choice(self.data_count, round(novel_ratio * self.data_count), replace = False)
# remain_idx = np.delete(np.arange(self.data_count), separated_idx)
# remain_embed = self.dataset_attributes[remain_idx]
# remain_att_count = np.sum(remain_embed, axis = 0)
# if not np.all(remain_att_count >= 33):
# continue
# assert(len(separated_idx) == len(set(separated_idx)))
# print(remain_att_count)
# continued = False
print(separated_idx)
self.separated_idx = separated_idx
self.separated_idx_set = set(separated_idx)
train_sample_dist = np.ones(self.data_count)
train_sample_dist[separated_idx] = 0
train_sample_dist = train_sample_dist / np.sum(train_sample_dist)
if random_novel_concept:
save_path = '/home/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Train_Novel_Concept_1.npy' % self.num_distractors
else:
save_path = '/home/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Train_Fixed_Novel_Concept.npy' % self.num_distractors
data = {'target_idx': [], 'shuffle_mapping': [], 'discrete_concepts': [], 'stu_concept_idx': []}
for i in range(size):
chosen_idx = np.random.choice(self.data_count, self.num_distractors, replace = False, p = train_sample_dist)
# inner_continued = False
# for idx in chosen_idx:
# if idx in self.separated_idx_set:
# raise Exception()
# if inner_continued:
# continue
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
shuffle_mapping = np.concatenate([shape_permu, color_permu, size_permu, position_permu])
stu_concept_idx = np.random.permutation(self.num_distractors)
target_idx = np.random.randint(self.num_distractors)
data['discrete_concepts'].append(chosen_idx)
data['target_idx'].append(target_idx)
data['shuffle_mapping'].append(shuffle_mapping)
data['stu_concept_idx'].append(stu_concept_idx)
# i += 1
for k in data:
data[k] = np.array(data[k])
np.save(save_path, data)
return data
def generate_novel_dataset(self):
self.num_distractors = 7
train_size = 600000
test_size = 50000
test_ratio = 0.2
train_path = '/home/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Train_Novel_Concept_3.npy' % self.num_distractors
test_path = '/home/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Test_Novel_Concept_3.npy' % self.num_distractors
separated_idx = np.random.choice(self.data_count, round(test_ratio * self.data_count), replace = False)
self.separated_idx_set = set(separated_idx)
train_sample_dist = np.ones(self.data_count)
train_sample_dist[separated_idx] = 0
train_sample_dist = train_sample_dist / np.sum(train_sample_dist)
train_data = {'target_idx': [], 'shuffle_mapping': [], 'discrete_concepts': [], 'stu_concept_idx': []}
for i in range(train_size):
chosen_idx = np.random.choice(self.data_count, self.num_distractors, replace = False, p = train_sample_dist)
# inner_continued = False
for idx in chosen_idx:
if idx in self.separated_idx_set:
raise Exception()
# if inner_continued:
# continue
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
shuffle_mapping = np.concatenate([shape_permu, color_permu, size_permu, position_permu])
stu_concept_idx = np.random.permutation(self.num_distractors)
target_idx = np.random.randint(self.num_distractors)
train_data['discrete_concepts'].append(chosen_idx)
train_data['target_idx'].append(target_idx)
train_data['shuffle_mapping'].append(shuffle_mapping)
train_data['stu_concept_idx'].append(stu_concept_idx)
# i += 1
for k in train_data:
train_data[k] = np.array(train_data[k])
np.save(train_path, train_data)
test_data = {'target_idx': [], 'shuffle_mapping': [], 'discrete_concepts': [], 'stu_concept_idx': []}
for i in range(test_size):
chosen_idx = np.random.choice(separated_idx, self.num_distractors, replace = False)
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
shuffle_mapping = np.concatenate([shape_permu, color_permu, size_permu, position_permu])
stu_concept_idx = np.random.permutation(self.num_distractors)
target_idx = np.random.randint(self.num_distractors)
test_data['discrete_concepts'].append(chosen_idx)
test_data['target_idx'].append(target_idx)
test_data['shuffle_mapping'].append(shuffle_mapping)
test_data['stu_concept_idx'].append(stu_concept_idx)
for k in test_data:
test_data[k] = np.array(test_data[k])
np.save(test_path, test_data)
def generate_testing_dataset(self, size, novel_concept = False, random_novel_concept = False):
self.permute = True
if not novel_concept:
            training_set = np.load('../../../../Datasets/Geometry3D_4/Geometry3D_4_%ddis_Train_2.npy' % self.num_distractors, allow_pickle = True).item()
# if self.permute:
training_teacher_concept_class = training_set['discrete_concepts']
# training_shuffle_mapping = training_set['shuffle_mapping']
training_target_idx = training_set['target_idx']
training_teacher_concept_class_new = []
for zip_pair in zip(training_teacher_concept_class, training_target_idx):
concept_class, idx = zip_pair
sorted_p = []
sorted_p.append(tuple(sorted(concept_class)))
idx_mapped = sorted_p[0].index(concept_class[idx])
sorted_p.append(idx_mapped)
training_teacher_concept_class_new.append(tuple(sorted_p))
training_teacher_concept_class_set = set(training_teacher_concept_class_new)
print('unique training set data: {}'.format(len(training_teacher_concept_class_set)))
data = {'target_idx': [], 'shuffle_mapping': [], 'discrete_concepts': [], 'stu_concept_idx': []}
i = 0
while i != size:
chosen_idx = np.random.choice(self.data_count, self.num_distractors, replace = False)
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
shuffle_mapping = np.concatenate([shape_permu, color_permu, size_permu, position_permu])
stu_concept_idx = np.random.permutation(self.num_distractors)
target_idx = np.random.randint(self.num_distractors)
sorted_p = []
sorted_p.append(tuple(sorted(chosen_idx)))
idx_mapped = sorted_p[0].index(chosen_idx[target_idx])
sorted_p.append(idx_mapped)
if tuple(sorted_p) in training_teacher_concept_class_set:
continue
data['discrete_concepts'].append(chosen_idx)
data['target_idx'].append(target_idx)
data['shuffle_mapping'].append(shuffle_mapping)
data['stu_concept_idx'].append(stu_concept_idx)
i += 1
for k in data:
data[k] = np.array(data[k])
np.save('../../../../Datasets/Geometry3D_4/Geometry3D_4_%ddis_Test_2.npy' % self.num_distractors, data)
return data
# else:
# training_teacher_concept_class = training_set['discrete_concepts']
# training_target_idx = training_set['target_idx']
# training_teacher_concept_class_new = []
# for zip_pair in zip(training_teacher_concept_class, training_target_idx):
# concept_class, idx = zip_pair
# sorted_p = []
# sorted_p.append(tuple(sorted(concept_class)))
# idx_mapped = sorted_p[0].index(concept_class[idx])
# sorted_p.append(idx_mapped)
# training_teacher_concept_class_new.append(tuple(sorted_p))
# training_teacher_concept_class_set = set(training_teacher_concept_class_new)
# print('unique training set data: {}'.format(len(training_teacher_concept_class_set)))
# data = {'target_idx': [], 'discrete_concepts': []}
# i = 0
# while i != size:
# chosen_idx = np.random.choice(self.data_count, self.num_distractors, replace = False)
# target_idx = np.random.randint(self.num_distractors)
# sorted_p = []
# sorted_p.append(tuple(sorted(chosen_idx)))
# idx_mapped = sorted_p[0].index(chosen_idx[target_idx])
# sorted_p.append(idx_mapped)
# if tuple(sorted_p) in training_teacher_concept_class_set:
# continue
# data['discrete_concepts'].append(chosen_idx)
# data['target_idx'].append(target_idx)
# i += 1
# for k in data:
# data[k] = np.array(data[k])
# np.save('/home/Datasets/Geometry3D_4/Geometry3D_4_%ddis_Test_No_Permutation.npy' % self.num_distractors, data)
# return data
else:
data = {'target_idx': [], 'shuffle_mapping': [], 'discrete_concepts': [], 'stu_concept_idx': []}
i = 0
while i != size:
target = np.random.choice(self.separated_idx)
chosen_idx = np.random.choice(self.data_count, self.num_distractors - 1, replace = False)
if target in chosen_idx:
continue
chosen_idx = np.random.permutation(np.append(chosen_idx, target))
target_idx = chosen_idx.tolist().index(target)
shape_permu = np.random.permutation(self.shapes)
color_permu = np.random.permutation(self.colors)
size_permu = np.random.permutation(self.sizes)
position_permu = np.random.permutation(self.positions)
shuffle_mapping =

completion: np.concatenate([shape_permu, color_permu, size_permu, position_permu])
api: numpy.concatenate

# -*- coding: utf-8 -*-
# https://dsp.stackexchange.com/q/76463/50076 ################################
import numpy as np
from numpy.fft import fft, ifft
from scipy.io import wavfile
from ssqueezepy import ssq_cwt, Wavelet
from ssqueezepy.visuals import imshow, plot
#%%# Helper methods ##########################################################
def frequency_modulate(slc, fc=None, b=.3):
N = len(slc)
if fc is None:
fc = N / 18 # arbitrary
# track actual `b` for demodulation purposes
b_effective = b
t_min, t_max = start / fs, end / fs
t = np.linspace(t_min, t_max, N, endpoint=False)
assert np.allclose(fs, 1 / np.diff(t))
x0 = slc[:N]
# ensure it's [-.5, .5] so diff(phi) is b*[-pi, pi]
x0max = np.abs(x0).max()
x0 /= (2*x0max)
b_effective /= (2*x0max)
# generate phase
phi0 = 2*np.pi * fc * t
phi1 = 2*np.pi * b * np.cumsum(x0)
phi = phi0 + phi1
diffmax = np.abs(np.diff(phi)).max()
# `b` correction
    if diffmax > np.pi or np.allclose(diffmax, np.pi):
diffmax0 = np.abs(np.diff(phi0)).max()
diffmax1 = np.abs(np.diff(phi1)).max()
# epsilon term for stable inversion / pi-unambiguity
eps = 1e-7
factor = ((np.pi - diffmax0 - eps) / diffmax1)
phi1 *= factor
b_effective *= factor
phi = phi0 + phi1
assert np.abs(np.diff(phi)).max() <= np.pi
# modulate
x = np.cos(phi)
return x, t, phi0, phi1, b_effective
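# Illustrative note (not part of the original script): the FM signal is
# x = cos(phi0 + phi1), where phi0 = 2*pi*fc*t is the carrier phase and
# phi1 = 2*pi*b*cumsum(x0) contributes a message-dependent phase increment of
# 2*pi*b*x0[n] per sample; the correction above rescales phi1 (and b_effective)
# so that the total per-sample phase increment never exceeds pi.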
def analytic(x):
N = len(x)
xf = fft(x)
xaf = np.zeros(N, dtype='complex128')
xaf[:N//2 + 1] = 2 * xf[:N//2 + 1]
xaf[0] /= 2
xaf[N//2] /= 2
xa = ifft(xaf)
assert

completion: np.allclose(xa.real, x)
api: numpy.allclose

"""Script used for profiling pandas merges to produce call graphs for blog article."""
import cProfile
import pandas
import numpy
def generate_dataframes(
N: int, set_index: bool, type_def: str, duplicates: float, sorted: bool
):
numpy.random.seed(11)
if duplicates > 0:
sample = int((1 - duplicates) * N) + 1
array_1 =

completion: numpy.random.choice(sample, N, replace=True)
api: numpy.random.choice

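# Illustrative note (hypothetical numbers, not part of the original script): with
# N = 1_000_000 and duplicates = 0.5, sample = 500_001, so the N join keys are drawn
# from at most ~500k distinct integers and at least roughly half of them must repeat.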
"""
Copyright (c) 2019 CRISP
functions to extract experiment results save in results*.h5.
:author: <NAME>
"""
import warnings
warnings.filterwarnings("ignore")
import click
import yaml
import h5py
import numpy as np
import fnmatch
import os
import sys
import scipy.io as sio
import itertools
import scipy
from scipy.signal import find_peaks
import matplotlib.pyplot as plt
import sklearn
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from keras.datasets import mnist
import matplotlib
matplotlib.use("GTK")
sys.path.append("..")
PATH = sys.path[-1]
from src.models.CRsAE import *
from src.prints.parameters import *
from src.plotter.plot_experiment_results import *
from src.run_experiments.extract_results_helpers import *
@click.group(chain=True)
def extract_results():
pass
@extract_results.command()
@click.option("--folder_name", default="", help="folder name in experiment directory")
def run_mnist(folder_name):
# load model parameters
print("load model parameters.")
file = open(
"{}/experiments/{}/config/config_model.yml".format(PATH, folder_name), "rb"
)
config_m = yaml.load(file)
file.close()
# load data parameters
print("load data parameters.")
file = open(
"{}/experiments/{}/config/config_data.yml".format(PATH, folder_name), "rb"
)
config_d = yaml.load(file)
file.close()
# load data
print("load data.")
(y_train, label_train), (y_test, label_test) = mnist.load_data()
# convert to float32
y_train = y_train.astype("float32")
y_test = y_test.astype("float32")
# normalize
y_train /= 255
y_test /= 255
y_train = np.expand_dims(y_train, axis=-1)
y_test = np.expand_dims(y_test[: config_d["num_test"], :, :], axis=-1)
print(y_train.shape)
print(y_test.shape)
for file in os.listdir("{}/experiments/{}/results/".format(PATH, folder_name)):
if fnmatch.fnmatch(file, "results_training_*"):
file_number = file[17:-3]
print("file number:", file_number)
# load training results
print("load training results.")
hf_training = h5py.File(
"{}/experiments/{}/results/results_training_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
# load prediction results
print("load prediction results.")
hf_prediction = h5py.File(
"{}/experiments/{}/results/results_prediction_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
y_test_hat = np.array(hf_prediction.get("y_test_hat"))
z_test_hat = np.array(hf_prediction.get("z_test_hat"))
y_test_hat_separate = np.array(hf_prediction.get("y_test_hat_separate"))
H_init = np.array(hf_prediction.get(("H_init")))
lr_iterations = np.array(hf_training.get("lr_iterations"))
val_loss = np.array(hf_training.get("val_loss"))
train_loss = np.array(hf_training.get("train_loss"))
if config_m["lambda_trainable"]:
val_l1_norm_loss = np.array(hf_training.get("val_l1_norm_loss"))
loglambda_loss = np.array(hf_training.get("loglambda_loss"))
lambda_prior_loss = np.array(hf_training.get("lambda_prior_loss"))
train_l1_norm_loss = np.array(hf_training.get("train_l1_norm_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
H_epochs = np.array(hf_training.get("H_epochs"))
H_learned = np.array(hf_training.get("H_learned"))
lambda_donoho = np.array(hf_training.get("lambda_donoho"))
lambda_learned = np.array(hf_training.get("lambda_learned"))
hf_training.close()
hf_prediction.close()
################################################
best_val_epoch = [np.argmin(monitor_val_loss)]
best_epoch = np.min(best_val_epoch)
plot_loss(
val_loss,
train_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["lambda_trainable"]:
plot_lambda(
lambda_init,
lambda_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
row=1,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_noiseSTD(
noiseSTD_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_lambda_loss(
val_lambda_loss,
train_lambda_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
# plot dictionary
plot_H_real_2d(
H_init,
H_learned,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
y_fine=0.5,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_denoise_real_2d(
9,
y_test,
y_test_hat,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_code_real_2d(
9,
z_test_hat,
PATH,
folder_name,
file_number,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# RMSE
if y_test.shape[0] == 1:
RMSE_y_yhat_test = np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=0,
)
)
else:
RMSE_y_yhat_test = np.mean(
np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=1,
)
)
)
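            # Illustrative note (not part of the original script): for a batch of test
            # examples this computes each example's RMSE over time (axis=1 after the
            # squeeze) and averages across the batch; for a single example the RMSE is
            # taken directly over time (axis=0).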
# l1 norm
l1_norm_z_test_hat = np.mean(np.sum(np.abs(z_test_hat), axis=1), axis=0)
summary = {
"distance error init learned": np.round(dist_init_learned, 3).tolist(),
"averaged distance error init learned": np.mean(
np.round(dist_init_learned, 3)
).tolist(),
"noiseSTD": np.round(noiseSTD, 3).tolist(),
"RMSE test": np.round(RMSE_y_yhat_test, 3).tolist(),
"l1 norm test estimated code": np.round(l1_norm_z_test_hat, 3).tolist(),
"lambda donoho": np.round(lambda_donoho, 5).tolist(),
"lambda learned": np.round(lambda_learned, 5).tolist(),
}
with open(
"{}/experiments/{}/reports/summary_{}.yaml".format(
PATH, folder_name, file_number
),
"w",
) as outfile:
yaml.dump(summary, outfile, default_flow_style=False)
@extract_results.command()
@click.option("--folder_name", default="", help="folder name in experiment directory")
def real(folder_name):
# load model parameters
print("load model parameters.")
file = open(
"{}/experiments/{}/config/config_model.yml".format(PATH, folder_name), "rb"
)
config_m = yaml.load(file)
file.close()
# load data parameters
print("load data parameters.")
file = open(
"{}/experiments/{}/config/config_data.yml".format(PATH, folder_name), "rb"
)
config_d = yaml.load(file)
file.close()
# load data
print("load data.")
hf_data = h5py.File("{}/experiments/{}/data/data.h5".format(PATH, folder_name), "r")
g_ch = hf_data.get("{}".format(config_d["ch"]))
y_test = np.array(g_ch.get("y_test"))
hf_data.close()
for file in os.listdir("{}/experiments/{}/results/".format(PATH, folder_name)):
if fnmatch.fnmatch(file, "results_training_*"):
file_number = file[17:-3]
print("file number:", file_number)
# load training results
print("load training results.")
hf_training = h5py.File(
"{}/experiments/{}/results/results_training_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
# load prediction results
print("load prediction results.")
hf_prediction = h5py.File(
"{}/experiments/{}/results/results_prediction_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
g_ch = hf_prediction.get("{}".format(config_d["ch"]))
y_test_hat = np.array(g_ch.get("y_test_hat"))
z_test_hat = np.array(g_ch.get("z_test_hat"))
y_test_hat_separate = np.array(g_ch.get("y_test_hat_separate"))
H_init = np.array(g_ch.get(("H_init")))
lr_iterations = np.array(hf_training.get("lr_iterations"))
val_loss = np.array(hf_training.get("val_loss"))
train_loss = np.array(hf_training.get("train_loss"))
if config_m["lambda_trainable"]:
val_l1_norm_loss = np.array(hf_training.get("val_l1_norm_loss"))
loglambda_loss = np.array(hf_training.get("loglambda_loss"))
lambda_prior_loss = np.array(hf_training.get("lambda_prior_loss"))
train_l1_norm_loss = np.array(hf_training.get("train_l1_norm_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
H_epochs = np.array(hf_training.get("H_epochs"))
H_learned = np.array(hf_training.get("H_learned"))
lambda_donoho = np.array(hf_training.get("lambda_donoho"))
lambda_learned = np.array(hf_training.get("lambda_learned"))
hf_training.close()
hf_prediction.close()
################################################
# get distance error of the dictionary
dist_init_learned, best_permutation_index = get_err_h1_h2(H_init, H_learned)
num_conv = H_epochs.shape[-1]
num_epochs = H_epochs.shape[0]
dist_init_learned_epochs = np.zeros((num_conv, num_epochs))
for epoch in range(num_epochs):
dist_init_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_init, H_epochs[epoch, :, :, :], best_permutation_index
)
################################################
best_val_epoch = [np.argmin(monitor_val_loss)]
best_epoch = np.min(best_val_epoch)
plot_loss(
val_loss,
train_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["cycleLR"]:
plot_lr_iterations(
lr_iterations,
val_loss.shape[0],
PATH,
folder_name,
file_number,
line_width=2,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["lambda_trainable"]:
plot_lambda(
lambda_init,
lambda_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
row=1,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_noiseSTD(
noiseSTD_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_lambda_loss(
val_lambda_loss,
train_lambda_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_H_err_epochs_real(
dist_init_learned_epochs,
best_epoch,
PATH,
folder_name,
file_number,
y_fine=0.5,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
# plot dictionary
plot_H_real(
H_init,
H_learned,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
y_fine=0.5,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_denoise_real(
0,
y_test,
y_test_hat,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_code_real(
0,
z_test_hat,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_H_epochs_real(
H_init,
H_learned,
H_epochs,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
y_fine=0.5,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# RMSE
if y_test.shape[0] == 1:
RMSE_y_yhat_test = np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=0,
)
)
else:
RMSE_y_yhat_test = np.mean(
np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=1,
)
)
)
# l1 norm
l1_norm_z_test_hat = np.mean(np.sum(np.abs(z_test_hat), axis=1), axis=0)
summary = {
"distance error init learned": np.round(dist_init_learned, 3).tolist(),
"averaged distance error init learned": np.mean(
np.round(dist_init_learned, 3)
).tolist(),
"noiseSTD": np.round(noiseSTD, 3).tolist(),
"RMSE test": np.round(RMSE_y_yhat_test, 3).tolist(),
"l1 norm test estimated code": np.round(l1_norm_z_test_hat, 3).tolist(),
"lambda donoho": np.round(lambda_donoho, 5).tolist(),
"lambda learned": np.round(lambda_learned, 5).tolist(),
}
with open(
"{}/experiments/{}/reports/summary_{}.yaml".format(
PATH, folder_name, file_number
),
"w",
) as outfile:
yaml.dump(summary, outfile, default_flow_style=False)
@extract_results.command()
@click.option("--folder_name", default="", help="folder name in experiment directory")
def real_series(folder_name):
# load model parameters
print("load model parameters.")
file = open(
"{}/experiments/{}/config/config_model.yml".format(PATH, folder_name), "rb"
)
config_m = yaml.load(file)
file.close()
# load data parameters
print("load data parameters.")
file = open(
"{}/experiments/{}/config/config_data.yml".format(PATH, folder_name), "rb"
)
config_d = yaml.load(file)
file.close()
# load data
print("load data.")
hf_data = h5py.File("{}/experiments/{}/data/data.h5".format(PATH, folder_name), "r")
g_ch = hf_data.get("{}".format(config_d["ch"]))
# y_test = np.array(g_ch.get("y_test"))
y_test = np.array(g_ch.get("y_train"))
y_series = np.array(g_ch.get("y_series"))
noiseSTD = np.array(g_ch.get("noiseSTD"))
max_y = np.array(g_ch.get("max_y"))
hf_data.close()
# load spikes
print("load spikes.")
spikes = np.load("{}/experiments/{}/data/spikes.npy".format(PATH, folder_name))
all_missed_list = []
all_false_list = []
for file in os.listdir("{}/experiments/{}/results/".format(PATH, folder_name)):
if fnmatch.fnmatch(file, "results_training_*"):
# skip files related to multiple val shuffle of the same training
if file[-5] == "-":
continue
file_number = file[17:-3]
print("file number:", file_number)
H_epochs = []
best_val_epoch = []
# load training results
hf_training = h5py.File(
"{}/experiments/{}/results/results_training_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
# load prediction results
hf_prediction = h5py.File(
"{}/experiments/{}/results/results_prediction_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
g_ch = hf_prediction.get("{}".format(config_d["ch"]))
# y_test_hat = np.array(g_ch.get("y_test_hat"))
y_test_hat = np.array(g_ch.get("y_train_hat"))
# z_test_hat = np.array(g_ch.get("z_test_hat"))
z_test_hat = np.array(g_ch.get("z_train_hat"))
# y_test_hat_separate = np.array(g_ch.get("y_test_hat_separate"))
y_test_hat_separate = np.array(g_ch.get("y_train_hat_separate"))
y_series_hat = np.array(g_ch.get("y_series_hat"))
z_series_hat = np.array(g_ch.get("z_series_hat"))
y_series_hat_separate = np.array(g_ch.get("y_series_hat_separate"))
H_init = np.array(g_ch.get(("H_init")))
lr_iterations = np.array(hf_training.get("lr_iterations"))
val_loss = np.array(hf_training.get("val_loss"))
if config_m["lambda_trainable"]:
val_l1_norm_loss = np.array(hf_training.get("val_l1_norm_loss"))
loglambda_loss = np.array(hf_training.get("loglambda_loss"))
lambda_prior_loss = np.array(hf_training.get("lambda_prior_loss"))
train_l1_norm_loss = np.array(hf_training.get("train_l1_norm_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
train_loss = np.array(hf_training.get("train_loss"))
H_epochs = np.array(hf_training.get("H_epochs"))
H_learned = np.array(hf_training.get("H_learned"))
lambda_donoho = np.array(hf_training.get("lambda_donoho"))
lambda_learned = np.array(hf_training.get("lambda_learned"))
val_loss = np.array(hf_training.get("val_loss"))
train_loss = np.array(hf_training.get("train_loss"))
if config_m["lambda_trainable"]:
val_lambda_loss = np.array(hf_training.get("val_lambda_loss"))
train_lambda_loss = np.array(hf_training.get("train_lambda_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
H_epochs = np.array(hf_training.get("H_epochs"))
best_val_epoch.append(np.argmin(monitor_val_loss))
H_learned = np.array(hf_training.get("H_learned"))
hf_training.close()
hf_prediction.close()
################################################
# get distance error of the dictionary
dist_init_learned, best_permutation_index = get_err_h1_h2(H_init, H_learned)
num_conv = H_epochs.shape[-1]
num_epochs = H_epochs.shape[0]
dist_init_learned_epochs = np.zeros((num_conv, num_epochs))
for epoch in range(num_epochs):
dist_init_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_init, H_epochs[epoch, :, :, :], best_permutation_index
)
################################################
# get miss-false result and plot
spikes_channel = config_d["spikes_channel"]
event_range = config_d["event_range"]
for n in range(config_m["num_conv"]):
spikes_filter = n
y_series_hat_conv = y_series_hat_separate[:, :, spikes_filter]
th_list = np.double(np.arange(0, np.max(-y_series_hat_conv), 0.001))
print(th_list)
if file_number != "2018-08-10-11-56-14":
pass
# th_list /= max_y
z_conv = np.expand_dims(np.copy(spikes), axis=0)
missed_events, missed_list, false_events, false_list = get_miss_false(
z_conv, y_series_hat_conv, spikes_filter, th_list, event_range
)
# print(missed_list)
# print(false_list)
all_missed_list.append(missed_list)
all_false_list.append(false_list)
plot_miss_false(
missed_list,
false_list,
PATH,
folder_name,
file_number,
spikes_filter,
config_d["ch"],
line_width=4,
marker_size=20,
scale=1.2,
scale_height=1,
text_font=50,
title_font=50,
axes_font=50,
legend_font=50,
number_font=50,
)
filename = "{}/data/filters/miss_data_single.mat".format(PATH)
data = sio.loadmat(filename)
miss_single = data["cummissrate"] * 100
filename = "{}/data/filters/false_data_single.mat".format(PATH)
data = sio.loadmat(filename)
false_single = data["cumfprate"] * 100
plot_crsae_cbp_miss_false(
missed_list,
false_list,
miss_single,
false_single,
PATH,
folder_name,
file_number + "crsae_cbp",
spikes_filter,
config_d["ch"],
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=30,
axes_font=30,
legend_font=30,
number_font=30,
)
plot_H_and_miss_false(
H_init,
H_learned,
missed_list,
false_list,
miss_single,
false_single,
PATH,
folder_name,
file_number,
spikes_filter,
config_d["ch"],
config_d["sampling_rate"],
line_width=2.5,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=38,
title_font=60,
axes_font=40,
legend_font=45,
number_font=45,
)
################################################
best_epoch = np.min(best_val_epoch)
plot_separate_real_series(
0,
60000,
spikes,
y_series_hat_separate,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=30,
title_font=30,
axes_font=30,
legend_font=30,
number_font=25,
)
plot_loss(
val_loss,
train_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["cycleLR"]:
plot_lr_iterations(
lr_iterations,
val_loss.shape[0],
PATH,
folder_name,
file_number,
line_width=2,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["lambda_trainable"]:
plot_lambda(
lambda_init,
lambda_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
row=1,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_noiseSTD(
noiseSTD_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_lambda_loss(
val_lambda_loss,
train_lambda_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
# plot_H_err_epochs_real(
# dist_init_learned_epochs,
# best_epoch,
# best_val_epoch,
# PATH,
# folder_name,
# file_number,
# y_fine=0.5,
# line_width=2,
# marker_size=15,
# scale=1.2,
# scale_height=1,
# text_font=20,
# title_font=20,
# axes_font=20,
# legend_font=20,
# number_font=20,
# )
# plot dictionary
plot_H_real(
H_init,
H_learned,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
y_fine=0.5,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
scale = 4
scale_height = 0.5
plot_denoise_real(
0,
y_series,
y_series_hat,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=15,
scale=scale,
scale_height=scale_height,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_code_real(
0,
z_test_hat,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=15,
scale=scale,
scale_height=scale_height,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_H_epochs_real(
H_init,
H_learned,
H_epochs,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
y_fine=0.5,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# RMSE
if y_test.shape[0] == 1:
RMSE_y_yhat_test = np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=0,
)
)
else:
# RMSE_y_yhat_test = np.mean(
# np.sqrt(
# np.mean(
# np.power(
# (np.squeeze(y_test) - np.squeeze(y_test_hat)), 2
# ),
# axis=1,
# )
# )
# )
RMSE_y_yhat_test = 0
# l1 norm
# l1_norm_z_test_hat = np.mean(np.sum(np.abs(z_test_hat), axis=1), axis=0)
l1_norm_z_test_hat = 0
summary = {
"distance error init learned": np.round(dist_init_learned, 3).tolist(),
"averaged distance error init learned": np.mean(
np.round(dist_init_learned, 3)
).tolist(),
"noiseSTD": np.round(noiseSTD, 3).tolist(),
"RMSE test": np.round(RMSE_y_yhat_test, 3).tolist(),
"l1 norm test estimated code": np.round(l1_norm_z_test_hat, 3).tolist(),
"lambda donoho": np.round(lambda_donoho, 5).tolist(),
"lambda learned": np.round(lambda_learned, 5).tolist(),
}
with open(
"{}/experiments/{}/reports/summary_{}.yaml".format(
PATH, folder_name, file_number
),
"w",
) as outfile:
yaml.dump(summary, outfile, default_flow_style=False)
plot_all_miss_false(
all_missed_list,
all_false_list,
PATH,
folder_name,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
@extract_results.command()
@click.option("--folder_name", default="", help="folder name in experiment directory")
def simulated(folder_name):
# load model parameters
print("load model parameters.")
file = open(
"{}/experiments/{}/config/config_model.yml".format(PATH, folder_name), "rb"
)
config_m = yaml.load(file)
file.close()
# load data parameters
print("load data parameters.")
file = open(
"{}/experiments/{}/config/config_data.yml".format(PATH, folder_name), "rb"
)
config_d = yaml.load(file)
file.close()
# load data
print("load data.")
hf_data = h5py.File("{}/experiments/{}/data/data.h5".format(PATH, folder_name), "r")
y_train = np.array(hf_data.get("y_train"))
y_test = np.array(hf_data.get("y_test"))
y_train_noisy = np.array(hf_data.get("y_train_noisy"))
y_test_noisy = np.array(hf_data.get("y_test_noisy"))
z_test = np.array(hf_data.get("z_test"))
noiseSTD = np.array(hf_data.get("noiseSTD"))
hf_data.close()
for file in os.listdir("{}/experiments/{}/results/".format(PATH, folder_name)):
if fnmatch.fnmatch(file, "results_training_*"):
# skip files related to multiple val shuffle of the same training
if file[-5] == "-":
continue
file_number = file[17:-3]
print("file number:", file_number)
# load H_true
H_true = np.load(
"{}/experiments/{}/data/H_true.npy".format(PATH, folder_name)
)
H_epochs = []
best_val_epoch = []
# load training results
hf_training = h5py.File(
"{}/experiments/{}/results/results_training_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
# load prediction results
hf_prediction = h5py.File(
"{}/experiments/{}/results/results_prediction_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
g_ch = hf_prediction.get("{}".format(config_d["ch"]))
y_test_hat = np.array(g_ch.get("y_test_hat"))
z_test_hat = np.array(g_ch.get("z_test_hat"))
H_init = np.array(g_ch.get(("H_init")))
lambda_init = np.array(g_ch.get(("lambda_init")))
if config_m["cycleLR"]:
lr_iterations = np.array(hf_training.get("lr_iterations"))
val_loss = np.array(hf_training.get("val_loss"))
train_loss = np.array(hf_training.get("train_loss"))
if config_m["lambda_trainable"]:
val_lambda_loss = np.array(hf_training.get("val_lambda_loss"))
train_lambda_loss = np.array(hf_training.get("train_lambda_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
H_epochs = np.array(hf_training.get("H_epochs"))
lambda_epochs = np.array(hf_training.get("lambda_epochs"))
noiseSTD_epochs = np.array(hf_training.get("noiseSTD_epochs"))
best_val_epoch.append(np.argmin(monitor_val_loss))
H_learned = np.array(hf_training.get("H_learned"))
lambda_donoho = np.array(hf_training.get("lambda_donoho"))
lambda_learned = np.array(hf_training.get("lambda_learned"))
hf_training.close()
hf_prediction.close()
################################################
# get distance error of the dictionary
dist_true_learned, best_permutation_index = get_err_h1_h2(H_true, H_learned)
dist_true_init, temp = get_err_h1_h2(H_true, H_init, best_permutation_index)
dist_true_init_notswap, temp = get_err_h1_h2(H_true, H_init)
H_last = H_epochs[-1, :, :, :]
dist_true_last, best_permutation_index_last = get_err_h1_h2(H_true, H_last)
dist_true_init_last, temp = get_err_h1_h2(
H_true, H_init, best_permutation_index_last
)
num_conv = H_epochs.shape[-1]
num_epochs = H_epochs.shape[0]
dictionary_dim = H_epochs.shape[1]
dist_true_learned_epochs = np.zeros((num_conv, num_epochs))
dist_true_learned_epochs_last = np.zeros((num_conv, num_epochs))
for epoch in range(num_epochs):
dist_true_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_true, H_epochs[epoch, :, :, :], best_permutation_index
)
dist_true_learned_epochs_last[:, epoch], temp = get_err_h1_h2(
H_true, H_epochs[epoch, :, :, :], best_permutation_index_last
)
flip = np.ones(num_conv)
delay = np.zeros(num_conv)
flip_last = np.ones(num_conv)
delay_last = np.zeros(num_conv)
permutations = list(itertools.permutations(np.arange(0, num_conv, 1)))
for n in range(num_conv):
cross_corr = np.correlate(
H_true[:, 0, n],
H_learned[:, 0, permutations[best_permutation_index][n]],
"full",
)
delay[n] = dictionary_dim - np.argmax(abs(cross_corr)) - 1
pos_corr = np.max(cross_corr)
neg_corr = np.abs(np.min(cross_corr))
if pos_corr < neg_corr:
flip[n] *= -1
cross_corr = np.correlate(
H_true[:, 0, n],
H_last[:, 0, permutations[best_permutation_index_last][n]],
"full",
)
delay_last[n] = dictionary_dim - np.argmax(abs(cross_corr)) - 1
pos_corr = np.max(cross_corr)
neg_corr = np.abs(np.min(cross_corr))
if pos_corr < neg_corr:
flip_last[n] *= -1
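                # Illustrative note (not part of the original script): "full" correlation
                # has 2*dictionary_dim - 1 lags, so the argmax of |cross_corr| gives the
                # shift between the true and learned filter (delay = dictionary_dim -
                # argmax - 1 as above), and a dominant negative peak marks a sign flip.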
################################################
best_epoch = np.min(best_val_epoch)
plot_loss(
10 * np.log10(val_loss),
10 * np.log10(train_loss),
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["cycleLR"]:
plot_lr_iterations(
lr_iterations,
val_loss.shape[0],
PATH,
folder_name,
file_number,
line_width=2,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
if config_m["lambda_trainable"]:
plot_lambda(
lambda_init,
lambda_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
row=1,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_noiseSTD(
noiseSTD_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=30,
scale=4,
scale_height=0.5,
text_font=45,
title_font=55,
axes_font=48,
legend_font=34,
number_font=40,
)
plot_lambda_loss(
val_lambda_loss,
train_lambda_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_H_err_epochs_sim(
10 * np.log10(dist_true_learned_epochs),
10 * np.log10(dist_true_init),
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
y_fine=0.2,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_H_err_epochs_sim(
10 * np.log10(dist_true_learned_epochs_last),
10 * np.log10(dist_true_init_last),
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number + "last",
y_fine=0.2,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_H_err_epochs_sim_subplot(
10 * np.log10(dist_true_learned_epochs),
10 * np.log10(dist_true_init),
best_epoch,
best_val_epoch,
PATH,
folder_name,
file_number,
row=1,
y_fine=15,
line_width=2.2,
marker_size=15,
scale=4,
scale_height=0.75,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# plot dictionary
plot_H_sim(
H_true,
H_init,
H_learned,
best_permutation_index,
flip,
delay,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
row=1,
y_fine=0.5,
line_width=2.2,
marker_size=15,
scale=4,
scale_height=0.75,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_denoise_sim(
0,
y_test,
y_test_noisy,
y_test_hat,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_code_sim(
0,
z_test,
z_test_hat,
best_permutation_index,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
row=1,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_H_epochs_sim(
H_true,
H_init,
H_learned,
H_epochs,
best_permutation_index,
flip,
PATH,
folder_name,
file_number,
config_d["sampling_rate"],
row=1,
y_fine=0.5,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# RMSE
if y_test_noisy.shape[0] == 1:
RMSE_y_yhat_test = np.sqrt(
np.mean(
np.power(
(np.squeeze(y_test_noisy) - np.squeeze(y_test_hat)), 2
),
axis=0,
)
)
RMSE_ytrue_yhat_test = np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=0,
)
)
else:
RMSE_y_yhat_test = np.mean(
np.sqrt(
np.mean(
np.power(
(np.squeeze(y_test_noisy) - np.squeeze(y_test_hat)), 2
),
axis=1,
)
)
)
RMSE_ytrue_yhat_test = np.mean(
np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=1,
)
)
)
# l1 norm
l1_norm_z_test = np.mean(np.sum(np.abs(z_test), axis=1), axis=0)
l1_norm_z_test_hat = np.mean(np.sum(np.abs(z_test_hat), axis=1), axis=0)
summary = {
"distance error true init not swap": np.round(
dist_true_init_notswap, 3
).tolist(),
"distance error true init": np.round(dist_true_init, 3).tolist(),
"distance error true learned": np.round(dist_true_learned, 3).tolist(),
"distance error true init from last": np.round(
dist_true_init_last, 3
).tolist(),
"distance error true last": np.round(dist_true_last, 3).tolist(),
"averaged distance error true learned": np.mean(
np.round(dist_true_learned, 3)
).tolist(),
"averaged distance error true last": np.mean(
np.round(dist_true_last, 3)
).tolist(),
"noiseSTD": np.round(noiseSTD, 3).tolist(),
"RMSE test": np.round(RMSE_y_yhat_test, 3).tolist(),
"RMSE test compared to true": np.round(
RMSE_ytrue_yhat_test, 3
).tolist(),
"l1 norm test code": np.round(l1_norm_z_test, 3).tolist(),
"l1 norm test estimated code": np.round(l1_norm_z_test_hat, 3).tolist(),
"lambda donoho": np.round(lambda_donoho, 5).tolist(),
"lambda learned": np.round(lambda_learned, 5).tolist(),
}
with open(
"{}/experiments/{}/reports/summary_{}.yaml".format(
PATH, folder_name, file_number
),
"w",
) as outfile:
yaml.dump(summary, outfile, default_flow_style=False)
@extract_results.command()
@click.option("--folder_name", default="", help="folder name in experiment directory")
def lcsc_simulated(folder_name):
# load model parameters
print("load model parameters.")
file = open(
"{}/experiments/{}/config/config_model.yml".format(PATH, folder_name), "rb"
)
config_m = yaml.load(file)
file.close()
# load data parameters
print("load data parameters.")
file = open(
"{}/experiments/{}/config/config_data.yml".format(PATH, folder_name), "rb"
)
config_d = yaml.load(file)
file.close()
# load data
print("load data.")
hf_data = h5py.File("{}/experiments/{}/data/data.h5".format(PATH, folder_name), "r")
y_train = np.array(hf_data.get("y_train"))
y_test = np.array(hf_data.get("y_test"))
y_train_noisy = np.array(hf_data.get("y_train_noisy"))
y_test_noisy = np.array(hf_data.get("y_test_noisy"))
z_test = np.array(hf_data.get("z_test"))
noiseSTD = np.array(hf_data.get("noiseSTD"))
hf_data.close()
for file in os.listdir("{}/experiments/{}/results/".format(PATH, folder_name)):
if fnmatch.fnmatch(file, "LCSC_results_training_*"):
# skip files related to multiple val shuffle of the same training
if file[-5] == "-":
continue
file_number = file[22:-3]
print("file number:", file_number)
# load H_true
H_true = np.load(
"{}/experiments/{}/data/H_true.npy".format(PATH, folder_name)
)
H_epochs = []
best_val_epoch = []
# load training results
hf_training = h5py.File(
"{}/experiments/{}/results/LCSC_results_training_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
# load prediction results
hf_prediction = h5py.File(
"{}/experiments/{}/results/LCSC_results_prediction_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
g_ch = hf_prediction.get("{}".format(config_d["ch"]))
y_test_hat = np.array(g_ch.get("y_test_hat"))
z_test_hat = np.array(g_ch.get("z_test_hat"))
            Wd_init = np.array(g_ch.get("Wd_init"))
            We_init = np.array(g_ch.get("We_init"))
            d_init = np.array(g_ch.get("d_init"))
            lambda_init = np.array(g_ch.get("lambda_init"))
val_loss = np.array(hf_training.get("val_loss"))
train_loss = np.array(hf_training.get("train_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
Wd_epochs = np.array(hf_training.get("Wd_epochs"))
We_epochs = np.array(hf_training.get("We_epochs"))
d_epochs = np.array(hf_training.get("d_epochs"))
lambda_epochs = np.array(hf_training.get("lambda_epochs"))
best_val_epoch.append(np.argmin(monitor_val_loss))
Wd_learned = np.array(hf_training.get("Wd_learned"))
We_learned = np.array(hf_training.get("We_learned"))
d_learned = np.array(hf_training.get("d_learned"))
lambda_learned = np.array(hf_training.get("lambda_learned"))
hf_training.close()
hf_prediction.close()
################################################
# get distance error of the weights
dist_We_true_learned, best_We_permutation_index = get_err_h1_h2(
H_true, We_learned
)
dist_We_true_init, temp = get_err_h1_h2(
H_true, We_init, best_We_permutation_index
)
dist_Wd_true_learned, best_Wd_permutation_index = get_err_h1_h2(
H_true, np.expand_dims(np.flip(np.squeeze(Wd_learned), axis=0), axis=1)
)
dist_Wd_true_init, temp = get_err_h1_h2(
H_true,
np.expand_dims(np.flip(np.squeeze(Wd_init), axis=0), axis=1),
best_Wd_permutation_index,
)
dist_d_true_learned, best_d_permutation_index = get_err_h1_h2(
H_true, np.expand_dims(np.flip(np.squeeze(d_learned), axis=0), axis=1)
)
dist_d_true_init, temp = get_err_h1_h2(
H_true,
np.expand_dims(np.flip(np.squeeze(d_init), axis=0), axis=1),
best_d_permutation_index,
)
num_conv = We_epochs.shape[-1]
num_epochs = We_epochs.shape[0]
dictionary_dim = We_epochs.shape[1]
dist_We_true_learned_epochs = np.zeros((num_conv, num_epochs))
dist_Wd_true_learned_epochs = np.zeros((num_conv, num_epochs))
dist_d_true_learned_epochs = np.zeros((num_conv, num_epochs))
for epoch in range(num_epochs):
dist_We_true_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_true, We_epochs[epoch, :, :, :], best_We_permutation_index
)
dist_Wd_true_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_true,
np.expand_dims(
np.flip(np.squeeze(Wd_epochs[epoch, :, :, :]), axis=0), axis=1
),
best_Wd_permutation_index,
)
dist_d_true_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_true,
np.expand_dims(
np.flip(np.squeeze(d_epochs[epoch, :, :, :]), axis=0), axis=1
),
best_d_permutation_index,
)
flip = np.ones(num_conv)
delay = np.zeros(num_conv)
permutations = list(itertools.permutations(np.arange(0, num_conv, 1)))
# for n in range(num_conv):
# cross_corr = np.correlate(
# H_true[:, 0, n],
# H_learned[:, 0, permutations[best_permutation_index][n]],
# "full",
# )
# delay[n] = dictionary_dim - np.argmax(abs(cross_corr)) - 1
# pos_corr = np.max(cross_corr)
# neg_corr = np.abs(np.min(cross_corr))
#
# if pos_corr < neg_corr:
# flip[n] *= -1
################################################
best_epoch = np.min(best_val_epoch)
plot_loss(
val_loss,
train_loss,
best_epoch,
best_val_epoch,
PATH,
folder_name,
"LCSC_" + file_number,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_lambda(
lambda_init,
lambda_epochs,
best_epoch,
best_val_epoch,
PATH,
folder_name,
"LCSC_" + file_number,
row=1,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
)
plot_H_err_epochs_sim(
dist_We_true_learned_epochs,
dist_We_true_init,
best_epoch,
best_val_epoch,
PATH,
folder_name,
"LCSC_" + file_number,
y_fine=0.2,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
output_name="We",
)
plot_H_err_epochs_sim(
dist_Wd_true_learned_epochs,
dist_Wd_true_init,
best_epoch,
best_val_epoch,
PATH,
folder_name,
"LCSC_" + file_number,
y_fine=0.2,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
output_name="Wd",
)
plot_H_err_epochs_sim(
dist_d_true_learned_epochs,
dist_d_true_init,
best_epoch,
best_val_epoch,
PATH,
folder_name,
"LCSC_" + file_number,
y_fine=0.2,
line_width=2,
marker_size=15,
scale=1.2,
scale_height=1,
text_font=20,
title_font=20,
axes_font=20,
legend_font=20,
number_font=20,
output_name="d",
)
# plot Wd
plot_Wd_sim(
H_true,
Wd_init,
Wd_learned,
best_Wd_permutation_index,
flip,
delay,
PATH,
folder_name,
"LCSC_" + file_number,
config_d["sampling_rate"],
row=1,
y_fine=0.5,
line_width=2.2,
marker_size=15,
scale=4,
scale_height=0.75,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# plot We
plot_We_sim(
H_true,
We_init,
We_learned,
best_We_permutation_index,
flip,
delay,
PATH,
folder_name,
"LCSC_" + file_number,
config_d["sampling_rate"],
row=1,
y_fine=0.5,
line_width=2.2,
marker_size=15,
scale=4,
scale_height=0.75,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# plot d
plot_d_sim(
H_true,
d_init,
d_learned,
best_d_permutation_index,
flip,
delay,
PATH,
folder_name,
"LCSC_" + file_number,
config_d["sampling_rate"],
row=1,
y_fine=0.5,
line_width=2.2,
marker_size=15,
scale=4,
scale_height=0.75,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_denoise_sim(
0,
y_test,
y_test_noisy,
y_test_hat,
PATH,
folder_name,
"LCSC_" + file_number,
config_d["sampling_rate"],
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
plot_code_sim(
0,
z_test,
z_test_hat,
best_d_permutation_index,
PATH,
folder_name,
"LCSC_" + file_number,
config_d["sampling_rate"],
row=1,
line_width=2,
marker_size=15,
scale=4,
scale_height=0.5,
text_font=45,
title_font=45,
axes_font=48,
legend_font=32,
number_font=40,
)
# RMSE
if y_test_noisy.shape[0] == 1:
RMSE_y_yhat_test = np.sqrt(
np.mean(
np.power(
(np.squeeze(y_test_noisy) - np.squeeze(y_test_hat)), 2
),
axis=0,
)
)
RMSE_ytrue_yhat_test = np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=0,
)
)
else:
RMSE_y_yhat_test = np.mean(
np.sqrt(
np.mean(
np.power(
(np.squeeze(y_test_noisy) - np.squeeze(y_test_hat)), 2
),
axis=1,
)
)
)
RMSE_ytrue_yhat_test = np.mean(
np.sqrt(
np.mean(
np.power((np.squeeze(y_test) - np.squeeze(y_test_hat)), 2),
axis=1,
)
)
)
# l1 norm
l1_norm_z_test = np.mean(np.sum(np.abs(z_test), axis=1), axis=0)
l1_norm_z_test_hat = np.mean(np.sum(np.abs(z_test_hat), axis=1), axis=0)
summary = {
"Wd distance error true init": np.round(dist_Wd_true_init, 3).tolist(),
"Wd distance error true learned": np.round(
dist_Wd_true_learned, 3
).tolist(),
"Wd averaged distance error true learned": np.mean(
np.round(dist_Wd_true_learned, 3)
).tolist(),
"We distance error true init": np.round(dist_We_true_init, 3).tolist(),
"We distance error true learned": np.round(
dist_We_true_learned, 3
).tolist(),
"We averaged distance error true learned": np.mean(
np.round(dist_We_true_learned, 3)
).tolist(),
"d distance error true init": np.round(dist_d_true_init, 3).tolist(),
"d distance error true learned": np.round(
dist_d_true_learned, 3
).tolist(),
"d averaged distance error true learned": np.mean(
np.round(dist_d_true_learned, 3)
).tolist(),
"noiseSTD": np.round(noiseSTD, 3).tolist(),
"RMSE test": np.round(RMSE_y_yhat_test, 8).tolist(),
"RMSE test compared to true": np.round(
RMSE_ytrue_yhat_test, 8
).tolist(),
"l1 norm test code": np.round(l1_norm_z_test, 3).tolist(),
"l1 norm test estimated code": np.round(l1_norm_z_test_hat, 3).tolist(),
"lambda learned": np.round(lambda_learned, 5).tolist(),
}
with open(
"{}/experiments/{}/reports/LCSC_summary_{}.yaml".format(
PATH, folder_name, file_number
),
"w",
) as outfile:
yaml.dump(summary, outfile, default_flow_style=False)
@extract_results.command()
@click.option("--folder_name", default="", help="folder name in experiment directory")
def tlae_simulated(folder_name):
# load model parameters
print("load model parameters.")
file = open(
"{}/experiments/{}/config/config_model.yml".format(PATH, folder_name), "rb"
)
    config_m = yaml.load(file, Loader=yaml.FullLoader)
file.close()
# load data parameters
print("load data parameters.")
file = open(
"{}/experiments/{}/config/config_data.yml".format(PATH, folder_name), "rb"
)
    config_d = yaml.load(file, Loader=yaml.FullLoader)
file.close()
# load data
print("load data.")
hf_data = h5py.File("{}/experiments/{}/data/data.h5".format(PATH, folder_name), "r")
y_train = np.array(hf_data.get("y_train"))
y_test = np.array(hf_data.get("y_test"))
y_train_noisy = np.array(hf_data.get("y_train_noisy"))
y_test_noisy = np.array(hf_data.get("y_test_noisy"))
z_test = np.array(hf_data.get("z_test"))
noiseSTD = np.array(hf_data.get("noiseSTD"))
hf_data.close()
for file in os.listdir("{}/experiments/{}/results/".format(PATH, folder_name)):
if fnmatch.fnmatch(file, "TLAE_results_training_*"):
# skip files related to multiple val shuffle of the same training
if file[-5] == "-":
continue
file_number = file[22:-3]
print("file number:", file_number)
# load H_true
H_true = np.load(
"{}/experiments/{}/data/H_true.npy".format(PATH, folder_name)
)
H_epochs = []
best_val_epoch = []
hf_training = h5py.File(
"{}/experiments/{}/results/TLAE_results_training_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
# load prediction results
hf_prediction = h5py.File(
"{}/experiments/{}/results/TLAE_results_prediction_{}.h5".format(
PATH, folder_name, file_number
),
"r",
)
g_ch = hf_prediction.get("{}".format(config_d["ch"]))
z_test_hat = np.array(g_ch.get("z_test_hat"))
            H_init = np.array(g_ch.get("H_init"))
            lambda_init = np.array(g_ch.get("lambda_init"))
val_loss = np.array(hf_training.get("val_loss"))
train_loss = np.array(hf_training.get("train_loss"))
monitor_val_loss = np.array(hf_training.get(config_m["loss_type"]))
H_epochs = np.array(hf_training.get("H_epochs"))
best_val_epoch.append(np.argmin(monitor_val_loss))
H_learned = np.array(hf_training.get("H_learned"))
lambda_donoho = np.array(hf_training.get("lambda_donoho"))
lambda_learned = np.array(hf_training.get("lambda_learned"))
hf_training.close()
hf_prediction.close()
################################################
# get distance error of the dictionary
dist_true_learned, best_permutation_index = get_err_h1_h2(H_true, H_learned)
best_permutation_index = 0
dist_true_init, temp = get_err_h1_h2(H_true, H_init, best_permutation_index)
num_conv = H_epochs.shape[-1]
num_epochs = H_epochs.shape[0]
dictionary_dim = H_epochs.shape[1]
dist_true_learned_epochs = np.zeros((num_conv, num_epochs))
for epoch in range(num_epochs):
dist_true_learned_epochs[:, epoch], temp = get_err_h1_h2(
H_true, H_epochs[epoch, :, :, :], best_permutation_index
)
flip = np.ones(num_conv)
delay = np.zeros(num_conv)
permutations = list(itertools.permutations(np.arange(0, num_conv, 1)))
################################################
            best_epoch = np.min(best_val_epoch)
import numpy as np
import time
pi = np.pi
naxis = np.newaxis
F_2D = lambda x: np.fft.fft2(x, axes=(0, 1))
IF_2D = lambda x: np.fft.ifft2(x, axes=(0, 1))
F_3D = lambda x: np.fft.fftn(x, axes=(0, 1, 2))
IF_3D = lambda x: np.fft.ifftn(x, axes=(0, 1, 2))
def pupilGen(fxlin, fylin, wavelength, na, na_in=0.0):
'''
    pupilGen creates a circular pupil function in Fourier space.
Inputs:
fxlin : 1D spatial frequency coordinate in horizontal direction
fylin : 1D spatial frequency coordinate in vertical direction
wavelength: wavelength of incident light
na : numerical aperture of the imaging system
na_in : put a non-zero number smaller than na to generate an annular function
Output:
pupil : pupil function
'''
pupil = np.array(fxlin[naxis, :]**2+fylin[:, naxis]**2 <= (na/wavelength)**2, dtype="float32")
if na_in != 0.0:
pupil[fxlin[naxis, :]**2+fylin[:, naxis]**2 < (na_in/wavelength)**2] = 0.0
return pupil
def _genGrid(size, dx):
'''
    _genGrid creates a 1D coordinate vector.
Inputs:
size : length of the coordinate vector
dx : step size of the 1D coordinate
Output:
grid : 1D coordinate vector
'''
xlin = np.arange(size, dtype='complex64')
return (xlin-size//2)*dx
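# Example (illustrative sketch; the 256-pixel grid, 0.1 um pixel size, 0.532 um wavelength
# and 0.25 NA are assumed values, mirroring how Solver3DDPC below builds its grids):
# fx_demo = np.fft.ifftshift(_genGrid(256, 1.0/256/0.1))
# fy_demo = np.fft.ifftshift(_genGrid(256, 1.0/256/0.1))
# pupil_demo = pupilGen(fx_demo, fy_demo, wavelength=0.532, na=0.25)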
class Solver3DDPC:
'''
    Solver3DDPC class provides methods to preprocess 3D DPC measurements and recover the 3D refractive index with Tikhonov or TV regularization.
'''
def __init__(self, dpc_imgs, wavelength, na, na_in, pixel_size, pixel_size_z, rotation, RI_medium):
'''
Initialize system parameters and functions for DPC phase microscopy.
'''
self.wavelength = wavelength
self.na = na
self.na_in = na_in
self.pixel_size = pixel_size
self.pixel_size_z = pixel_size_z
self.rotation = rotation
self.dpc_num = len(rotation)
self.fxlin = np.fft.ifftshift(_genGrid(dpc_imgs.shape[1], 1.0/dpc_imgs.shape[1]/self.pixel_size))
self.fylin = np.fft.ifftshift(_genGrid(dpc_imgs.shape[0], 1.0/dpc_imgs.shape[0]/self.pixel_size))
self.dpc_imgs = dpc_imgs.astype('float32')
self.RI_medium = RI_medium
self.window = np.fft.ifftshift(np.hamming(dpc_imgs.shape[2]))
self.pupil = pupilGen(self.fxlin, self.fylin, self.wavelength, self.na)
self.phase_defocus = self.pupil*2.0*pi*((1.0/wavelength)**2-self.fxlin[naxis, :]**2-self.fylin[:, naxis]**2)**0.5
self.oblique_factor = self.pupil/4.0/pi/((RI_medium/wavelength)**2-self.fxlin[naxis, :]**2-self.fylin[:, naxis]**2)**0.5
self.normalization()
self.sourceGen()
self.WOTFGen()
def normalization(self):
'''
Normalize the 3D intensity stacks by their average illumination intensities, and subtract the mean.
'''
self.dpc_imgs /= np.mean(self.dpc_imgs, axis=(0, 1, 2), keepdims=True)
self.dpc_imgs -= 1.0
def sourceGen(self):
'''
Generate DPC source patterns based on the rotation angles and numerical aperture of the illuminations.
'''
self.source = []
pupil = pupilGen(self.fxlin, self.fylin, self.wavelength, self.na, na_in=self.na_in)
for rot_index in range(self.dpc_num):
self.source.append(np.zeros((self.dpc_imgs.shape[:2]), dtype='float32'))
rotdegree = self.rotation[rot_index]
if rotdegree < 180:
self.source[-1][self.fylin[:, naxis]*np.cos(np.deg2rad(rotdegree))+1e-15>=
self.fxlin[naxis, :]*np.sin(np.deg2rad(rotdegree))] = 1.0
self.source[-1] *= pupil
else:
self.source[-1][self.fylin[:, naxis]*np.cos(np.deg2rad(rotdegree))+1e-15<
self.fxlin[naxis, :]*np.sin(np.deg2rad(rotdegree))] = -1.0
self.source[-1] *= pupil
self.source[-1] += pupil
self.source = np.asarray(self.source)
def sourceFlip(self, source):
'''
Flip the sources in vertical and horizontal directions, since the coordinates of the source plane and the pupil plane are opposite.
'''
source_flip = np.fft.fftshift(source)
source_flip = source_flip[::-1, ::-1]
if np.mod(source_flip.shape[0], 2)==0:
source_flip = np.roll(source_flip, 1, axis=0)
        if np.mod(source_flip.shape[1], 2)==0:
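            # (assumed completion) mirror the even-height handling above for even widths,
            # then return the flipped source
            source_flip = np.roll(source_flip, 1, axis=1)
        return source_flip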
# Copyright 2018 <NAME>. All rights reserved.
#
# Licensed under the MIT license
"""
Script for panels of Figure 1 (Zebrafish model training, evolution and navigation)
"""
import core as c
import analysis as a
from global_defs import GlobalDefs
import os
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as pl
import numpy as np
import h5py
from data_stores import SimulationStore
from mo_types import MoTypes
from pandas import DataFrame
from scipy.stats import wilcoxon
# file definitions
base_path = "./model_data/Adam_1e-4/sepInput_mixTrain/"
paths_1024 = [f+'/' for f in os.listdir(base_path) if "_3m1024_" in f]
paths_512 = [f+'/' for f in os.listdir(base_path) if "_3m512_" in f]
paths_256 = [f+'/' for f in os.listdir(base_path) if "_3m256_" in f]
def test_loss(path):
fname = base_path + path + "losses.hdf5"
lossfile = h5py.File(fname, "r")
test_losses = np.array(lossfile["test_losses"])
rank_errors = np.array(lossfile["test_rank_errors"])
timepoints = np.array(lossfile["test_eval"])
return timepoints, test_losses, rank_errors
def ev_path(path):
return base_path + path + "evolve/"
def mpath(path):
return base_path + path[:-1] # need to remove trailing slash
def get_bout_starts(pos: np.ndarray) -> np.ndarray:
"""
Extract bout starts from network position trace
:param pos: nx3 trace of x, y, angle at each timepoint
:return: Array of indices corresponding to bout starts
"""
spd = np.r_[0, np.sqrt(np.sum(np.diff(pos[:, :2], axis=0) ** 2, 1))] # speed
bs = np.r_[0, np.diff(spd) > 0.00098] # bout starts
return bs
def get_bout_da(pos: np.ndarray, starts: np.ndarray) -> np.ndarray:
"""
For each bout indicated by starts get the angle turned
:param pos: nx3 trace of x, y, angle at each timepoint
:param starts: Array of indices corresponding to bout starts
:return: For each bout in starts the turning angle
"""
starts = np.arange(pos.shape[0])[starts.astype(bool)]
ix_pre = starts - 10
ix_pre[ix_pre < 0] = 0
ix_post = starts + 10
ix_post[ix_post >= pos.shape[0]] = pos.shape[0]-1
da = pos[ix_post, 2] - pos[ix_pre, 2]
return da
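# Example (illustrative; the synthetic trace below is an assumption, a real one comes from
# the simulation store):
# _pos = np.zeros((100, 3)); _pos[50:, 0] = 1.0; _pos[50:, 2] = 0.3
# _starts = get_bout_starts(_pos)       # one detected bout around index 50
# _angles = get_bout_da(_pos, _starts)  # turn angle of that bout (~0.3 rad)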
def compute_gradient_bout_frequency(model_path, drop_list=None):
def bout_freq(pos: np.ndarray):
r = np.sqrt(np.sum(pos[:, :2]**2, 1)) # radial position
bs = get_bout_starts(pos) # bout starts
bins = np.linspace(0, GlobalDefs.circle_sim_params["radius"], 6)
        bcenters = bins[:-1] + np.diff(bins) / 2  # assumed half-bin offset so that bcenters hold bin centers
from scipy.ndimage.filters import convolve
from scipy.sparse import lil_matrix, block_diag
from scipy.sparse.linalg import spsolve
from scipy.ndimage.morphology import distance_transform_edt
from scipy import ndimage, fft
from tqdm import tqdm
import numpy as np
import cv2
# useful kernels
four_neighbors_kernel = [[0, 1, 0],
[1, 0, 1],
[0, 1, 0]]
four_neighbors_kernel_with_center = [[0, 1, 0],
[1, 1, 1],
[0, 1, 0]]
basic_vector_field_kernel = [[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]]
def get_4_neigbours_amount(img):
"""
    convolves img in order to count, for each pixel p, how many of its 4-connected neighbors are in S
:param img: float np 2d array
:return: 2d np array equals |N_p| in the pixel (2d-coords) p
"""
ones = np.ones_like(img)
return convolve(ones, four_neighbors_kernel, mode='constant', cval=0.0)
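# Example (illustrative): for a 3x3 image the in-bounds 4-neighbor count is 2 at the
# corners, 3 on the edges and 4 at the center:
# get_4_neigbours_amount(np.zeros((3, 3)))
# -> array([[2., 3., 2.],
#           [3., 4., 3.],
#           [2., 3., 2.]])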
def get_omega_boundary(img):
"""
    dilates img and takes the difference between the dilated img and the original img
:param img: equals the mask float np 2d array
:return:
"""
dilated = ndimage.binary_dilation(img, structure=four_neighbors_kernel_with_center).astype(np.float64)
return dilated - img
def get_basic_vector_field(img):
"""
calculate sum of v_pq in 4-connected components as defined in term (11) of the paper
:param img: float np 2d array
:return: 2d array where in every entry it has the summation result
"""
tmp = convolve(img, basic_vector_field_kernel, mode='constant', cval=0.0)
return tmp
def make_identity_off_mask(mask, mat, y_range, x_range):
"""
:param mask: binary mask defining f function
:param mat: sparse matrix of the left hand side equation system
:param y_range: obtained from apply_offset
:param x_range: obtained from apply_offset
:return:
"""
for y in range(1, y_range - 1):
for x in range(1, x_range - 1):
if mask[y, x] == 0:
ind = x + y * x_range
mat[ind, ind] = 1
mat[ind, ind + 1] = 0
mat[ind, ind - 1] = 0
mat[ind, ind + x_range] = 0
mat[ind, ind - x_range] = 0
def apply_offset(offset, source, target, mask):
"""
Warp source according to offset.
:param offset:
:param source:
:param target:
:param mask:
:return:
"""
y_max, x_max = target.shape[:2]
y_min, x_min = 0, 0
x_range = x_max - x_min
y_range = y_max - y_min
M = np.float64([[1, 0, offset[0]], [0, 1, offset[1]]])
warped_source = cv2.warpAffine(source, M, (x_range, y_range))
mask = mask[y_min:y_max, x_min:x_max]
return warped_source, mask, y_max, x_max, y_min, x_min, x_range, y_range
def get_laplacian_mat(n, m):
"""
taken from Git *** https://github.com/PPPW/poisson-image-editing
Generate the Poisson matrix.
Refer to:
https://en.wikipedia.org/wiki/Discrete_Poisson_equation
Note: it's the transpose of the wiki's matrix
"""
mat_D = lil_matrix((m, m))
mat_D.setdiag(-1, -1)
mat_D.setdiag(4)
mat_D.setdiag(-1, 1)
mat_A = block_diag([mat_D] * n).tolil()
mat_A.setdiag(-1, 1 * m)
mat_A.setdiag(-1, -1 * m)
return mat_A
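# Example (illustrative): the Poisson matrix for a 2x3 grid is 6x6, with 4 on the main
# diagonal and -1 on the couplings to horizontal and vertical neighbors:
# A_demo = get_laplacian_mat(2, 3)
# A_demo.shape  # -> (6, 6)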
def get_grad_magnitude(img):
"""
    returns the squared gradient magnitude (no square root applied) as float64
:param img:
:return:
"""
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
mag = sobelx**2 + sobely**2
return mag
def grad_vector_field_12(source, target):
"""
corresponding to vector field from equation (12) in the paper.
:param source:
:param target:
:return:
"""
grad_g = get_grad_magnitude(source)
grad_f_star = get_grad_magnitude(target)
cond = np.abs(grad_f_star) > np.abs(grad_g)
eq_right = np.where(cond, target, source)
return eq_right.flatten()
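# Example (illustrative, assumed inputs): with the mixed-gradient rule of eq. (12), pixels
# where the target gradient dominates keep the target value, otherwise the source value:
# grad_vector_field_12(np.eye(4), np.zeros((4, 4))).shape  # -> (16,)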
def seamless_cloning_single_channel(source, target, mask, offset, gradient_field_source_only, vec_field):
"""
:param vec_field:
:param source:
:param target:
:param mask:
:param offset:
:param gradient_field_source_only:
:return:
"""
source, mask, y_max, x_max, y_min, x_min, x_range, y_range = apply_offset(offset, source, target, mask)
laplacian = get_laplacian_mat(y_range, x_range)
flat_source = source[y_min:y_max, x_min:x_max].flatten()
flat_target = target[y_min:y_max, x_min:x_max].flatten()
flat_mask = mask.flatten()
eq_left_sys = laplacian.tocsc()
if gradient_field_source_only:
# inside f
eq_right = laplacian.dot(flat_source)
else:
# process using a special vector field.
vec_field = grad_vector_field_12 if vec_field is None else vec_field
eq_right = vec_field(source, target)
eq_right = laplacian.dot(eq_right)
flat_eq_right = eq_right.flatten()
# outside f
flat_eq_right[flat_mask == 0] = flat_target[flat_mask == 0]
make_identity_off_mask(mask, eq_left_sys, y_range, x_range)
s = spsolve(eq_left_sys, flat_eq_right).astype(np.float64)
# reconstruct image
blend = s.reshape(target.shape)
blend = (blend.clip(0, 1) * 255).astype('uint8')
return blend
def seamless_cloning(source, target, mask, offset=(0, 0), gradient_field_source_only=True, vec_field=None):
"""
Based on Poisson solver
:param vec_field:
:param source:
:param target:
:param mask:
:param offset:
:param gradient_field_source_only:
:return:
"""
mask = mask > 0.1
mask = mask.astype('uint8')
    result = np.zeros_like(target, dtype='uint8')
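    # (assumed completion) blend each color channel independently with the Poisson solver
    for ch in range(target.shape[2]):
        result[:, :, ch] = seamless_cloning_single_channel(
            source[:, :, ch], target[:, :, ch], mask, offset,
            gradient_field_source_only, vec_field)
    return result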
import numpy as np
import torch
import math
import cv2
from ....utils import box_utils
from ....utils.center_utils import draw_umich_gaussian, gaussian_radius, draw_seg_mask
class CenterTargetAssigner(object):
def __init__(self, model_cfg, voxel_size, point_cloud_range, class_names):
super().__init__()
target_cfg = model_cfg.TARGET_ASSIGNER_CONFIG
self.class_names = np.array(class_names)
self.gaussian_minoverlap = target_cfg.GAUSSIAN_MINOVERLAP
self.gaussian_minradius = target_cfg.GAUSSIAN_MINRADIUS
self.feature_map_stride = target_cfg.FEATURE_MAP_STRIDE
self.max_objs = target_cfg.MAX_OBJS
self.point_cloud_range = point_cloud_range
self.voxel_size = np.array(voxel_size)
def assign_targets(self, gt_boxes_with_classes):
"""
Args:
gt_boxes_with_classes: (B, M, 8) [x,y,z,dimx(l),dimy(w),dimz(h),rot,cls]
Return:
hm_target: (B, n_dim(=num_class), mapsizey, mapsizex)
anno_box_target: (B, max_objs, 7)
ind_target: (B, max_objs, )
mask: (B, max_objs, )
batch_gtboxes_src: (B, M, 8)
"""
batch_gtboxes_src = gt_boxes_with_classes.clone()
target_device = gt_boxes_with_classes.device
# move to cpu
gt_boxes_with_classes_np = gt_boxes_with_classes.cpu().numpy()
batch_size = gt_boxes_with_classes_np.shape[0]
gt_classes = gt_boxes_with_classes_np[:, :, -1]
gt_boxes = gt_boxes_with_classes_np[:, :, :-1]
target_list = []
for k in range(batch_size):
cur_gt = gt_boxes[k]
cnt = cur_gt.__len__() - 1
while cnt > 0 and cur_gt[cnt].sum() == 0:
cnt -= 1
cur_gt = cur_gt[:cnt + 1]
# cur_gt_classes = gt_classes[k][:cnt + 1].int()
cur_gt_classes = (gt_classes[k][:cnt + 1]).astype(np.int8)
single_target = self.assign_target_maps_single(
gt_boxes=cur_gt,
gt_classes=cur_gt_classes,
num_classes=self.class_names.shape[0],
max_objs=self.max_objs,
gaussian_minoverlap=self.gaussian_minoverlap,
gaussian_minradius=self.gaussian_minradius,
point_cloud_range=self.point_cloud_range,
feature_map_stride=self.feature_map_stride,
voxel_size=self.voxel_size
)
target_list.append(single_target)
# stack to batch format
target_dict = {
'hm_target': torch.from_numpy( np.stack( [t['hm'] for t in target_list], axis=0) ).to(target_device),
'anno_box_target': torch.from_numpy( np.stack( [t['anno_box'] for t in target_list], axis=0) ).to(target_device),
'ind_target': torch.from_numpy( np.stack( [t['ind'] for t in target_list], axis=0) ).to(target_device),
'mask_target': torch.from_numpy( np.stack( [t['mask'] for t in target_list], axis=0) ).to(target_device),
'segm_target': torch.from_numpy( np.stack( [t['segm'] for t in target_list], axis=0) ).to(target_device),
'height_target': torch.from_numpy( np.stack( [t['height'] for t in target_list], axis=0) ).to(target_device),
'src_box_target': torch.from_numpy( np.stack( [t['src_box'] for t in target_list], axis=0) ).to(target_device),
'xsys_target': torch.from_numpy( np.stack( [t['xsys'] for t in target_list], axis=0) ).to(target_device),
'batch_gtboxes_src': batch_gtboxes_src,
}
# move to gpu
# target_dict['hm_target'] = torch.from_numpy(target_dict['hm_target'], device=target_device)
# target_dict['anno_box_target'] = torch.from_numpy(target_dict['anno_box_target'], device=target_device)
# target_dict['ind_target'] = torch.from_numpy(target_dict['ind_target'], device=target_device)
# target_dict['mask_target'] = torch.from_numpy(target_dict['mask_target'], device=target_device)
return target_dict
def assign_target_maps_single(self, gt_boxes, gt_classes,
num_classes, max_objs,
gaussian_minoverlap, gaussian_minradius,
point_cloud_range, feature_map_stride, voxel_size):
'''
Args:
            point_cloud_range: [ 0. -40. -3. 70.4 40. 1. ], dtype is np.float32!!!
mapsize: (200, 176) for kitti
Return:
'''
# print('==> point_cloud_range: ', point_cloud_range)
# print('==> voxel_size: ', voxel_size)
feature_map_sizey = np.round( ((point_cloud_range[4] - point_cloud_range[1]) / voxel_size[1] / feature_map_stride) ).astype(np.int64) # size_y(img_h), should be 200
feature_map_sizex = np.round( ((point_cloud_range[3] - point_cloud_range[0]) / voxel_size[0] / feature_map_stride) ).astype(np.int64) # size_x(img_w), should be 176
# print('==> feature_map_sizey: ', feature_map_sizey)
# print('==> feature_map_sizex: ', feature_map_sizex)
hm = np.zeros((num_classes, feature_map_sizey, feature_map_sizex), dtype=np.float32)
ind = np.zeros((max_objs), dtype=np.int64)
mask = np.zeros((max_objs), dtype=np.uint8)
anno_box = np.zeros((max_objs, 7), dtype=np.float32)
segm = np.zeros((1, feature_map_sizey, feature_map_sizex), dtype=np.float32)
        height = np.zeros((1, feature_map_sizey, feature_map_sizex), dtype=np.float32)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 12 09:40:22 2019
@author: TaiT_
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def computeRMSE(y, y_predict):
    Zigma = np.square(y_predict - y)
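    # (assumed completion) root mean squared error over all samples
    return np.sqrt(np.mean(Zigma))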
import unittest
from unittest import TestCase
import numpy as np
class TestUncertaintyCharacteristicsCurve(TestCase):
def _generate_mock_data(self):
errors = [1.3642327303743222, 1.90992286562998, 1.3376553033984742, 1.1360514041212681, 1.0059398687783236,
0.6562763187757668, 0.583628028840792, 0.6876683476085894, 1.0506101454179664, 0.795072119831687,
1.3275374841578582, 2.4458894373634283, 2.909916525881682, 2.837773991026335, 2.550841867998461]
pred = [96.98915353045395, 96.23007611746924, 97.59180156001409, 98.50349438071208, 97.58943114819733,
97.90442496968824, 97.76155157998329, 96.83695266121595, 96.33141125146022, 96.67439930495053,
95.17170073977303, 92.64549009869268, 94.29582588015835, 96.04923654039105, 97.50696433632777]
actual = [99.02260232131948, 98.61111111111111, 98.03439803439804, 98.05194805194805, 97.47126436781609,
97.27272727272727, 96.75675675675676, 96.328125, 95.68627450980392, 95.0, 94.48051948051948,
94.78260869565217, 93.75433726578764, 93.59375, 76.30066780555987]
X = np.zeros([len(errors), 3])
X[:, 0] = pred
X[:, 1] = errors
X[:, 2] = errors
return X, actual
def test_set_coordinates(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
o = ucc()
o.set_coordinates(x_axis_name='missrate', y_axis_name='bandwidth', normalize=True)
assert(o.norm_x_axis==False and o.norm_y_axis==True)
assert(o.x_axis_idx==o.axes_name2idx['missrate'])
assert(o.y_axis_idx==o.axes_name2idx['bandwidth'])
o.set_coordinates(x_axis_name='bandwidth', y_axis_name='missrate', normalize=True)
assert(o.norm_x_axis==True and o.norm_y_axis==False)
assert(o.x_axis_idx==o.axes_name2idx['bandwidth'])
assert(o.y_axis_idx==o.axes_name2idx['missrate'])
o.set_coordinates(x_axis_name='excess', y_axis_name='deficit', normalize=False)
assert(o.norm_x_axis==False and o.norm_y_axis==False)
assert(o.x_axis_idx==o.axes_name2idx['excess'])
assert(o.y_axis_idx==o.axes_name2idx['deficit'])
def test_set_std_unit(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
o = ucc()
self.assertRaises(ValueError, o.set_std_unit)
X, gt = self._generate_mock_data()
o.fit(X, gt)
o.set_std_unit()
assert (np.isclose(np.std(gt), o.std_unit))
def test_fit(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
X, gt = self._generate_mock_data()
o = ucc()
o.fit(X, gt)
assert (len(o.d) == 1)
assert (all(np.isclose(o.d[0], gt - X[:, 0])))
assert (all(np.isclose(o.lb[0], X[:, 1])))
o.fit([X, X], gt)
assert (len(o.d) == 2)
def test__sanitize_input(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
from copy import deepcopy
X, gt = self._generate_mock_data()
o = ucc()
x = deepcopy(X)
x[:, 1:] = 0.
self.assertRaises(ValueError, o._sanitize_input, x)
x = deepcopy(X)
x[0:2, 1] = 0.
x[2:4, 2] = 0.
x = o._sanitize_input(x)
assert all(x[0:2, 1] != 0)
assert all(x[2:4, 2] != 0)
def test__calc_missrate_bandwidth_excess_deficit(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
X, gt = self._generate_mock_data()
o = ucc()
d = X[:, 0] - gt
lb = X[:, 1]
ub = X[:, 2]
m, b, e, df = o._calc_missrate_bandwidth_excess_deficit(d, lb, ub)
assert (np.isclose(m, 0.333333) and np.isclose(b, 1.506601) and np.isclose(e, 0.451471) and np.isclose(df,
1.406418))
def test__calc_plotdata(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
X, gt = self._generate_mock_data()
o = ucc()
d = X[:, 0] - gt
lb = X[:, 1]
ub = X[:, 2]
pd = o._calc_plotdata(d, lb, ub)
assert (len(pd) == 15)
assert (all(np.isclose(pd[-1], (8.313450079681967, 0.0, 12.525053001149491, 10.06350492529685, 0.0))))
pd = o._calc_plotdata(d, lb, ub, vary_bias=True)
assert (len(pd) == 15)
assert (all(np.isclose(pd[-1], (18.65545466276944, 0.0, 20.162055758716438, 17.70050768286379, 0.0))))
def test_get_AUUCC_OP(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
X, gt = self._generate_mock_data()
o = ucc()
o.fit(X, gt)
assert (np.isclose(o.get_AUUCC(), 0.14510967778953665))
assert (np.isclose(o.get_AUUCC(vary_bias=True), 0.1818591455455332))
d = X[:, 0] - gt
lb = X[:, 1]
ub = X[:, 2]
op = o._get_single_OP(d, lb, ub)
assert (all(np.isclose(op, (0.08553963270429753, 0.33333333333333337, 5.2779216043654325, 1.0))))
op = o._get_single_OP(d, lb, ub, 2.0, 0.5)
assert (all(np.isclose(op, (0.4847795200753158, 0.06666666666666665, 5.2779216043654325, 1.0))))
o.set_coordinates('excess', 'deficit', normalize=True)
op = o._get_single_OP(d, lb, ub)
assert (all(np.isclose(op, (0.08553963270429753, 0.26647202456017466, 5.2779216043654325, 5.2779216043654325))))
o.set_coordinates('excess', 'deficit', normalize=False)
op = o._get_single_OP(d, lb, ub)
assert (all(np.isclose(op, (0.45147147547949584, 1.406418455385142, 1.0, 1.0))))
def test_get_partial_AUUCC(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
X, gt = self._generate_mock_data()
o = ucc()
o.set_coordinates(x_axis_name='bandwidth', y_axis_name='missrate', normalize=True)
o.fit(X, gt)
assert (np.isclose(o.get_AUUCC(partial_y=(0.,0.1)), .07003588145613951))
assert (np.isclose(o.get_AUUCC(partial_x=(0.,.5)), 0.21031874267804815))
assert (np.isclose(o.get_AUUCC(), o.get_AUUCC(partial_y=(0.,1.))))
assert (np.isclose(o.get_AUUCC(), o.get_AUUCC(partial_x=(0.,1000000.))))
self.assertRaises(ValueError, o.get_AUUCC, partial_x=(0., 1.), partial_y=(0., 1.))
# assert (np.isclose(o.get_AUUCC(vary_bias=True), 0.1818591455455332))
def test_minimize_cost_and_get_recipe(self):
from uq360.metrics.uncertainty_characteristics_curve.uncertainty_characteristics_curve import UncertaintyCharacteristicsCurve as ucc
from copy import deepcopy
X, gt = self._generate_mock_data()
o = ucc()
o.fit(X, gt)
C = o.minimize_cost(x_axis_cost=1.0, y_axis_cost=10., augment_cost_by_normfactor=False)
assert(C['operation']=='bias' and np.isclose(C['cost'], 1.7761220370565665) and np.isclose(C['modvalue'], 0.8793271851188393))
Cn = o.minimize_cost(x_axis_cost=1.0, y_axis_cost=10., augment_cost_by_normfactor=True)
# Cn should have different cost value but same optimum coordinates
assert(Cn['modvalue']==C['modvalue'] and Cn['new_x']==C['new_x'] and Cn['new_y']==C['new_y']
and Cn['operation']==C['operation'])
X2 = deepcopy(X)
X2[:,1:] = X[:,1:] + C['modvalue']
o2 = ucc()
o2.fit(X2, gt)
C2 = o2.minimize_cost(x_axis_cost=1.0, y_axis_cost=10., augment_cost_by_normfactor=False)
assert(np.isclose(C2['cost'], C2['original_cost']))
r = o2.get_specific_operating_point(req_x_axis_value=C2['new_x'], vary_bias=True)
assert(np.isclose(r['new_y'], C2['new_y']))
r2 = o2.get_specific_operating_point(req_x_axis_value=C2['new_x'], vary_bias=False)
assert(r2['new_y'] >= r['new_y']) # scale ucc happens to have higher cost
r = o2.get_specific_operating_point(req_y_axis_value=C2['new_y'], vary_bias=True)
assert(r['new_x']<=C2['new_x']) # if multiple x's for a y, the lowest is returned
r2 = o2.get_specific_operating_point(req_y_axis_value=C2['new_y'], vary_bias=False)
assert(np.isclose(r2['new_x'], r['new_x'])) # x points should be the same
assert(np.isclose(r['modvalue'], 0.,) and np.isclose(r2['modvalue'], 1.))
op = o2.get_OP()
assert(np.isclose(op[0] * op[2] * 1. + op[1] * op[3] * 10., C2['original_cost']))
# test normalization
o2.set_coordinates(normalize=False)
r3 = o2.get_specific_operating_point(req_y_axis_value=C2['new_y'])
        assert(np.isclose(r3['new_x'] / o2.std_unit, r2['new_x']))
# routines for assessing RVs from pipeline
import os
import copy
import glob
import pdb
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import esutil
import pickle
import yaml
from astropy.io import fits
from apogee.utils import apload
from apogee.utils import applot
from apogee.utils import bitmask
from apogee.utils import spectra
from apogee.aspcap import norm
from tools import plots
from tools import html
from tools import match
from tools import struct
from sdss import yanny
from scipy import interpolate
from scipy.signal import correlate
from scipy.ndimage.filters import median_filter, gaussian_filter
colors=['r','g','b','c','m','y','k']
chips=['a','b','c']
def allField(files=['apo*/*/apField-*.fits','apo*/*/apFieldC-*.fits','lco*/*/apField-*.fits'],out='allField.fits',verbose=False) :
""" Concatenate set of apField files
"""
# concatenate the structures
all=struct.concat(files,verbose=verbose)
# write out the file
if out is not None:
print('writing',out)
struct.wrfits(all,out)
return all
def allFieldVisits(files=['apo*/*/apFieldVisits-*.fits','apo*/*/apFieldC-*.fits','lco*/*/apFieldVisits-*.fits'],out='allFieldVisits.fits',verbose=False) :
""" Concatenate set of apFieldVisit files
"""
# concatenate the structures
all=struct.concat(files,verbose=verbose)
# write out the file
if out is not None:
print('writing',out)
struct.wrfits(all,out)
return all
def vscat(a,fig=None,ls=None,marker='o',nmin=2,mhmin=-3,density=False,out=None) :
""" Make histograms of VSCATTER for different bins of Teff H], given min NVISITS, and min [M/H]
"""
if fig == None : fig,ax=plots.multi(4,6,hspace=0.001,wspace=0.4,figsize=(12,8))
else : fig,ax=fig
tbins=[3000,3500,4000,4500,5500,8000,30000]
hbins=[8,11,12,13,15]
try: snr = a['SNREV']
except: snr=a['SNR']
j=np.where(snr > 300) [0]
snr[j] = 300
for i in range(len(tbins)-1) :
ax[i,0].text(0.9,0.9,'{:d}<=RV_TEFF<{:d}'.format(tbins[i],tbins[i+1]),ha='right',transform=ax[i,0].transAxes,fontsize=8)
for j in range(len(hbins)-1) :
ax[0,j].set_title('{:d}<=H<{:d}'.format(hbins[j],hbins[j+1]))
gd = np.where((a['RV_TEFF']>=tbins[i]) & (a['RV_TEFF']<tbins[i+1]) &
(a['H']>=hbins[j]) & (a['H']<hbins[j+1]) &
(a['NVISITS']>nmin) & (a['RV_FEH']>mhmin) & (a['VSCATTER'] > 0)) [0]
print(tbins[i],tbins[i+1],hbins[j],hbins[j+1],nmin,len(gd))
try :
#plots.plotc(ax[i,2],snr[gd],a['VSCATTER'][gd],a['RV_FEH'][gd],marker=marker,xr=[0,310],yr=[0,1],xt='S/N',yt='VSCATTER')
ax[i,j].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),ls=ls,histtype='step',color=colors[j],normed=density)
ax[i,j].set_xlabel('VSCATTER (km/s)')
ax[i,j].plot([0.1,0.1],ax[i,j].get_ylim())
#ax[i,1].hist(a['VSCATTER'][gd],bins=np.arange(0,1,0.01),histtype='step',cumulative=True,normed=True,ls=ls,color=colors[j])
#ax[i,1].set_xlabel('VSCATTER')
except : pass
if out is not None :
fig.savefig(out+'.png')
plt.close()
fig.suptitle('NVISITS>{:d} [M/H]>{:6.2f}'.format(nmin,mhmin))
return fig,ax
def apolco(a,minfeh=-3,out=None) :
""" VSCATTER histograms for APO vs LCO
"""
apo=np.where((a['TELESCOPE'] == 'apo25m') & (a['RV_FEH']>minfeh) )[0]
fig=vscat(a[apo],marker='o',density=True)
lco=np.where((a['TELESCOPE'] == 'lco25m') & (a['RV_FEH']>minfeh) )[0]
vscat(a[lco],fig=fig,ls=':',marker='+',density=True)
if out is not None :
fig[0].savefig(out+'_1.png')
plt.close()
i1,i2=match.match(a['APOGEE_ID'][apo],a['APOGEE_ID'][lco])
print('matched {:d} stars'.format(len(i1)))
fig,ax=plots.multi(1,2)
#plots.plotp(ax[0,0],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-3,3],yt=r'$\Delta$ VHELIO_AVG',xt='S/N')
#plots.plotp(ax[0,1],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-50,50],yt=r'$\Delta$ VHELIO_AVG',xt='S/N')
#plots.plotp(ax[1,0],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-0.5,0.5],yt=r'$\Delta$ VSCATTER',xt='S/N')
#plots.plotp(ax[1,1],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-5,5],yt=r'$\Delta$ VSCATTER',xt='S/N')
ax[0].hist(a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],bins=np.arange(-0.5,0.5,0.02),histtype='step')
ax[0].set_xlabel(r'$\Delta$ VHELIO_AVG')
ax[1].hist(a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],bins=np.arange(-0.25,0.25,0.01),histtype='step')
ax[1].set_xlabel(r'$\Delta$ VSCATTER')
if out is not None :
fig.savefig(out+'_2.png')
plt.close()
def comp(a,b,av=None,bv=None,domatch=True,out=None) :
""" VSCATTER comparison of two different data sets
"""
if domatch :
i1,i2=match.match(a['APOGEE_ID'],b['APOGEE_ID'])
gd = np.where(a['NVISITS'][i1] == b['NVISITS'][i2])[0]
a=a[i1[gd]]
b=b[i2[gd]]
fig = vscat(a)
vscat(b,fig=fig,ls=':')
if out is not None :
fig[0].savefig(out+'_1.png')
plt.close()
if domatch :
fig,ax=plots.multi(1,2)
#plots.plotp(ax[0,0],a['SNR'],a['VHELIO_AVG']-b['VHELIO_AVG'],yr=[-3,3],yt=r'$\Delta$ VHELIO_AVG')
#plots.plotp(ax[0,1],a['SNR'],a['VHELIO_AVG']-b['VHELIO_AVG'],yr=[-50,50],yt=r'$\Delta$ VHELIO_AVG')
#plots.plotp(ax[1,0],a['SNR'],a['VSCATTER']-b['VSCATTER'],yr=[-0.5,0.5],yt=r'$\Delta$ VSCATTER')
#plots.plotp(ax[1,1],a['SNR'],a['VSCATTER']-b['VSCATTER'],yr=[-5,5],yt=r'$\Delta$ VSCATTER')
ax[0].hist(a['VHELIO_AVG']-b['VHELIO_AVG'],bins=np.arange(-0.5,0.5,0.02),histtype='step')
ax[0].set_xlabel(r'$\Delta$ VHELIO_AVG')
ax[1].hist(a['VSCATTER']-b['VSCATTER'],bins=np.arange(-0.5,0.5,0.02),histtype='step')
ax[1].set_xlabel(r'$\Delta$ VSCATTER')
if out is not None :
fig.savefig(out+'_2.png')
plt.close()
return a,b
def visitsum_tel(all) :
j=np.where(all['TELESCOPE'] == 'apo25m')[0]
apo= all[j]
j=np.where(all['TELESCOPE'] == 'lco25m')[0]
lco= all[j]
apoobjs = np.array(list(set(apo['APOGEE_ID'])))
lcoobjs = np.array(list(set(lco['APOGEE_ID'])))
i1,i2=match.match(apoobjs,lcoobjs)
vhelio = []
vscat = []
verr = []
sigfiber = []
vdiff = []
n = []
mjd = []
tel = []
for i in i1 :
j=np.where(all['APOGEE_ID'] == apoobjs[i])[0]
vhelio.append(all['VHELIO'][j].mean())
vscat.append(all['VHELIO'][j].std())
verr.append(all['VRELERR'][j].max())
sigfiber.append(all['FIBERID'][j].std())
vdiff.extend(all['VHELIO'][j]-all['VHELIO'][j].mean())
mjd.extend(all['MJD'][j])
tel.extend(all['TELESCOPE'][j])
n.append(len(j))
fig,ax=plots.multi(1,2)
mjd=np.array(mjd)
tel=np.array(tel)
vdiff=np.array(vdiff)
plots.plotp(ax[0],mjd,vdiff,typeref=tel,types=['apo25m','lco25m'],color=['b','g'],yr=[-1,1])
j=np.where(tel == 'apo25m')[0]
ax[1].hist(vdiff[j],color='b',bins=np.arange(-1,1,0.01),histtype='step')
mjds= [55800, 56130, 56512, 56876, 57230, 57600, 57966, 58360]
for i in range(len(mjds)-1) :
j=np.where((tel == 'apo25m') & (mjd >mjds[i]) & (mjd<mjds[i+1]) )[0]
print(mjds[i],len(j))
ax[1].hist(vdiff[j],bins=np.arange(-1,1,0.03),histtype='step')
j=np.where(tel == 'lco25m')[0]
ax[1].hist(vdiff[j],color='g',bins=np.arange(-1,1,0.01),histtype='step')
mjds= [57829, 57966, 58360]
plt.show()
pdb.set_trace()
def visitsum(all,out=None,minvisit=1) :
objs = set(all['APOGEE_ID'])
if out is None :
vhelio = []
vscat = []
verr = []
sigfiber = []
vdiff = []
n = []
print('n objects: ', len(objs))
for iobj,obj in enumerate(objs) :
j = np.where(all['APOGEE_ID'] == obj)[0]
print(iobj,len(j))
vhelio.append(all['VHELIO'][j].mean())
vscat.append(all['VHELIO'][j].std())
verr.append(all['VRELERR'][j].max())
sigfiber.append(all['FIBERID'][j].std())
vdiff.extend(all['VHELIO'][j]-all['VHELIO'][j].mean())
n.append(len(j))
vhelio=np.array(vhelio)
vscat=np.array(vscat)
verr=np.array(verr)
sigfiber=np.array(sigfiber)
vdiff=np.array(vdiff)
n=np.array(n)
else :
vhelio,vscat,verr,sigfiber,vdiff,n = out
vdiff=np.array(vdiff)
fig,ax=plots.multi(2,3)
gd = np.where(n>minvisit)[0]
ax[0,0].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='b')
ax[2,0].hist(vdiff,color='b',bins=np.arange(-1.,1,0.01),histtype='step')
gd=np.where((n>minvisit) & (verr < 0.2))[0]
ax[0,0].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='g')
ax[2,1].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',color='g')
fig.tight_layout()
plt.show()
return vhelio,vscat,verr,sigfiber,vdiff,n
def field(name,dr14=False,dir='./',minvisit=1) :
""" look at a single field
"""
all=struct.concat([dir+'/apVisitSum*.fits'])
if name == 'M67' : locid=[os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/4162//apVisitSum*']
elif name == 'N188' : locid=[os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/4217//apVisitSum*',
os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/5067//apVisitSum*']
alldr14=struct.concat(locid)
objs = set(all['APOGEE_ID'])
vhelio = []
vscat = []
verr = []
sigfiber = []
vdiff = []
n = []
dr14vhelio = []
dr14vscat = []
dr14sigfiber = []
dr14n = []
dr14vdiff = []
for obj in objs :
j = np.where(all['APOGEE_ID'] == obj)[0]
vhelio.append(all['VHELIO'][j].mean())
vscat.append(all['VHELIO'][j].std())
verr.append(all['VRELERR'][j].max())
sigfiber.append(all['FIBERID'][j].std())
vdiff.extend(all['VHELIO'][j]-all['VHELIO'][j].mean())
n.append(len(j))
#print(all['MJD'][j],all['VHELIO'][j])
j = np.where(alldr14['APOGEE_ID'] == obj)[0]
dr14vhelio.append(alldr14['VHELIO'][j].mean())
dr14vscat.append(alldr14['VHELIO'][j].std())
dr14sigfiber.append(alldr14['FIBERID'][j].std())
dr14n.append(len(j))
dr14vdiff.extend(alldr14['VHELIO'][j]-alldr14['VHELIO'][j].mean())
#print(all['MJD'][j],all['VHELIO'][j],all['VRELERR'][j])
#print(alldr14['MJD'][j],alldr14['VHELIO'][j],alldr14['VRELERR'][j])
#pdb.set_trace()
vhelio=np.array(vhelio)
vscat=np.array(vscat)
verr=np.array(verr)
sigfiber=np.array(sigfiber)
n=np.array(n)
dr14vhelio=np.array(dr14vhelio)
dr14vscat=np.array(dr14vscat)
dr14sigfiber=np.array(dr14sigfiber)
dr14n=np.array(dr14n)
fig,ax=plots.multi(2,3)
gd =np.where(n > minvisit)[0]
ax[0,0].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='b')
ax[0,0].hist(dr14vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='r')
gd=np.where((verr < 0.2) & (n>minvisit))[0]
ax[0,0].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='g')
ax[0,0].hist(dr14vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',cumulative=True,normed=True,color='m')
ax[2,1].hist(vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',color='k')
ax[2,1].hist(dr14vscat[gd],bins=np.arange(0.01,1,0.01),histtype='step',color='r')
plots.plotc(ax[1,0],vhelio-dr14vhelio,vscat-dr14vscat,verr,xr=[-0.5,0.5],yr=[-0.3,0.3],zr=[0,0.15])
plots.plotc(ax[0,1],sigfiber,vscat-dr14vscat,verr,zr=[0,0.15],yr=[-0.3,0.3])
plots.plotc(ax[1,1],vscat,vscat-dr14vscat,verr,zr=[0,0.15],yr=[-0.3,0.3],xr=[0,0.5])
ax[2,0].hist(vdiff,color='b',bins=np.arange(-1.,1,0.01),histtype='step')
ax[2,0].hist(dr14vdiff,color='r',bins=np.arange(-1.,1,0.01),histtype='step')
fig.tight_layout()
plt.show()
def visitcomp(plate,mjd,indiv=False,apred='test') :
""" Compare RVs for plate/mjd with DR14 RVs
"""
#plt.close('all')
load=apload.ApLoad(apred=apred)
a=load.apVisitSum(plate,mjd)[1].data
#dr14=apload.ApLoad(dr='dr14')
p=yanny.yanny(os.environ['PLATELIST_DIR']+'/platePlans.par',np=True)
j=np.where(p['PLATEPLANS']['plateid'] == plate)[0][0]
locid=p['PLATEPLANS']['locationid'][j]
b=fits.open(os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/{:04d}/apVisitSum-{:04d}-{:05d}.fits'.format(
locid,plate,mjd))[1].data
fig,ax=plots.multi(2,2)
i1,i2=match.match(a['FIBERID'],b['FIBERID'])
plots.plotc(ax[0,0],a['FIBERID'][i1],a['VHELIO'][i1]-b['VHELIO'][i2],a['RV_TEFF'][i1],zr=[3500,5500],xt='Fiber',yt=r'$\Delta$ VHELIO')
plots.plotc(ax[0,1],a['FIBERID'][i1],a['VHELIO'][i1]-b['VHELIO'][i2],a['RV_TEFF'][i1],zr=[3500,5500],yr=[-2.,2.],xt='Fiber',yt=r'$\Delta$ VHELIO')
plots.plotc(ax[1,0],a['FIBERID'][i1],a['RV_TEFF'][i1]-b['RV_TEFF'][i2],a['RV_TEFF'][i1],zr=[3500,5500],xt='Fiber',yt=r'$\Delta$ RV_TEFF')
plots.plotc(ax[1,1],a['RV_TEFF'][i1]-b['RV_TEFF'][i2],a['VHELIO'][i1]-b['VHELIO'][i2],a['RV_TEFF'][i1],zr=[3500,5500],xt=r'$\Delta$ RV_TEFF',yt=r'$\Delta$ VHELIO')
out=load.filename('Plate',chips=True,plate=plate,mjd=mjd)
outdir=os.path.dirname(out)
outname=os.path.basename(out).replace('-a','').replace('.fits','_dr14comp.png')
fig.tight_layout()
pdb.set_trace()
print(outdir+'/plots/'+outname)
fig.savefig(outdir+'/plots/'+outname)
if indiv :
va=load.apPlate(plate,mjd)
vb={}
for chip in chips :
tmp=fits.open(os.environ['APOGEE_REDUX']+'/r8/apo25m/{:04d}/{:05d}/apPlate-{:s}-{:04d}-{:05d}.fits'.format(
plate,mjd,chip,plate,mjd))
vb[chip] = tmp
fig,ax=plots.multi(1,3,hspace=0.3)
pfig,pax=plots.multi(1,3,hspace=0.3)
wfig,wax=plots.multi(1,3,hspace=0.3)
for i in range(len(i1)) :
fiber = a['FIBERID'][i1[i]]
if (a['VHELIO'][i1[i]]-b['VHELIO'][i2[i]]) > 0.5 :
print(fiber,a['VHELIO'][i1[i]],b['VHELIO'][i2[i]],a['RV_TEFF'][i1[i]],b['RV_TEFF'][i2[i]])
applot.chip(va,ax=ax,row=300-fiber,color='r')
#applot.chip(va,ax=pax,row=300-fiber,color='r',pixel=True)
applot.chip(vb,ax=ax,row=300-fiber,color='b')
#applot.chip(vb,ax=pax,row=300-fiber,color='b',pixel=True)
for ichip,chip in enumerate(chips) :
pax[ichip].plot(va[chip][1].data[300-fiber,:]/vb[chip][1].data[300-fiber,:])
wax[ichip].plot(va[chip][4].data[300-fiber,:]-vb[chip][4].data[300-fiber,:])
plt.show()
pdb.set_trace()
for ichip in range(3) :
ax[ichip].cla()
pax[ichip].cla()
wax[ichip].cla()
plt.close()
def dr14comp(a,b,av,bv):
""" compare multiple field RVs from, e.g. allField file with DR14
"""
load=apload.ApLoad(apred='r11')
dr14=apload.ApLoad(dr='dr14')
i1,i2=match.match(a['APOGEE_ID'],b['APOGEE_ID'])
gd = np.where((a['NVISITS'][i1] == b['NVISITS'][i2]) & (a['SNR'][i1]>75) )[0]
a=a[i1[gd]]
b=b[i2[gd]]
j=np.argsort(a['VHELIO_AVG']-b['VHELIO_AVG'])
fig,ax=plots.multi(1,3,hspace=0.3)
pfig,pax=plots.multi(1,3,hspace=0.3)
wfig,wax=plots.multi(1,3,hspace=0.3)
chips=['a','b','c']
for jj in j :
j1=np.where(av['APOGEE_ID'] == a['APOGEE_ID'][jj])[0]
j2=np.where(bv['APOGEE_ID'] == a['APOGEE_ID'][jj])[0]
print(a['APOGEE_ID'][jj],a['RV_TEFF'][jj],b['RV_TEFF'][jj],a['SNR'][jj],b['SNR'][jj])
for jjj,kkk in zip(j1,j2) :
print(av['MJD'][jjj],av['PLATE'][jjj],av['FIELD'][jjj],av['SNR'][jjj],av['FIBERID'][jjj],av['VHELIO'][jjj],av['ESTVHELIO'][jjj])
print(bv['MJD'][kkk],bv['PLATE'][kkk],bv['FIELD'][kkk],bv['SNR'][kkk],bv['FIBERID'][kkk],bv['VHELIO'][kkk],bv['ESTVHELIO'][kkk])
va=load.apPlate(int(av['PLATE'][jjj]),av['MJD'][jjj])
vsum=load.apVisitSum(int(av['PLATE'][jjj]),av['MJD'][jjj])[1].data
f=np.where(vsum['FIBERID'] == av['FIBERID'][jjj])[0]
print(vsum['RV_TEFF'][f])
applot.chip(va,ax=ax,row=300-av['FIBERID'][jjj],color='r')
applot.chip(va,ax=pax,row=300-av['FIBERID'][jjj],color='r',pixel=True)
vb={}
for chip in chips :
tmp=fits.open(os.environ['APOGEE_REDUX']+'/r8/apo25m/{:04d}/{:05d}/apPlate-{:s}-{:04d}-{:05d}.fits'.format(
int(bv['PLATE'][kkk]),bv['MJD'][kkk],chip,int(bv['PLATE'][kkk]),bv['MJD'][kkk]))
vb[chip] = tmp
vsum=fits.open(os.environ['APOGEE_REDUX']+'/r8/fields/apo25m/{:04d}/apVisitSum-{:04d}-{:05d}.fits'.format(
int(bv['LOCATION_ID'][kkk]),int(bv['PLATE'][kkk]),bv['MJD'][kkk]))[1].data
f=np.where(vsum['FIBERID'] == bv['FIBERID'][kkk])[0]
print(vsum['RV_TEFF'][f])
applot.chip(vb,ax=ax,row=300-bv['FIBERID'][kkk],color='b')
applot.chip(vb,ax=pax,row=300-bv['FIBERID'][kkk],color='b',pixel=True)
for ichip,chip in enumerate(chips) :
wax[ichip].plot(va[chip][4].data[300-av['FIBERID'][jjj],:]-vb[chip][4].data[300-bv['FIBERID'][kkk],:])
plt.show()
pdb.set_trace()
for ichip in range(3) :
ax[ichip].cla()
pax[ichip].cla()
wax[ichip].cla()
def standards(a,out=None) :
""" Compare RVs to standards
"""
stan = fits.open(os.environ['APOGEE_DIR']+'/data/rv/rvstandards.fits')[1].data
h=esutil.htm.HTM()
m1,m2,rad=h.match(a['ra'],a['dec'],stan['ra'],stan['dec'],1./3600.,maxmatch=500)
fig,ax=plots.multi(1,1)
ax.hist(a['VHELIO_AVG'][m1]-stan['RV'][m2],histtype='step',bins=np.arange(-1,1,0.1))
ax.set_xlabel('RV(APOGEE) - RV(lit)')
if out is not None :
fig.savefig(out+'.png')
plt.close()
def all(a,name='DR16',dr='dr14') :
""" Do a series of RV comparisons for input data and previous DR
"""
grid=[]
xtit=[]
load=apload.ApLoad(dr=dr)
b=load.allStar()[1].data
# vscatter of new RVs
vscat(a,out='plots/vscat')
vscat(a,out='plots/vscat5',nmin=5)
grid.append(['vscat.png','vscat5.png'])
xtit.append(name+' : VSCATTER')
# APO/LCO comparison
apolco(a,out='plots/apolco')
grid.append(['apolco_1.png','apolco_2.png'])
xtit.append(name+' : APO (solid) and LCO (dotted), same stars')
#apolco(a,out='plots/apolco_nolowz',minfeh=-0.6)
#grid.append(['apolco_nolowz_1.png','apolco_nolowz_2'])
#xtit.append(name+', no low [Fe/H]: APO (solid) and LCO (dotted)')
# RV standards
standards(a,out='plots/rvstan')
grid.append(['rvstan.png',''])
xtit.append(name+', comparison with literature RVs')
# comparison with previous DR
comp(a,b,domatch=True,out='plots/drcomp')
grid.append(['drcomp_1.png','drcomp_2.png'])
xtit.append(name+', comparison with '+dr+' : same stars, same NVISITS, new(solid) '+dr+'(dotted)')
html.htmltab(grid,ytitle=xtit,file='plots/rv.html')
def visitspec(load,plate,mjd,fiber,gridfile='apg_rvsynthgrid',apstar=False) :
""" Crude beginnings of an RV routine
"""
grid = fits.open(os.environ['APOGEE_DIR']+'/data/synthgrid/'+gridfile+'.fits')
if gridfile == 'apg_rvsynthgrid' : hdu=1
elif gridfile == 'apg_rvsynthgrid_v2': hdu=0
elif apstar : hdu=2
else : hdu=1
gridspec=grid[hdu].data
gridwave = 10.**spectra.fits2vector(grid[hdu].header,2)
griderr = np.ones(gridspec.shape[0])
#for ispec in range(gridspec.shape[1]) :
# cont = norm.cont(gridspec[:,ispec],griderr)
# gridspec[:,ispec] /= cont
data = load.apVisit(plate,mjd,fiber)
    # compare with DR14
    # NOTE: the block below references a, b and xtit, which are not defined in visitspec;
    # it appears misplaced (cf. the all() routine above), so it is commented out to keep
    # this function runnable.
    #comp(a,b,domatch=False,out='plots/dr14all')
    #grid.append(['dr14all_1.png',''])
    #xtit.append('all stars: DR14 (dotted) and test DR16 (solid)')
    #comp(a,b,domatch=True,out='plots/dr14match')
    #grid.append(['dr14match_1.png','dr14match_2.png'])
    #xtit.append('same stars: DR14 (dotted) and test DR16 (solid)')
# set bad pixels to nan
shape=data[1].data.shape
spec = copy.copy(data[1].data).flatten()
specerr = copy.copy(data[2].data)
specwave=data[4].data
pixmask=bitmask.PixelBitMask()
bd = np.where( ((data[3].data & pixmask.badval()) > 0) |
((data[3].data & pixmask.getval('SIG_SKYLINE')) > 0) ) [0]
spec[bd] = np.nan
spec = spec.reshape(shape)
# continuum normalize and sample to grid
outspec = np.full(len(gridwave),np.nan)
if not apstar :
# apVisit wavelengths are reversed
spec=np.flip(spec)
specwave=np.flip(specwave)
specerr=np.flip(specerr)
for ichip in range(3) :
cont = norm.cont(spec[ichip,:],specerr[ichip,:])
spec[ichip,:] /= cont
gd=np.where(np.isfinite(spec[ichip,:]))[0]
ip= interpolate.InterpolatedUnivariateSpline(specwave[ichip,gd],spec[ichip,gd],k=3)
out = ip(gridwave)
gd = np.where( (gridwave > specwave[ichip,0]) & (gridwave < specwave[ichip,-1]) )[0]
outspec[gd] = out[gd]
plt.plot(specwave[ichip,:],spec[ichip,:])
plt.plot(gridwave[gd],out[gd])
plt.show()
for ispec in range(gridspec.shape[1]) :
print(ispec)
bd=np.where(np.isnan(outspec))
outspec[bd]=1.
out=correlate(outspec,gridspec[:,ispec])
pdb.set_trace()
def repeatspec(a) :
stars=set(a['APOGEE_ID'])
fig,ax=plots.multi(1,2,hspace=0.001,sharex=True)
for star in stars :
j=np.where(a['APOGEE_ID'] == star)[0]
if len(j) > 1 :
for i in j :
print(a['TELESCOPE'][i],a['FIELD'][i],a['NVISITS'][i])
spec=fits.open(a['TELESCOPE'][i]+'/'+a['FIELD'][i]+'/apStar-r12-'+a['APOGEE_ID'][i]+'.fits')
if i == j[0] : spec0=copy.copy(spec[1].data[0,:])
plots.plotl(ax[0],spectra.fits2vector(spec[1].header,1),spec[1].data[0,:])
plots.plotl(ax[1],spectra.fits2vector(spec[1].header,1),spec[1].data[0,:]/spec0,yr=[0.5,1.5])
plt.show()
pdb.set_trace()
ax[0].cla()
ax[1].cla()
import doppler
import multiprocessing as mp
from astropy.table import Table, Column
from apogee.apred import bc
def doppler_rv(planfile,survey='apogee',telescope='apo25m',apred='r13',obj=None,
nobj=0,threads=8,maxvisit=500,snmin=3,
clobber=False,verbose=False,tweak=False,plot=False,windows=None) :
""" Run DOPPLER RVs for a field
"""
plan=yaml.safe_load(open(planfile,'r'))
apred=plan['apred_vers']
telescope=plan['telescope']
field=plan['field']
# get all the VisitSum files for this field and concatenate them
files=glob.glob(os.environ['APOGEE_REDUX']+'/'+apred+'/visit/'+telescope+'/'+field+'/apVisitSum*')
if len(files) == 0 :
print('no apVisitSum files found for {:s}'.format(field))
return
else :
allvisits=struct.concat(files)
starmask=bitmask.StarBitMask()
gd=np.where(((allvisits['STARFLAG'] & starmask.badval()) == 0) &
(allvisits['APOGEE_ID'] != b'') &
(allvisits['SNR'] > snmin) )[0]
print(len(allvisits),len(gd))
allvisits=Table(allvisits)
# output directory
try: os.mkdir(field)
except FileExistsError: pass
# get all unique (or requested) objects
if obj is None :
if nobj > 0 :
allobj=set(allvisits['APOGEE_ID'][0:nobj])
else :
allobj=set(allvisits['APOGEE_ID'])
else :
allobj = obj
# output apField structure
fieldtype = np.dtype([('FILE','S64'),('APOGEE_ID','S20'),('TELESCOPE','S6'),('LOCATION_ID',int),('FIELD','S20'),
('J',float),('J_ERR',float),('H',float),('H_ERR',float),('K',float),('K_ERR',float),
('RA',float),('DEC',float),('GLON',float),('GLAT',float),
('AK_TARG',float),('AK_TARG_METHOD','S32'),
('AK_WISE',float),('SFD_EBV',float),
('APOGEE_TARGET1',int),('APOGEE_TARGET2',int),('APOGEE_TARGET3',int),
('APOGEE2_TARGET1',int),('APOGEE2_TARGET2',int),('APOGEE2_TARGET3',int),('APOGEE2_TARGET4',int),
('TARGFLAGS','S132'),('SURVEY','S16'),('PROGRAMNAME','S32'),
('NINST',int),('NVISITS',int),('COMBTYPE',int),('COMMISS',int),
('SNR',float),('STARFLAG',int),('STARFLAGS','S132'),('ANDFLAG',int),('ANDFLAGS','S132'),
('VHELIO_AVG',float),('VSCATTER',float),('VERR',float),
('RV_TEFF',float),('RV_LOGG',float),('RV_FEH',float),('RV_ALPHA',float),('RV_CARB',float),
('RV_CCPFWHM',float),('RV_AUTOFWHM',float),
('N_COMPONENTS',int)
])
allfield = np.zeros(len(allobj),dtype=fieldtype)
allfield['TELESCOPE'] = telescope
allfield['FIELD'] = field
allfiles=[]
allv=[]
load=apload.ApLoad(apred=apred,telescope=telescope)
nobj=0
nvisit=0
pixelmask=bitmask.PixelBitMask()
# loop over requested objects, building up allfiles list of
# [(field,obj,clobber,verbose,tweak,plot,windows),filenames....] to pass to dorv()
for iobj,star in enumerate(sorted(allobj)) :
if type(star) is str : star=star.encode()
allfield['APOGEE_ID'][iobj] = star
# we will only consider good visits
visits=np.where(allvisits['APOGEE_ID'][gd] == star)[0]
print('object: {:} nvisits: {:d}'.format(star,len(visits)))
nobj+=1
nvisit+=len(visits)
if len(visits) > 0 :
allfiles.append([allvisits[gd[visits]],load,(field,star,clobber,verbose,tweak,plot,windows)])
print('total objects: ', nobj, ' total visits: ', nvisit)
# now do the RVs, in parallel if requested
if threads == 0 :
output=[]
for speclist in allfiles :
print(speclist)
output.append(dorv(speclist))
else :
pool = mp.Pool(threads)
output = pool.map_async(dorv, allfiles).get()
pool.close()
pool.join()
print('done pool')
# load up the individual visit RV information
# first rename old visit RV tags and initialize new ones
for col in ['VTYPE','VREL','VRELERR','VHELIO','BC','RV_TEFF','RV_LOGG','RV_FEH','RV_CARB','RV_ALPHA'] :
allvisits.rename_column(col,'EST'+col)
if col == 'VTYPE' : allvisits[col] = 0
else : allvisits[col] = np.nan
for col in ['XCORR_VREL','XCORR_VRELERR','XCORR_VHELIO','BC'] :
allvisits[col] = np.nan
# add columns for RV components
allvisits['N_COMPONENTS'] = -1
rv_components = Column(name='RV_COMPONENTS',dtype=float,shape=(3),length=len(allvisits))
allvisits.add_column(rv_components)
# now load the new ones with the dorv() output
allv=[]
for out,files in zip(output,allfiles) :
if out is not None :
visits=[]
ncomponents=0
for i,(v,g) in enumerate(zip(out[0][1],out[1])) :
# match by filename components in case there was an error reading in doppler
name=os.path.basename(v['filename']).replace('.fits','').split('-')
if telescope == 'apo1m' :
visit = np.where( np.char.strip(allvisits['FILE']).astype(str) == os.path.basename(v['filename'].strip()) )[0]
if len(visit) == 0 :
# special case for incremental release...yuck
visit = np.where( np.char.strip(allvisits['FILE']).astype(str) ==
os.path.basename(v['filename'].strip()).replace('-r13-','-r12-') )[0]
else :
visit = np.where( (np.char.strip(allvisits['PLATE']).astype(str) == name[-3]) &
(allvisits['MJD'] == int(name[-2])) &
(allvisits['FIBERID'] == int(name[-1])) )[0]
if len(visit) > 0 : visit=visit[0]
else : continue
visits.append(visit)
allvisits[visit]['VREL']=v['vrel']
allvisits[visit]['VRELERR']=v['vrelerr']
allvisits[visit]['VHELIO']=v['vhelio']
allvisits[visit]['XCORR_VREL']=v['xcorr_vrel']
allvisits[visit]['XCORR_VRELERR']=v['xcorr_vrelerr']
allvisits[visit]['XCORR_VHELIO']=v['xcorr_vhelio']
allvisits[visit]['BC']=v['bc']
allvisits[visit]['RV_TEFF']=v['teff']
allvisits[visit]['RV_LOGG']=v['logg']
allvisits[visit]['RV_FEH']=v['feh']
allvisits[visit]['N_COMPONENTS']=out[1][i]['N_components']
if allvisits[visit]['N_COMPONENTS'] > 1 :
allvisits[visit]['STARFLAG'] |= starmask.getval('MULTIPLE_SUSPECT')
n=len(g['best_fit_parameters'])//3
gd=np.where(np.array(g['best_fit_parameters'])[0:n] > 0)[0]
rv_comp = np.array(g['best_fit_parameters'])[2*n+gd]
n_rv_comp = np.min([3,len(rv_comp)])
allvisits[visit]['RV_COMPONENTS'][0:n_rv_comp] = rv_comp[0:n_rv_comp]
# flag visits with suspect RVs
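# (a visit gets RV_REJECT if the template-fit and cross-correlation heliocentric
#  velocities disagree by more than bd_diff km/s, with a tighter threshold for
#  cool stars, and RV_SUSPECT for any nonzero disagreement)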
if allvisits[visit]['RV_TEFF'] < 6000 : bd_diff = 10
else : bd_diff = 50.
if (np.abs(allvisits[visit]['VHELIO']-allvisits[visit]['XCORR_VHELIO']) > bd_diff) :
allvisits[visit]['STARFLAG'] |= starmask.getval('RV_REJECT')
elif (np.abs(allvisits[visit]['VHELIO']-allvisits[visit]['XCORR_VHELIO']) > 0) :
allvisits[visit]['STARFLAG'] |= starmask.getval('RV_SUSPECT')
if len(visits) > 0 :
visits=np.array(visits)
# set up visit combination, removing visits with suspect RVs
apogee_id=files[-1][1].decode()
gdrv = np.where((allvisits[visits]['STARFLAG'] & starmask.getval('RV_REJECT')) == 0)[0]
if len(gdrv) > 0 :
allv.append([allvisits[visits[gdrv]],load,(field,apogee_id,clobber)])
# do the visit combination, in parallel if requested
if threads == 0 :
output=[]
for v in allv :
output.append(dovisitcomb(v))
else :
pool = mp.Pool(threads)
output = pool.map_async(dovisitcomb, allv).get()
pool.close()
pool.join()
print('done visitcomb pool')
# now load the combined star information into allfield structure
# note that dovisitcomb() returns an apStar structure, with header
# information in FITS header, which limits card names to 8 characters
# Some of these are renamed in the allField structure to use different
# (longer, clearer) names
for apstar,v in zip(output,allv) :
j = np.where(allfield['APOGEE_ID'] == v[-1][1].encode())[0]
# basic target information
try: allfield['APOGEE_ID'][j] = apstar.header['OBJID']
except: allfield['APOGEE_ID'][j] = v[-1][1]
for key in ['RA','DEC','J','J_ERR','H','H_ERR','K','K_ERR'] :
allfield[key][j] = apstar.header[key]
# targeting flags have different names
apogee_target1 = apstar.header['APTARG1']
apogee_target2 = apstar.header['APTARG2']
apogee_target3 = apstar.header['APTARG3']
apogee2_target1 = apstar.header['AP2TARG1']
apogee2_target2 = apstar.header['AP2TARG2']
apogee2_target3 = apstar.header['AP2TARG3']
apogee2_target4 = apstar.header['AP2TARG4']
allfield['APOGEE_TARGET1'][j] = apogee_target1
allfield['APOGEE_TARGET2'][j] = apogee_target2
allfield['APOGEE_TARGET3'][j] = apogee_target3
allfield['APOGEE2_TARGET1'][j] = apogee2_target1
allfield['APOGEE2_TARGET2'][j] = apogee2_target2
allfield['APOGEE2_TARGET3'][j] = apogee2_target3
allfield['APOGEE2_TARGET4'][j] = apogee2_target4
# add character string for target flags
allfield['TARGFLAGS'][j] = (bitmask.targflags(apogee_target1,apogee_target2,apogee_target3,survey='apogee')+
bitmask.targflags(apogee2_target1,apogee2_target2,apogee2_target3,survey='apogee2'))
# some modified names
allfield['AK_TARG'][j] = apstar.header['AKTARG']
allfield['AK_TARG_METHOD'][j] = apstar.header['AKMETHOD']
allfield['AK_WISE'][j] = apstar.header['AKWISE']
allfield['SFD_EBV'][j] = apstar.header['SFD_EBV']
allfield['N_COMPONENTS'][j] = apstar.header['N_COMP']
allfield['VHELIO_AVG'][j] = apstar.header['VHELIO']
# mostly unmodified names
for key in ['STARFLAG','ANDFLAG','SNR','VSCATTER','VERR','RV_TEFF','RV_LOGG','RV_FEH','NVISITS' ] :
allfield[key][j] = apstar.header[key]
# add character string for star flags
allfield['STARFLAGS'][j] = starmask.getname(allfield['STARFLAG'][j])
allfield['ANDFLAGS'][j] = starmask.getname(allfield['ANDFLAG'][j])
# tags that are not from apStar
allfield['SURVEY'][j] = ','.join(set(v[0]['SURVEY']))
allfield['PROGRAMNAME'][j] = ','.join(set(v[0]['PROGRAMNAME']))
#output apField and apFieldVisits
hdulist=fits.HDUList()
hdulist.append(fits.table_to_hdu(Table(allfield)))
outfile=load.filename('Field',field=field)
outfile=outfile.replace('/stars/','/rv/')
try : os.makedirs(os.path.dirname(outfile))
except : pass
hdulist.writeto(outfile,overwrite=True)
hdulist=fits.HDUList()
hdulist.append(fits.table_to_hdu(allvisits))
outfile=load.filename('FieldVisits',field=field)
outfile=outfile.replace('/stars/','/rv/')
hdulist.writeto(outfile,overwrite=True)
# make web page
if obj is not None : suffix='_obj'
else : suffix=''
if tweak: suffix=suffix+'_tweak'
print('making HTML page ....')
mkhtml(field,suffix=suffix,apred=apred,telescope=telescope)
return allfield,allvisits
def dorv(visitfiles) :
""" do the rv jointfit from list of files
"""
# last list element has configuration variables in a tuple
allvisit = visitfiles[0]
load = visitfiles[1]
field=visitfiles[-1][0]
obj=visitfiles[-1][1].decode('UTF-8')
clobber=visitfiles[-1][2]
verbose=visitfiles[-1][3]
tweak=visitfiles[-1][4]
plot=visitfiles[-1][5]
windows=visitfiles[-1][6]
#rvrange=visitfiles[-1][7]
if tweak: suffix='_tweak'
else : suffix='_out'
outdir = os.path.dirname(load.filename('Star',field=field,obj=obj))
outdir = outdir.replace('/stars/','/rv/')
if os.path.exists(outdir+'/'+obj+suffix+'.pkl') and not clobber:
print(obj,' already done')
fp=open(outdir+'/'+obj+suffix+'.pkl','rb')
try:
out=pickle.load(fp)
fp.close()
return out
except:
print('error loading: ', obj+suffix+'.pkl')
pass
speclist=[]
pixelmask=bitmask.PixelBitMask()
badval=pixelmask.badval()|pixelmask.getval('SIG_SKYLINE')|pixelmask.getval('LITTROW_GHOST')
# if we have a significant number of low S/N visits, combine first using
# barycentric correction only, use that to get an estimate of systemic
# velocity, then do RV determination restricting RVs to within 50 km/s
# of estimate. This seems to help significantly for faint visits
lowsnr_visits=np.where(allvisit['SNR']<10)[0]
if (len(lowsnr_visits) > 1) & (len(lowsnr_visits)/len(allvisit) > 0.1) :
try :
apstar_bc=visitcomb(allvisit,bconly=True,load=load,write=False)
apstar_bc.mask(badval)
spec=doppler.Spec1D(apstar_bc.flux,err=apstar_bc.err,bitmask=apstar_bc.bitmask,
mask=apstar_bc.mask,wave=apstar_bc.wave,lsfpars=np.array([0]),
lsfsigma=apstar_bc.wave/22500/2.354,instrument='APOGEE',
filename=apstar_bc.filename)
print('running BC jointfit for :',obj)
out= doppler.rv.jointfit([spec],verbose=verbose,plot=plot,tweak=tweak,maxvel=[-500,500])
rvrange=[out[1][0]['vrel']-50,out[1][0]['vrel']+50]
except :
print(' BC jointfit failed')
rvrange=[-500,500]
elif allvisit['H'].max() > 13.5 :
# if it's faint, restrict to +/- 500 km/s
rvrange=[-500,500]
else :
# otherwise, restrict to +/- 1000 km/s
rvrange=[-1000,1000]
for i in range(len(allvisit)) :
# load all of the visits into doppler Spec1D objects
if load.telescope == 'apo1m' :
visitfile= load.allfile('Visit',plate=allvisit['PLATE'][i],
mjd=allvisit['MJD'][i],reduction=allvisit['APOGEE_ID'][i])
else :
visitfile= load.allfile('Visit',plate=int(allvisit['PLATE'][i]),
mjd=allvisit['MJD'][i],fiber=allvisit['FIBERID'][i])
spec=doppler.read(visitfile,badval=badval)
if windows is not None :
# if we have spectral windows to mask, do so here
for ichip in range(3) :
mask = np.full_like(spec.mask[:,ichip],True)
gd = []
for window in windows :
gd.extend(np.where((spec.wave[:,ichip] > window[0]) & (spec.wave[:,ichip] < window[1]))[0])
mask[gd] = False
spec.mask[:,ichip] |= mask
if spec is not None : speclist.append(spec)
# now do the doppler jointfit to get RVs
try:
# dump empty pickle as a stand-in in case of failure (to prevent redo if not clobber)
fp=open(outdir+'/'+obj+suffix+'.pkl','wb')
pickle.dump(None,fp)
fp.close()
print('running jointfit for : {:s} rvrange:[{:.1f},{:.1f}] nvisits: {:d}'.format(obj,*rvrange,len(speclist)))
out= doppler.rv.jointfit(speclist,maxvel=rvrange,verbose=verbose,
plot=plot,saveplot=plot,outdir=outdir+'/',tweak=tweak)
print('running decomp for :',obj)
gout = gauss_decomp(out[1],phase='two',filt=True)
fp=open(outdir+'/'+obj+suffix+'.pkl','wb')
pickle.dump([out,gout],fp)
fp.close()
print('running plots for :',obj,outdir)
try : os.makedirs(outdir+'/plots/')
except : pass
dop_plot(outdir+'/plots/',obj,out,decomp=gout)
except KeyboardInterrupt :
raise
except ValueError as err:
print('Exception raised for: ', field, obj)
print("ValueError: {0}".format(err))
return
except RuntimeError as err:
print('Exception raised for: ', field, obj)
print("Runtime error: {0}".format(err))
return
except :
print('Exception raised for: ', field, obj)
return
return [out[0:2],gout]
def dovisitcomb(allv) :
""" Routine to do visit combination in parallel
"""
allvisits = allv[0]
load = allv[1]
field = allv[2][0]
apogee_id = allv[2][1]
clobber = allv[2][2]
pixelmask=bitmask.PixelBitMask()
# already done?
outdir=os.path.dirname(load.filename('Field',field=field))
outdir=outdir.replace('/stars/','/rv/')
if os.path.exists(outdir+'/'+apogee_id+'.pkl') and not clobber:
print(apogee_id,' already done visitcomb')
fp=open(outdir+'/'+apogee_id+'.pkl','rb')
try:
out=pickle.load(fp)
fp.close()
return out
except:
print('error loading: ', apogee_id+'.pkl')
pass
# do the combination
apstar=visitcomb(allvisits,load=load,plot=False)
# dump
pickle.dump(apstar,open(outdir+'/'+apogee_id+'.pkl','wb'))
# plot
gd=np.where((apstar.bitmask & (pixelmask.badval()|pixelmask.getval('SIG_SKYLINE'))) == 0) [0]
fig,ax=plots.multi(1,3,hspace=0.001,figsize=(48,6))
med=np.nanmedian(apstar.flux)
plots.plotl(ax[0],aspcap.apStarWave(),apstar.flux,color='k',yr=[0,2*med])
ax[0].plot(aspcap.apStarWave()[gd],apstar.flux[gd],color='g')
ax[0].set_ylabel('Flux')
ax[1].plot(aspcap.apStarWave()[gd],apstar.cont[gd],color='g')
ax[1].set_ylabel('Normalized')
ax[1].plot(aspcap.apStarWave(),apstar.template,color='r')
plots.plotl(ax[2],aspcap.apStarWave(),apstar.flux/apstar.err,yt='S/N')
for i in range(3) : ax[i].set_xlim(15100,17000)
ax[0].set_xlabel('Wavelength')
fig.savefig(outdir+'/plots/'+apogee_id+'.png')
return apstar
def gaussian(amp, fwhm, mean):
""" Gaussian as defined by gausspy
"""
return lambda x: amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)
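# Quick numerical check of the FWHM parameterization above (illustrative sketch,
# not part of the pipeline): the profile should drop to half its peak amplitude
# at mean +/- fwhm/2.
def _check_gaussian_fwhm(amp=2., fwhm=10., mean=0.) :
    g = gaussian(amp, fwhm, mean)
    return np.allclose(g(np.array([mean-fwhm/2., mean+fwhm/2.])), amp/2.)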
import gausspy.gp as gp
def gauss_decomp(out,phase='one',alpha1=0.5,alpha2=1.5,thresh=[4,4],plot=None,filt=False) :
""" Do Gaussian decomposition of CCF using gausspy
Parameters:
out : list of dictionaries for each frame, giving x_ccf, ccf, and ccferr
phase : gausspy parameter
alpha1 : gausspy parameter
alpha2 : gausspy parameter for second set of gaussians if phase=='two'
thresh : gausspy parameter
plot (str) : if not None, do plot and use as root file name for plot
filt (bool) : if true, apply filtering to remove components judged to be insignificant
"""
g = gp.GaussianDecomposer()
g.set('phase',phase)
g.set('SNR_thresh',thresh)
g.set('alpha1',alpha1)
g.set('alpha2',alpha2)
gout=[]
if plot is not None : fig,ax=plots.multi(1,len(out),hspace=0.001,figsize=(6,2+len(out)))
for i,final in enumerate(out) :
gd=np.where(np.isfinite(final['x_ccf']))[0]
x=final['x_ccf'][gd]
y=final['ccf'][gd]
decomp=g.decompose(x,y,final['ccferr'][gd])
n=decomp['N_components']
if filt and n>0 :
# remove components if they are within width of brighter component, or <0.25 peak ,
# or more than twice as wide, or if primary component is wide
for j in range(1,n) :
pars_j = decomp['best_fit_parameters'][j::n]
for k in range(j) :
pars_k = decomp['best_fit_parameters'][k::n]
if (pars_j[0]>pars_k[0] and pars_k[0]>0 and
(abs(pars_j[2]-pars_k[2])<pars_j[1] or
pars_k[0]<0.25*pars_j[0] or
pars_j[1]>100 or
np.abs(pars_k[1])>2*np.abs(pars_j[1]) ) ) :
decomp['best_fit_parameters'][k] = 0
decomp['N_components'] -= 1
elif (pars_k[0]>pars_j[0] and pars_j[0]>0 and
(abs(pars_j[2]-pars_k[2])<pars_k[1] or
pars_j[0]<0.25*pars_k[0] or
pars_k[1]>100 or
np.abs(pars_j[1])>2*np.abs(pars_k[1]) ) ) :
decomp['best_fit_parameters'][j] = 0
pars_j = decomp['best_fit_parameters'][j::n]
decomp['N_components'] -= 1
gout.append(decomp)
if plot is not None:
plots.plotl(ax[i],final['x_ccf'],final['ccf'])
ax[i].plot(final['x_ccf'],final['ccferr'],color='r')
for j in range(n) :
pars=gout[i]['best_fit_parameters'][j::n]
ax[i].plot(x,gaussian(*pars)(x))
if pars[0] > 0 : color='k'
else : color='r'
ax[i].text(0.1,0.8-j*0.1,'{:8.1f}{:8.1f}{:8.1f}'.format(*pars),transform=ax[i].transAxes,color=color)
fig.savefig(plot+'_ccf.png')
del g
return gout
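# Illustrative sketch (synthetic data, hypothetical values) of the input that
# gauss_decomp() expects: the per-visit CCF dictionaries returned by
# doppler.rv.jointfit(), i.e. a list of dicts with 'x_ccf', 'ccf' and 'ccferr'.
def _demo_gauss_decomp() :
    x = np.linspace(-200., 200., 401)
    ccf = gaussian(1., 30., 10.)(x)
    fake = [{'x_ccf': x, 'ccf': ccf, 'ccferr': np.full_like(x, 0.01)}]
    return gauss_decomp(fake, phase='two', filt=True)[0]['N_components']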
def dop_plot(outdir,obj,out,decomp=None) :
""" RV diagnostic plots
"""
matplotlib.use('Agg')
n = len(out[2])
#plot final spectra and final models
# full spectrum
fig,ax=plots.multi(1,n,hspace=0.001,figsize=(8,2+n))
ax=np.atleast_1d(ax)
# continuum
figc,axc=plots.multi(1,n,hspace=0.001,figsize=(8,2+n))
axc=np.atleast_1d(axc)
# windows
windows=[[15700,15780],[15850,16000],[16700,16930]]
fig2,ax2=plots.multi(len(windows),n,hspace=0.001,wspace=0.001,figsize=(12,2+n))
ax2=np.atleast_2d(ax2)
# loop over visits
for i,(mod,spec) in enumerate(zip(out[2],out[3])) :
ax[i].plot(spec.wave,spec.flux,color='k')
for iorder in range(3) :
gd = np.where(~spec.mask[:,iorder])[0]
ax[i].plot(spec.wave[gd,iorder],spec.flux[gd,iorder],color='g')
ax[i].plot(mod.wave,mod.flux,color='r')
ax[i].text(0.1,0.1,'{:d}'.format(spec.head['MJD5']),transform=ax[i].transAxes)
for iwind,wind in enumerate(windows) :
ax2[i,iwind].plot(spec.wave,spec.flux,color='k')
for iorder in range(3) :
gd = np.where(~spec.mask[:,iorder])[0]
ax2[i,iwind].plot(spec.wave[gd,iorder],spec.flux[gd,iorder],color='g')
ax2[i,iwind].plot(mod.wave,mod.flux,color='r')
ax2[i,iwind].set_xlim(wind[0],wind[1])
ax2[i,iwind].set_ylim(0.5,1.3)
if iwind == 0 : ax2[i,iwind].text(0.1,0.1,'{:d}'.format(spec.head['MJD5']),transform=ax2[i,0].transAxes)
axc[i].plot(spec.wave,spec.flux*spec.cont,color='k')
axc[i].plot(spec.wave,spec.cont,color='g')
axc[i].text(0.1,0.1,'{:d}'.format(spec.head['MJD5']),transform=axc[i].transAxes)
fig.savefig(outdir+'/'+obj+'_spec.png')
plt.close()
fig2.savefig(outdir+'/'+obj+'_spec2.png')
plt.close()
figc.savefig(outdir+'/'+obj+'_cont.png')
plt.close()
# plot cross correlation functions with final model
fig,ax=plots.multi(1,n,hspace=0.001,figsize=(6,2+n))
ax=np.atleast_1d(ax)
vmed=np.median(out[1]['vrel'])
for i,(final,spec) in enumerate(zip(out[1],out[3])) :
ax[i].plot(final['x_ccf'],final['ccf'],color='k')
ax[i].plot(final['x_ccf'],final['ccferr'],color='r')
ax[i].plot([final['vrel'],final['vrel']],ax[i].get_ylim(),color='g',label='fit RV')
ax[i].plot([final['xcorr_vrel'],final['xcorr_vrel']],ax[i].get_ylim(),color='r',label='xcorr RV')
ax[i].text(0.1,0.9,'{:d}'.format(spec.head['MJD5']),transform=ax[i].transAxes)
ax[i].set_xlim(vmed-200,vmed+200)
ax[i].legend()
if decomp is not None :
n=decomp[i]['N_components']
if n>0 : n=len(decomp[i]['best_fit_parameters'])//3
x=final['x_ccf']
for j in range(n) :
pars=decomp[i]['best_fit_parameters'][j::n]
ax[i].plot(x,gaussian(*pars)(x))
if pars[0] > 0 : color='k'
else : color='r'
ax[i].text(0.1,0.8-j*0.1,'{:8.1f}{:8.1f}{:8.1f}'.format(*pars),transform=ax[i].transAxes,color=color)
fig.savefig(outdir+'/'+obj+'_ccf.png')
plt.close()
from scipy.signal import convolve
def dop_comp(field) :
""" Compare RVs from different data releases
"""
dop = fits.open(field+'/'+field+'_rv.fits')
r13 = apload.ApLoad(apred='r13')
old = r13.apField(field)
i1,i2 = match.match(dop[1].data['APOGEE_ID'],old[1].data['APOGEE_ID'])
print(len(dop[1].data),len(old[1].data),len(i1))
fig,ax=plots.multi(1,1)
plots.plotc(ax,dop[1].data['RV_TEFF'][i1],dop[1].data['VHELIO_AVG'][i1]-old[1].data['VHELIO_AVG'][i2],dop[1].data['VSCATTER'][i1])
j=np.argsort(np.abs(dop[1].data['VHELIO_AVG'][i1]-old[1].data['VHELIO_AVG'][i2]))
plots._data = dop[1].data
plots._id_cols=['APOGEE_ID']
plots.event(fig)
key=' '
sf,sax=plots.multi(1,2,sharex=True,hspace=0.001)
while key != 'e' :
x,y,key,index = plots.mark(fig,index=True)
obj = dop[1].data['APOGEE_ID'][i1[index]]
#jv = np.where(dop[2].data['APOGEE_ID'] == dop[1].data['APOGEE_ID'][i1])[0]
out=pickle.load(open(field+'/'+obj+'_out.pkl','rb'))
print(obj,old[1].data['APOGEE_ID'][i2[index]])
print(out[0])
sax[0].cla()
spec=old[2].data['SPEC'][i2[index]]
plots.plotl(sax[0],old[3].data['WAVE'][0,:],spec/convolve(spec,np.ones(500)/500,mode='same'),xr=[15000,17000],yr=[0.5,1.5])
for mod,obs in zip(out[2],out[3]) :
sax[1].cla()
for chip in range(3) :
plots.plotl(sax[1],obs.wave[:,chip],obs.flux[:,chip],color='k',yr=[0.5,1.5])
gd = np.where(obs.mask[:,chip] == False)[0]
plots.plotl(sax[1],obs.wave[gd,chip],obs.flux[gd,chip],color='g')
plots.plotl(sax[1],mod.wave[:,chip],mod.flux[:,chip],color='r')
plt.draw()
input('hit a key: ')
def mkhtml(field,suffix='',apred='r13',telescope='apo25m') :
""" Make web pages with tables/plots of RV output
c.f., Doppler vs IDL
"""
starmask=bitmask.StarBitMask()
# get new RV results
load=apload.ApLoad(apred=apred,telescope=telescope)
#apf=load.apField(field)[1].data
infile=load.filename('Field',field=field)
infile=infile.replace('/stars/','/rv/')
apf=fits.open(infile)[1].data
infile=load.filename('FieldVisits',field=field)
infile=infile.replace('/stars/','/rv/')
#apfv=load.apFieldVisits(field)[1].data
apfv=fits.open(infile)[1].data
outdir=os.path.dirname(infile)
try: os.makedirs(outdir+'/plots/')
except: pass
# get old IDL results
r13 = apload.ApLoad(apred='r13',telescope=telescope)
try :
apfieldvisits = r13.apFieldVisits(field)[1].data
apfield = r13.apField(field)[1].data
doapfield = True
except :
print('No apField files found ...')
doapfield = False
# match
if doapfield: i1,i2 = match.match(apfv['FILE'],apfieldvisits['FILE'])
fig,ax=plots.multi(1,2,figsize=(12,4),hspace=0.5)
ax[0].hist(apf['VHELIO_AVG'],bins=np.arange(-600,600,5),label='doppler',color='g',histtype='step')
if doapfield: ax[0].hist(apfield['VHELIO_AVG'],bins=np.arange(-600,600,5),label='IDL',color='r',histtype='step')
ax[0].legend()
ax[0].set_xlabel('VHELIO_AVG')
ax[1].hist(apf['VSCATTER'],bins=np.arange(0,5,0.02),label='doppler',color='g',histtype='step')
if doapfield: ax[1].hist(apfield['VSCATTER'],bins=np.arange(0,1,0.02),label='IDL',color='r',histtype='step')
ax[1].legend()
ax[1].set_xlabel('VSCATTER')
fig.savefig(outdir+'/plots/'+field+'_rvhist.png')
# create HTML and loop over objects
fp=open(outdir+'/'+field+suffix+'.html','w')
fp.write('<HTML>\n')
fp.write('<HEAD><script type=text/javascript src=../html/sorttable.js></script></head>')
fp.write('<BODY>\n')
fp.write('<H2> Field: {:s}</H2><p>\n'.format(field))
fp.write('<A HREF=plots/{:s}_rvhist.png> <IMG SRC=plots/{:s}_rvhist.png> </A>'.format(field,field))
fp.write('<BR>Click on column headers to sort by column value<BR>')
fp.write('<TABLE BORDER=2 CLASS=sortable>\n')
fp.write('<TR><TD>Obj<TD>Delta(VSCATTER)<TD>H<TD>Doppler RV_TEFF<TD>N_components<TD>Combined spectrum<TD>RV plot<TD>Spectrum<TD>Spectrum windows<TD> continuum\n')
for star in apf :
obj=star['APOGEE_ID']
print(obj)
# get visits in Doppler allvisit table
j=np.where(apfv['APOGEE_ID'] == obj)[0]
if len(j) == 0 :
print('missing {:s} in apfv'.format(obj))
continue
# get object in apField
try: k=np.where(apfield['APOGEE_ID'] == obj)[0][0]
except: k=-1
if doapfield :jj=np.where(apfieldvisits['APOGEE_ID'] == obj)[0]
# star information
if star['TARGFLAGS'].find('TELLURIC') >=0 :
fp.write('<TR><TD bgcolor=lightblue>')
else :
fp.write('<TR><TD>')
fp.write('{:s}'.format(obj))
fp.write('(<A HREF="http://simbad.cfa.harvard.edu/simbad/sim-basic?Ident={:12.5f}%09{:12.5f}++&submit=SIMBAD+search"> SIMBAD </A>)<BR>'.format
(star['RA'],star['DEC']))
fp.write('H = {:7.2f}<br>'.format(star['H']))
fp.write('SNR = {:7.2f}<br>'.format(star['SNR']))
fp.write('{:s}<br>'.format(star['TARGFLAGS']))
fp.write('{:s}<br>'.format(star['STARFLAGS']))
# average velocities
fp.write('<TABLE BORDER=2>\n')
fp.write('<TR><TD><TD>VHELIO_AVG<TD>VSCATTER<TD>TEFF<TD>LOGG<TD>[FE/H]\n')
fp.write('<TR><TD>Doppler<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\n'.format(
star['VHELIO_AVG'],star['VSCATTER'],
star['RV_TEFF'],star['RV_LOGG'],star['RV_FEH']))
fp.write('<TR><TD>Doppler Xcorr<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\n'.format(
np.median(apfv['XCORR_VHELIO'][j]),
apfv['XCORR_VHELIO'][j].std(ddof=1),
star['RV_TEFF'],star['RV_LOGG'],star['RV_FEH']))
if k>=0 :
gd = np.where(np.abs(apfieldvisits['VHELIO']) < 999)[0]
fp.write('<TR><TD>IDL<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\n'.format(
apfield['VHELIO_AVG'][k],apfield['VSCATTER'][k],
apfield['RV_TEFF'][k],apfield['RV_LOGG'][k],apfield['RV_FEH'][k]))
fp.write('</TABLE><br>')
# flag bad RVs
vhelio=apfv['VHELIO'][j]
# individual visit velocities
fp.write('<TABLE BORDER=2>')
fp.write('<TR><TD>JD<TD>PLATE<TD>MJD<TD>FIBER<TD>S/N<TD>Doppler xcorr<TD> xcorr_err<TD>Doppler<TD>VERR<TD>IDL<TD>VERR<TD>ESTBC<TD>Dop BC<TD>apS BC\n')
for ind,i in enumerate(j) :
try :
ii = np.where(apfieldvisits['FILE'] == apfv['FILE'][i])[0][0]
vhelio_idl = apfieldvisits['VHELIO'][ii]
vrelerr_idl = apfieldvisits['VRELERR'][ii]
bc_idl = apfieldvisits['BC'][ii]
vscatter_idl = apfield['VSCATTER'][k]
except :
vhelio_idl,vrelerr_idl,bc_idl = -99999,-99999,-99999
vscatter_idl = -99999
if not np.isfinite(apfv['VHELIO'][i]) :
bgcolor='bgcolor=red'
elif apfv['STARFLAG'][i] & starmask.getval('RV_REJECT') > 0 :
bgcolor='bgcolor=lightpink'
elif apfv['STARFLAG'][i] & starmask.getval('RV_SUSPECT') > 0 :
bgcolor='bgcolor=#F4DEDE'
else : bgcolor=''
fp.write(('<TR {:s}> <TD> <A HREF={:s} TARGET="_obj"> {:12.3f}</A> <TD> {:s} <TD> {:5d} <TD> {:5d}'+
'<TD> {:8.1f} <TD> {:8.2f} <TD> {:8.2f} <TD> {:8.2f} <TD> {:8.2f} <TD> {:8.2f} <TD> {:8.2f}'+
'<TD> {:8.2f} <TD> {:8.2f} <TD>{:8.2f}\n').format(
bgcolor,
apfv['FILE'][i].replace('.fits','_dopfit.png').replace('-r12-','-r13-'),
apfv['JD'][i],apfv['PLATE'][i],apfv['MJD'][i],apfv['FIBERID'][i],
apfv['SNR'][i],
apfv['XCORR_VHELIO'][i],apfv['XCORR_VRELERR'][i],
apfv['VHELIO'][i],apfv['VRELERR'][i],
vhelio_idl, vrelerr_idl, apfv['ESTBC'][i],apfv['BC'][i],bc_idl))
fp.write('</TABLE>\n')
# vscatter difference with IDL
fp.write('<TD> {:8.2f}\n'.format(star['VSCATTER']-vscatter_idl))
fp.write('<TD> {:8.2f}\n'.format(star['H']))
fp.write('<TD> {:8.2f}\n'.format(star['RV_TEFF']))
fp.write('<TD> {:d}\n'.format(star['N_COMPONENTS']))
# plot visit RVs
if doapfield :
vidl=apfieldvisits['VHELIO'][jj]
gd = np.where(np.abs(vidl) < 999)[0]
vmax=np.nanmax(np.append(vhelio,vidl[gd]))
vmin=np.nanmin(np.append(vhelio,vidl[gd]))
else :
vmax=np.nanmax(vhelio)
vmin=np.nanmin(vhelio)
yr=[vmin-0.1*(vmax-vmin),vmax+0.1*(vmax-vmin)]
try :
fig,ax=plots.multi(1,1)
gd_dop = np.where((apfv['STARFLAG'][j] & starmask.getval('RV_REJECT')) == 0)[0]
if len(gd_dop) > 0 :
plots.plotp(ax,apfv['MJD'][j[gd_dop]],vhelio[gd_dop],size=15,color='g',yr=yr,label='Doppler')
bd_dop = np.where((apfv['STARFLAG'][j] & starmask.getval('RV_REJECT')) > 0)[0]
if len(bd_dop) > 0 : ax.scatter(apfv['MJD'][j[bd_dop]],vhelio[bd_dop],s=15,
facecolors='none',edgecolors='g',label='rejected Doppler')
ax.plot(ax.get_xlim(),[star['VHELIO_AVG'],star['VHELIO_AVG']],color='g')
if doapfield :
plots.plotp(ax,apfieldvisits['MJD'][jj[gd]],vidl[gd],size=15,color='r',yr=yr,label='IDL')
ax.plot(ax.get_xlim(),[apfield['VHELIO_AVG'][k],apfield['VHELIO_AVG'][k]],color='r')
ax.legend()
fig.savefig(outdir+'/plots/'+obj+'_rv.png')
plt.close()
except KeyboardInterrupt: raise
except :
print('Plotting error....')
plt.close()
pass
# include plots
fp.write('<TD><a HREF=plots/{:s}.png TARGET="_obj"> <IMG SRC=plots/{:s}.png WIDTH=600></A>\n'.format(obj,obj))
fp.write('<TD><IMG SRC=plots/{:s}_rv.png TARGET="_obj">\n'.format(obj))
fp.write('<TD><A HREF=plots/{:s}_ccf.png TARGET="_obj"> <IMG SRC=plots/{:s}_ccf.png></A>\n'.format(obj,obj))
fp.write('<TD><A HREF=plots/{:s}_spec.png TARGET="_obj"> <IMG SRC=plots/{:s}_spec.png></a>\n'.format(obj,obj))
fp.write('<TD><A HREF=plots/{:s}_spec2.png TARGET="_obj"> <IMG SRC=plots/{:s}_spec2.png></a>\n'.format(obj,obj))
fp.write('<TD><A HREF=plots/{:s}_cont.png TARGET="_obj"> <IMG SRC=plots/{:s}_cont.png></a>\n'.format(obj,obj))
fp.close()
def overlap(fields) :
""" compare RVs from different fields for overlapping stars
"""
r13=apload.ApLoad(apred='r13')
f=[]
a=[]
for field in fields :
f.append(fits.open(field+'/'+field+'_rv.fits'))
a.append( r13.apFieldVisits(field))
outdir=fields[0]+'_'+fields[1]
try: os.makedirs(outdir)
except: pass
fp=open(outdir+'/'+outdir+'.html','w')
fp.write('<HTML>\n')
fp.write('<HEAD><script type=text/javascript src=../html/sorttable.js></script></head>')
fp.write('<BODY>\n')
fp.write('<TABLE BORDER=2>\n')
matplotlib.use('Agg')
i1,i2=match.match(f[0][1].data['APOGEE_ID'],f[1][1].data['APOGEE_ID'])
colors=['g','r','b','m']
for star in f[0][1].data['APOGEE_ID'][i1] :
print(star)
fp.write('<TR><TD>{:s}<BR>\n'.format(star))
fp.write('<TABLE BORDER=2>\n')
fig,ax=plots.multi(1,1)
for i,field in enumerate(f) :
j=np.where(field[2].data['APOGEE_ID'] == star)[0]
plots.plotp(ax,field[2].data['MJD'][j],field[2].data['VHELIO'][j],color=colors[i],size=10)
j=np.where(field[1].data['APOGEE_ID'] == star)[0][0]
fp.write('<TR><TD>Doppler<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\n'.format(
field[1].data['VHELIO_AVG'][j],field[1].data['VSCATTER'][j],
field[1].data['RV_TEFF'][j],field[1].data['RV_LOGG'][j],field[1].data['RV_FEH'][j]))
for i,field in enumerate(a) :
j=np.where(field[1].data['APOGEE_ID'] == star)[0]
gd=np.where(np.abs(field[1].data['VHELIO'][j]) < 999)[0]
plots.plotp(ax,field[1].data['MJD'][j[gd]],field[1].data['VHELIO'][j[gd]],color=colors[i+2],size=10)
#fp.write('<TR><TD>IDL<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.2f}<TD>{:8.0f}<TD>{:8.2f}<TD>{:8.2f}\n'.format(
# field[1].data['VHELIO_AVG'],field[1].data['VSCATTER'],
# field[1].data['RV_TEFF'],field[1].data['RV_LOGG'],field[1].data['RV_FEH']))
plt.draw()
plt.close()
fig.savefig(outdir+'/'+star+'.png')
fp.write('</TABLE>\n')
fp.write('<TD><a HREF={:s}.png> <IMG SRC={:s}.png> </a>\n'.format(star,star))
fp.write('</TABLE>')
fp.close()
from apogee.aspcap import aspcap
from apogee.apred import wave
from apogee.apred import sincint
def visitcomb(allvisit,load=None, apred='r13',telescope='apo25m',nres=[5,4.25,3.5],bconly=False,plot=False,write=True) :
""" Combine multiple visits with individual RVs to rest frame sum
"""
if load is None : load = apload.ApLoad(apred=apred,telescope=telescope)
cspeed = 2.99792458e5 # speed of light in km/s
wnew=aspcap.apStarWave()
nwave=len(wnew)
nvisit=len(allvisit)
# initialize array for stack of interpolated spectra
zeros = np.zeros([nvisit,nwave])
izeros = np.zeros([nvisit,nwave],dtype=int)
stack=apload.ApSpec(zeros,err=zeros.copy(),bitmask=izeros,cont=zeros.copy(),
sky=zeros.copy(),skyerr=zeros.copy(),telluric=zeros.copy(),telerr=zeros.copy())
apogee_target1, apogee_target2, apogee_target3 = 0, 0, 0
apogee2_target1, apogee2_target2, apogee2_target3, apogee2_target4 = 0, 0, 0, 0
starflag,andflag = 0,0
starmask=bitmask.StarBitMask()
# loop over each visit and interpolate to final wavelength grid
if plot : fig,ax=plots.multi(1,2,hspace=0.001)
for i,visit in enumerate(allvisit) :
if bconly : vrel = -visit['BC']
else : vrel = visit['VREL']
# skip if we don't have an RV
if not np.isfinite(vrel) : continue
# load the visit
if load.telescope == 'apo1m' :
apvisit=load.apVisit1m(visit['PLATE'],visit['MJD'],visit['APOGEE_ID'],load=True)
else :
apvisit=load.apVisit(int(visit['PLATE']),visit['MJD'],visit['FIBERID'],load=True)
pixelmask=bitmask.PixelBitMask()
# rest-frame wavelengths transformed to the observed frame of this visit
w=aspcap.apStarWave()*(1.0+vrel/cspeed)
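# (non-relativistic Doppler shift, w_obs = w_rest*(1+vrel/c): each visit spectrum
#  is resampled onto this shifted grid so that all visits line up in the stellar
#  rest frame before stacking)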
print(vrel)
# loop over the chips
for chip in range(3) :
# get the pixel values to interpolate to
pix=wave.wave2pix(w,apvisit.wave[chip,:])
gd=np.where(np.isfinite(pix))[0]
# get a smoothed, filtered spectrum to use as replacement for bad values
cont = gaussian_filter(median_filter(apvisit.flux[chip,:],[501],mode='reflect'),100)
errcont = gaussian_filter(median_filter(apvisit.err[chip,:],[501],mode='reflect'),100)
bd = np.where(apvisit.bitmask[chip,:]&pixelmask.badval())[0]
if len(bd) > 0 :
apvisit.flux[chip,bd] = cont[bd]
apvisit.err[chip,bd] = errcont[bd]
# load up quantity/error pairs for interpolation
raw=[[apvisit.flux[chip,:],apvisit.err[chip,:]**2],
[apvisit.sky[chip,:],apvisit.skyerr[chip,:]**2],
[apvisit.telluric[chip,:],apvisit.telerr[chip,:]**2]]
# load up individual mask bits
for ibit,name in enumerate(pixelmask.name) :
if name != '' and len(np.where(apvisit.bitmask[chip,:]&2**ibit)[0]) > 0 :
raw.append([np.clip(apvisit.bitmask[chip,:]&2**ibit,None,1),None])
# do the sinc interpolation
out=sincint.sincint(pix[gd],nres[chip],raw)
# from output flux, get continuum to remove, so that all spectra are
# on same scale. We'll later multiply in the median continuum
flux = out[0][0]
stack.cont[i,gd] = gaussian_filter(median_filter(flux,[501],mode='reflect'),100)
# load interpolated spectra into output stack
stack.flux[i,gd] = out[0][0] / stack.cont[i,gd]
stack.err[i,gd] = out[0][1] / stack.cont[i,gd]
stack.sky[i,gd] = out[1][0]
stack.skyerr[i,gd] = out[1][1]
stack.telluric[i,gd] = out[2][0]
stack.telerr[i,gd] = out[2][1]
# for mask, set bits where interpolated value is above some threshold
# defined for each mask bit
iout=3
for ibit,name in enumerate(pixelmask.name) :
if name != '' and len(np.where(apvisit.bitmask[chip,:]&2**ibit)[0]) > 0 :
j = np.where(np.abs(out[iout][0]) > pixelmask.maskcontrib[ibit])[0]
stack.bitmask[i,gd[j]] |= 2**ibit
iout+=1
# increase uncertainties for persistence pixels
bd = np.where((stack.bitmask[i,:]&pixelmask.getval('PERSIST_HIGH')) > 0)[0]
if len(bd) > 0 : stack.err[i,bd] *= np.sqrt(5)
bd = np.where(((stack.bitmask[i,:]&pixelmask.getval('PERSIST_HIGH')) == 0) &
((stack.bitmask[i,:]&pixelmask.getval('PERSIST_MED')) > 0) )[0]
if len(bd) > 0 : stack.err[i,bd] *= np.sqrt(4)
bd = np.where(((stack.bitmask[i,:]&pixelmask.getval('PERSIST_HIGH')) == 0) &
((stack.bitmask[i,:]&pixelmask.getval('PERSIST_MED')) == 0) &
((stack.bitmask[i,:]&pixelmask.getval('PERSIST_LOW')) > 0) )[0]
if len(bd) > 0 : stack.err[i,bd] *= np.sqrt(3)
#!/usr/bin/env python3
import numpy as np
import os,sys,copy
import glob
import random
sys.path.insert(0, '../')
from parameters_analysis.parameters_graphs import *
from projections.projections import *
from ranking_utils.ranking_construction import *
from utils.normalizations import *
from ranking_utils.ess_ranking import *
import keras
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import argparse
def load_features(base, names):
p_pl_train = np.load(os.path.join(base,names[0]))
p_pl_val = np.load(os.path.join(base,names[1]))
p_pl_test = np.load(os.path.join(base,names[2]))
p_im_train = np.load(os.path.join(base,names[3]))
p_im_val = np.load(os.path.join(base,names[4]))
p_im_test = np.load(os.path.join(base,names[5]))
p_reID_train = np.load(os.path.join(base,names[6]))
p_reID_val = np.load(os.path.join(base,names[7]))
p_reID_test = np.load(os.path.join(base,names[8]))
n_pl_train = np.load(os.path.join(base,names[9]))
n_pl_val = np.load(os.path.join(base,names[10]))
n_pl_test = np.load(os.path.join(base,names[11]))
n_im_train = np.load(os.path.join(base,names[12]))
n_im_val = np.load(os.path.join(base,names[13]))
n_im_test = np.load(os.path.join(base,names[14]))
n_reID_train = np.load(os.path.join(base,names[15]))
n_reID_val = np.load(os.path.join(base,names[16]))
n_reID_test = np.load(os.path.join(base,names[17]))
return [p_pl_train,p_pl_val,p_pl_test],[p_im_train,p_im_val,p_im_test],[p_reID_train,p_reID_val,p_reID_test],[n_pl_train,n_pl_val,n_pl_test],[n_im_train,n_im_val,n_im_test],[n_reID_train,n_reID_val,n_reID_test]
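# Illustrative usage (hypothetical base path): each returned item is a
# [train, val, test] triple, grouped as positive/negative x places/imagenet/re-ID, e.g.
# p_pl, p_im, p_reID, n_pl, n_im, n_reID = load_features('../out_files/features/bombing', names_normal)
# p_reID_test = p_reID[2]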
##################################MAIN#####################################
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compare different methods using best architecture.')
parser.add_argument('--dataset', dest='dataset', type=str,help='include the name of the dataset to extract image features',default='bombing')
parser.add_argument('--aug', dest='aug', type=str,help='include _aug to use augmented data', default='')
#parser.add_argument("--concatenation", action='store_true', help="compare concatenation")
parser.add_argument("--finetuning", action='store_true', help="compare concatenation of fine-tuned features")
parser.add_argument("--ESS", action='store_true', help="compare ESS features")
parser.add_argument("--cross_entropy", action='store_true', help="compare cross_entropy features")
parser.add_argument("--contrastive", action='store_true', help="compare contrastive features")
parser.add_argument("--triplet", action='store_true',help="compare triplet features")
args = parser.parse_args()
dataset = args.dataset #'wedding', 'fire', 'bombing', 'museu_nacional' or 'bangladesh_fire'
aug=args.aug
compare_concat = True
compare_finetuned = args.finetuning
compare_ESS = args.ESS
compare_cross = args.cross_entropy
compare_contrastive = args.contrastive
compare_triplet = args.triplet
path_base = '../out_files/features'
path_base_ranking = '../out_files/ranking'
path_base_graphs = '../out_files/graphs'
name_final_folder = dataset+aug
save_path_graph = os.path.join(path_base_graphs,name_final_folder)
if not os.path.isdir(save_path_graph):
os.mkdir(save_path_graph)
## feature paths
path_features = os.path.join(path_base,name_final_folder)
path_features_finetuning = os.path.join(path_base,'fine_tunned',name_final_folder)
path_features_crossEntropy = os.path.join(path_base,'cross_entropy',name_final_folder)
path_features_contrastive = os.path.join(path_base,'contrastive',name_final_folder)
path_features_triplet = os.path.join(path_base,'triplet',name_final_folder)
path_features_ess = os.path.join(path_base,'ess')
if not os.path.isdir(path_features_ess):
os.mkdir(path_features_ess)
path_features_ess = os.path.join(path_features_ess, name_final_folder)
if not os.path.isdir(path_features_ess):
os.mkdir(path_features_ess)
## ranking paths
path_ranking = os.path.join(path_base_ranking,name_final_folder)
if not os.path.isdir(path_ranking):
os.mkdir(path_ranking)
path_ranking_finetuning = os.path.join(path_base_ranking,'fine_tunned')
if not os.path.isdir(path_ranking_finetuning):
os.mkdir(path_ranking_finetuning)
path_ranking_finetuning = os.path.join(path_ranking_finetuning,name_final_folder)
if not os.path.isdir(path_ranking_finetuning):
os.mkdir(path_ranking_finetuning)
path_ranking_crossEntropy = os.path.join(path_base_ranking,'cross_entropy')
if not os.path.isdir(path_ranking_crossEntropy):
os.mkdir(path_ranking_crossEntropy)
path_ranking_crossEntropy = os.path.join(path_ranking_crossEntropy,name_final_folder)
if not os.path.isdir(path_ranking_crossEntropy):
os.mkdir(path_ranking_crossEntropy)
path_ranking_contrastive = os.path.join(path_base_ranking,'contrastive')
if not os.path.isdir(path_ranking_contrastive):
os.mkdir(path_ranking_contrastive)
path_ranking_contrastive = os.path.join(path_ranking_contrastive,name_final_folder)
if not os.path.isdir(path_ranking_contrastive):
os.mkdir(path_ranking_contrastive)
path_ranking_triplet = os.path.join(path_base_ranking,'triplet')
if not os.path.isdir(path_ranking_triplet):
os.mkdir(path_ranking_triplet)
path_ranking_triplet = os.path.join(path_ranking_triplet,name_final_folder)
if not os.path.isdir(path_ranking_triplet):
os.mkdir(path_ranking_triplet)
path_ranking_ess = os.path.join(path_base_ranking,'ess')
if not os.path.isdir(path_ranking_ess):
os.mkdir(path_ranking_ess)
path_ranking_ess = os.path.join(path_ranking_ess, name_final_folder)
if not os.path.isdir(path_ranking_ess):
os.mkdir(path_ranking_ess)
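# (a minimal, equivalent sketch of the directory creation above, assuming the same
#  layout; the explicit mkdir chain is kept for compatibility:
#  os.makedirs(os.path.join(path_base_ranking, 'ess', name_final_folder), exist_ok=True))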
##------------------------
##------------------------
## Load features
names_normal = ['positive_train_places.npy','positive_val_places.npy','positive_test_places.npy','positive_train_imagenet.npy','positive_val_imagenet.npy',
'positive_test_imagenet.npy','positive_train_people.npy','positive_val_people.npy','positive_test_people.npy','negative_train_places.npy','negative_val_places.npy',
'negative_test_places.npy','negative_train_imagenet.npy','negative_val_imagenet.npy','negative_test_imagenet.npy','negative_train_people.npy','negative_val_people.npy',
'negative_test_people.npy']
###normal
if compare_concat:
p_pl, p_im, p_reID, n_pl, n_im, n_reID=load_features(path_features, names_normal)
people= np.concatenate([p_reID[2], n_reID[2]], axis=0)
people_z = normZ(people)
places= np.concatenate([p_pl[2], n_pl[2]], axis=0)
places_z = normZ(places)
imagenet= np.concatenate([p_im[2], n_im[2]], axis=0)
imagenet_z = normZ(imagenet)
test_where = np.concatenate([imagenet_z, places_z], axis=1)
complete = np.concatenate([places_z, imagenet_z], axis=1)
complete = np.concatenate([complete, people_z], axis=1)
train_places_z = normZ(p_pl[0])
train_imagenet_z = normZ(p_im[0])
train_reID_z = normZ(p_reID[0])
train_where = np.concatenate([train_imagenet_z, train_places_z], axis=1)
train_complete = np.concatenate([train_places_z, train_imagenet_z], axis=1)
train_complete = np.concatenate([train_complete, train_reID_z], axis=1)
notSortedIndicesConcat = []
sortedIndicesConcat = []
mean_concat = []
var_concat = []
non_mean_concat = []
non_var_concat = []
###finetuned
if(compare_finetuned):
f_p_pl, f_p_im, f_p_reID, f_n_pl, f_n_im, f_n_reID=load_features(path_features_finetuning, names_normal)
finetuning_people= np.concatenate([f_p_reID[2], f_n_reID[2]], axis=0)
finetuning_people_z = normZ(finetuning_people)
finetuning_places= np.concatenate([f_p_pl[2], f_n_pl[2]], axis=0)
finetuning_places_z = normZ(finetuning_places)
finetuning_imagenet= np.concatenate([f_p_im[2], f_n_im[2]], axis=0)
finetuning_imagenet_z = normZ(finetuning_imagenet)
finetuning_complete = np.concatenate([finetuning_places_z, finetuning_imagenet_z], axis=1)
finetuning_complete = np.concatenate([finetuning_complete, finetuning_people_z], axis=1)
f_train_places_z = normZ(f_p_pl[0])
f_train_imagenet_z = normZ(f_p_im[0])
f_train_reID_z = normZ(f_p_reID[0])
finetuning_train_complete = np.concatenate([f_train_places_z, f_train_imagenet_z], axis=1)
finetuning_train_complete = np.concatenate([finetuning_train_complete, f_train_reID_z], axis=1)
notSortedIndicesFinetuning = []
sortedIndicesFinetuning = []
mean_fine = []
var_fine = []
non_mean_fine = []
non_var_fine = []
####ess
if compare_ESS:
try:
description_ess = np.load(os.path.join(path_features_ess,'ess_test.npy'))
description_ess_query = np.load(os.path.join(path_features_ess,'ess_train.npy'))
except:
query = copy.copy(train_where)
distancias_where = pairwise_distances(test_where, Y=query, metric='euclidean')
distancias_where_query = pairwise_distances(train_where, metric='euclidean')
query= copy.copy(train_imagenet_z)
distancias_objects = pairwise_distances(imagenet_z, Y=query, metric='euclidean')
distancias_objects_query = pairwise_distances(train_imagenet_z, metric='euclidean')
query= copy.copy(train_reID_z)
distancias_people = pairwise_distances(people_z, Y=query, metric='euclidean')
distancias_people_query = pairwise_distances(train_reID_z, metric='euclidean')
description_ess = create_ess_representation([distancias_where,distancias_objects,distancias_people], 3, range(len(train_where)))
description_ess_query = create_ess_representation([distancias_where_query,distancias_objects_query,distancias_people_query], 3, range(len(train_where)))
np.save(os.path.join(path_features_ess,'ess_test.npy'),description_ess)
np.save(os.path.join(path_features_ess,'ess_train.npy'),description_ess_query)
notSortedIndicesESS = []
sortedIndicesESS = []
mean_ESS = []
var_ESS = []
non_mean_ESS = []
non_var_ESS = []
####cross entropy
if compare_cross:
c_p_test = np.load(os.path.join(path_features_crossEntropy,'1024_512_positive_'+dataset+aug+'_test.npy'))
c_n_test = np.load(os.path.join(path_features_crossEntropy,'1024_512_negative_'+dataset+aug+'_test.npy'))
cross_complete = np.concatenate([c_p_test, c_n_test], axis=0)
cross_train_complete = np.load(os.path.join(path_features_crossEntropy,'1024_512_positive_'+dataset+aug+'_train.npy'))
notSortedIndicesCross = []
sortedIndicesCross = []
mean_cross = []
var_cross = []
non_mean_cross = []
non_var_cross = []
####contrastive
if compare_contrastive:
cont_p_test = np.load(os.path.join(path_features_contrastive,'1024_512_positive_'+dataset+aug+'_test.npy'))
cont_n_test = np.load(os.path.join(path_features_contrastive,'1024_512_negative_'+dataset+aug+'_test.npy'))
contrastive_complete = np.concatenate([cont_p_test, cont_n_test], axis=0)
contrastive_train_complete = np.load(os.path.join(path_features_contrastive,'1024_512_positive_'+dataset+aug+'_train.npy'))
notSortedIndicesContrastive = []
sortedIndicesContrastive = []
mean_cont = []
var_cont = []
non_mean_cont = []
non_var_cont = []
####triplet
if compare_triplet:
t_p_test = np.load(os.path.join(path_features_triplet,'1024_512_positive_'+dataset+aug+'_test.npy'))
t_n_test = np.load(os.path.join(path_features_triplet,'1024_512_negative_'+dataset+aug+'_test.npy'))
triplet_complete = np.concatenate([t_p_test, t_n_test], axis=0)
triplet_train_complete = np.load(os.path.join(path_features_triplet,'1024_512_positive_'+dataset+aug+'_train.npy'))
notSortedIndicesTriplet = []
sortedIndicesTriplet = []
mean_triplet = []
var_triplet = []
non_mean_triplet = []
non_var_triplet = []
print('Normal Length: ',len(complete))
relevant = len(p_reID[2])
iterations = len(p_reID[0])
print('Iterations:', iterations)
#iterations = len(triplet_train_complete)
#print('Iterations:', iterations)
#iterations = 2
count_normal = 0
count_embedding = 0
##------------------------
##------------------------
## Obtain one ranking for query (positive images of training)
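## For every enabled method below the pattern is identical: compute euclidean
## distances from the query feature vector to all test features, record the
## mean/variance over the relevant (first `relevant`) and non-relevant entries,
## and argsort the distances to obtain the ranking for this query.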
for iteration in range(0,iterations):
print('--------------------')
print('Iteration',iteration)
if compare_ESS:
distancias_ESS = pairwise_distances(description_ess, Y=[description_ess_query[iteration]], metric='euclidean')
mean_ESS.append(np.mean(distancias_ESS[0:relevant]))
non_mean_ESS.append(np.mean(distancias_ESS[relevant:-1]))
var_ESS.append(np.var(distancias_ESS[0:relevant]))
non_var_ESS.append(np.var(distancias_ESS[relevant:-1]))
rankingESS = np.transpose(distancias_ESS)
notSortedIndicesESS.append(rankingESS[0])
sortedIndicesESS.append(np.argsort(rankingESS)[0])
if compare_concat:
query = []
query.append(np.copy(train_complete[int(iteration)]))
distancias_concat = pairwise_distances(complete, Y=query, metric='euclidean')
mean_concat.append(np.mean(distancias_concat[0:relevant]))
non_mean_concat.append(np.mean(distancias_concat[relevant:-1]))
var_concat.append(np.var(distancias_concat[0:relevant]))
non_var_concat.append(np.var(distancias_concat[relevant:-1]))
rankingConcat = np.transpose(distancias_concat)
notSortedIndicesConcat.append(rankingConcat[0])
sortedIndicesConcat.append(np.argsort(rankingConcat)[0])
if compare_finetuned:
query = []
query.append(np.copy(finetuning_train_complete[int(iteration)]))
distancias_concat_finetuning = pairwise_distances(finetuning_complete, Y=query, metric='euclidean')
mean_fine.append(np.mean(distancias_concat_finetuning[0:relevant]))
non_mean_fine.append(np.mean(distancias_concat_finetuning[relevant:-1]))
var_fine.append(np.var(distancias_concat_finetuning[0:relevant]))
non_var_fine.append(np.var(distancias_concat_finetuning[relevant:-1]))
rankingConcatFinetuning = np.transpose(distancias_concat_finetuning)
notSortedIndicesFinetuning.append(rankingConcatFinetuning[0])
sortedIndicesFinetuning.append(np.argsort(rankingConcatFinetuning)[0])
if compare_cross:
query = []
query.append(np.copy(cross_train_complete[int(iteration)]))
distancias_cross = pairwise_distances(cross_complete, Y=query, metric='euclidean')
mean_cross.append(np.mean(distancias_cross[0:relevant]))
non_mean_cross.append(np.mean(distancias_cross[relevant:-1]))
var_cross.append(np.var(distancias_cross[0:relevant]))
non_var_cross.append(np.var(distancias_cross[relevant:-1]))
rankingCross = np.transpose(distancias_cross)
notSortedIndicesCross.append(rankingCross[0])
sortedIndicesCross.append(np.argsort(rankingCross)[0])
if compare_contrastive:
query = []
query.append(np.copy(contrastive_train_complete[int(iteration)]))
distancias_contrastive = pairwise_distances(contrastive_complete, Y=query, metric='euclidean')
mean_cont.append(np.mean(distancias_contrastive[0:relevant]))
non_mean_cont.append(np.mean(distancias_contrastive[relevant:-1]))
var_cont.append(np.var(distancias_contrastive[0:relevant]))
non_var_cont.append(np.var(distancias_contrastive[relevant:-1]))
rankingContrastive = np.transpose(distancias_contrastive)
notSortedIndicesContrastive.append(rankingContrastive[0])
sortedIndicesContrastive.append(np.argsort(rankingContrastive)[0])
if compare_triplet:
query = []
query.append(np.copy(triplet_train_complete[int(iteration)]))
distancias_triplet = pairwise_distances(triplet_complete, Y=query, metric='euclidean')
mean_triplet.append(np.mean(distancias_triplet[0:relevant]))
import contextlib
import json
import os.path
import warnings
import fiona
import numpy as np
import sdi
from shapely.geometry import MultiLineString, shape, mapping
import tables
class HDF5Backend(object):
"""Read/write access for HDF5 data store."""
def __init__(self, filepath):
self.filepath = filepath
self.hydropick_format_version = 1
def import_binary_file(self, bin_file):
data = sdi.binary.read(bin_file)
data_raw = sdi.binary.read(bin_file, separate=False)
x = data['frequencies'][-1]['interpolated_easting']
y = data['frequencies'][-1]['interpolated_northing']
coords = np.vstack((x, y)).T
line_name = data['survey_line_number']
with self._open_file('a') as f:
line_group = self._get_survey_line_group(f, line_name)
self._write_array(f, line_group, 'navigation_line', coords)
f.flush()
self._write_freq_dicts(line_name, data['frequencies'])
self._write_raw_sdi_dict(line_name, data_raw)
# THIS IS MOVED BACK TO SURVEYLINE LOAD UNTIL TRACE_NUM
# ERRORS FIXED IN SDI BINARY SO THAT BAD TRACE NUM
# ARRAYS CAN BE FIXED
# current_surface_line = {
# 'name': 'current_surface_from_bin',
# 'depth_array': data_raw['depth_r1'],
# 'index_array': data_raw['trace_num'] - 1,
# 'edited': False,
# 'source': 'sdi_file',
# 'source_name': bin_file,
# }
# self.write_pick(current_surface_line, line_name, 'current')
def import_corestick_file(self, corestick_file):
core_sample_dicts = sdi.corestick.read(corestick_file)
self._write_core_samples(core_sample_dicts)
def import_pick_file(self, pick_file):
line_name = os.path.basename(pick_file).split('.')[0]
pick_data = sdi.pickfile.read(pick_file)
surface_number = pick_data['surface_number']
if surface_number == 1:
line_type = 'current'
elif surface_number == 2:
line_type = 'preimpoundment'
else:
raise NotImplementedError(
'unexpected line file type: {}'.format(surface_number))
line_data = {
'name': 'pickfile_' + line_type,
'depth_array': pick_data['depth'],
'index_array': pick_data['trace_number'] - 1,
'edited': False,
'source': 'previous depth line',
'source_name': pick_file,
}
self.write_pick(line_data, line_name, line_type)
def import_shoreline_file(self, lake_name, shoreline_file):
""" Load the shoreline from GIS file.
NB: Currently has side effects, loading crs and properties traits.
"""
with fiona.open(shoreline_file) as f:
crs = f.crs
geometries = []
for rec in f:
geometries.append(rec['geometry'])
# XXX: assuming that the properties aren't varying by geometry
properties = rec['properties']
if len(geometries) == 1:
geom = shape(geometries[0])
else:
# XXX: this assumes we'll always get lines, not polygons or other
geom = MultiLineString([
shape(geometry) for geometry in geometries])
with self._open_file('a') as f:
shoreline_group = self._get_shoreline_group(f)
shoreline_group._v_attrs.crs = self._safe_serialize(crs)
shoreline_group._v_attrs.lake_name = self._safe_serialize(lake_name)
shoreline_group._v_attrs.original_shapefile = self._safe_serialize(shoreline_file)
shoreline_group._v_attrs.properties = self._safe_serialize(properties)
geometry_str = self._safe_serialize(mapping(geom))
self._write_array(f, shoreline_group, 'geometry', np.array(geometry_str))
from itertools import product
import skimage.morphology as morph
from skimage.filters import threshold_otsu
import scipy.ndimage as ndi
from scipy.stats import itemfreq
from skimage.color import label2rgb
import numpy as np
import cv2
import skfmm
def sigmoid(x):
return 1. / (1 + np.exp(-x))
def watershed_center(image, center):
distance = ndi.distance_transform_edt(image)
markers, nr_blobs = ndi.label(center)
labeled = morph.watershed(-distance, markers, mask=image)
dropped, _ = ndi.label(image - (labeled > 0))
dropped = np.where(dropped > 0, dropped + nr_blobs, 0)
correct_labeled = dropped + labeled
return relabel(correct_labeled)
def watershed_contour(image, contour):
mask = np.where(contour == 1, 0, image)
distance = ndi.distance_transform_edt(mask)
markers, nr_blobs = ndi.label(mask)
labeled = morph.watershed(-distance, markers, mask=image)
dropped, _ = ndi.label(image - (labeled > 0))
dropped = np.where(dropped > 0, dropped + nr_blobs, 0)
correct_labeled = dropped + labeled
return relabel(correct_labeled)
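# Minimal synthetic sketch (illustrative only) of the marker-controlled watershed
# used by watershed_center()/watershed_contour(): two touching discs are split
# into separate labels by flooding the negated distance transform from per-blob
# marker seeds.
def _demo_watershed_split() :
    image = np.zeros((40, 80), dtype=np.uint8)
    rr, cc = np.ogrid[:40, :80]
    image[(rr-20)**2 + (cc-30)**2 < 15**2] = 1
    image[(rr-20)**2 + (cc-50)**2 < 15**2] = 1
    centers = np.zeros_like(image)
    centers[20, 30] = 1
    centers[20, 50] = 1
    return watershed_center(image, centers)   # labeled image with values {0,1,2}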
def postprocess(image, contour):
cleaned_mask = clean_mask(image, contour)
good_markers = get_markers(cleaned_mask, contour)
good_distance = get_distance(cleaned_mask)
labels = morph.watershed(-good_distance, good_markers, mask=cleaned_mask)
labels = add_dropped_water_blobs(labels, cleaned_mask)
m_thresh = threshold_otsu(image)
initial_mask_binary = (image > m_thresh).astype(np.uint8)
labels = drop_artifacts_per_label(labels, initial_mask_binary)
labels = drop_small(labels, min_size=20)
labels = fill_holes_per_blob(labels)
return labels
def mpostprocess(score):
predition = np.argmax(score[:,:,:3], axis=2).astype('uint8')
predition = predition == 1
if predition.sum() < 20:
return np.zeros_like( predition ) # not predition !!!
score_prob = sigmoid(score)
m = score_prob[:,:,1]
#c = score_prob[:,:,2]
m_thresh = threshold_otsu(m)
#c_thresh = threshold_otsu(c)
m_b = m > m_thresh
#c_b = c > c_thresh
predition = ndi.binary_fill_holes(predition)
predition = morph.opening(predition,morph.disk(1))
mk, _ = ndi.label(predition)
distance = ndi.distance_transform_edt(predition)
labels = morph.watershed(-distance, mk, mask=m_b )
if labels.sum() < 20:
return np.zeros_like( labels ) # not predition !!!
labels = add_dropped_water_blobs(labels, m_b)
initial_mask_binary = (m_b).astype(np.uint8)
labels = drop_artifacts_per_label(labels, initial_mask_binary)
labels = drop_small(labels, min_size=20)
if labels.sum() < 20:
return np.zeros_like( labels ) # not predition !!!
labels = fill_holes_per_blob(labels)
labels = decompose(labels)
#ellipse aproximate
#elp = fit_ellipse(labels)
#labels_elp = create_ellipses_mask(score_prob.shape, elp)
#labels_elp = create_label(labels_elp)
#labels_elp = decompose(labels_elp)
labels = clean_label( labels )
labels = create_label(labels )
#labels = decompose( labels )
return labels
def mpostprocessthresh(score, prob=0.5):
score_prob = sigmoid(score)
predition = score_prob[:,:,1] > prob
labels, _ = ndi.label(predition)
labels = decompose(labels)
labels = clean_label( labels )
labels = create_label(labels )
#labels = decompose( labels )
#labels = predition
return labels
def mpostprocessmax(score):
predition = np.argmax(score[:,:,:3], axis=2).astype('uint8')
predition = predition == 1
labels, _ = ndi.label(predition)
labels = decompose(labels)
labels = clean_label( labels )
labels = create_label(labels )
#labels = decompose( labels )
#labels = predition
return labels
def clean_label( masks ):
cln_mask = []
mean_area, radio = mean_blob_size( masks.max(axis=0) )
for mask in masks:
mask = (mask>128).astype(np.uint8)
try:
_,contours,_ = cv2.findContours(mask, 1, 2)
if len(contours) == 0: continue
contour = contours[0]
if len(contour) < 5:
continue
area = cv2.contourArea(contour)
if area <= 10 or (mean_area !=1 and mean_area/area < 0.2): # skip tiny blobs (<=10 px) or blobs more than ~5x the mean blob area
continue
epsilon = 0.1*cv2.arcLength(contour,True)
contour_aprox = cv2.approxPolyDP(contour,epsilon,True)
cv2.fillPoly(mask, contour_aprox, 1)
cln_mask.append(mask)
except ValueError as e:
pass
return np.array(cln_mask)
def create_label( labels ):
#classe 0 back
c,m = labels.shape[:2]
mlabel = np.zeros_like(labels)
for i in range(c):
mlabel[i,:,:] = labels[i,:,:]*(i+1)
mlabel = np.max(mlabel,axis=0)
return relabel(mlabel)
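# (create_label() expects a stack of per-instance masks of shape (n, H, W) with
#  nonzero pixels marking each instance, and collapses it into a single labeled
#  image: 0 = background, 1..n = instances, relabeled to consecutive values)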
def fit_ellipse( masks ):
ellipses = []
pi_4 = np.pi * 4
for mask in masks:
#mask = pad_mask(mask, 5)
mask = (mask>128).astype(np.uint8)
try:
_,contours,_ = cv2.findContours(mask, 1, 2)
contour = contours[0]
if len(contour) < 5:
continue
area = cv2.contourArea(contour)
if area <= 25: # skip ellipses smaller than 5x5
continue
arclen = cv2.arcLength(contour, True)
circularity = (pi_4 * area) / (arclen * arclen)
ellipse = cv2.fitEllipse(contour)
area_ellpse = (ellipse[1][0]/2.0)*(ellipse[1][1]/2.0)*np.pi
#print(area,area_ellpse,area/area_ellpse)
if area/area_ellpse < 0.50:
#print(area,area_ellpse, area/area_ellpse, ellipse[1], ellipse[0])
continue
ellipses.append( (ellipse, area, area_ellpse, circularity) )
except ValueError as e:
pass
return np.array(ellipses)
def create_ellipses_mask(masksize, ellipses ):
n = len(ellipses)
masks = np.zeros( (n,masksize[0],masksize[1]), dtype=np.uint8 )
for k in range(n):
try:
ellipse = ellipses[k][0]
mask = masks[k,:,:]
#mask = pad_mask(mask, 5)
#cv2.ellipse(elp,ellipse,1,2)
poly = cv2.ellipse2Poly((int(ellipse[0][0]), int(ellipse[0][1])), (int(ellipse[1][0] / 2), int(ellipse[1][1] / 2)), int(ellipse[2]), 0, 360, 5)
cv2.fillPoly(mask, [poly], 1)
#mask = crop_mask(mask, 5)
masks[k,:,:] = mask
except ValueError as e:
pass
return masks
def drop_artifacts_per_label(labels, initial_mask):
labels_cleaned = np.zeros_like(labels)
for i in range(1, labels.max() + 1):
component = np.where(labels == i, 1, 0)
component_initial_mask = np.where(labels == i, initial_mask, 0)
component = drop_artifacts(component, component_initial_mask)
labels_cleaned = labels_cleaned + component * i
return labels_cleaned
def clean_mask(m, c):
# threshold
m_thresh = threshold_otsu(m)
c_thresh = threshold_otsu(c)
m_b = m > m_thresh
c_b = c > c_thresh
# combine contours and masks and fill the cells
m_ = np.where(m_b | c_b, 1, 0)
m_ = ndi.binary_fill_holes(m_)
# close what wasn't closed before
area, radius = mean_blob_size(m_b)
struct_size = int(1.25 * radius)
struct_el = morph.disk(struct_size)
m_padded = pad_mask(m_, pad=struct_size)
m_padded = morph.binary_closing(m_padded, selem=struct_el)
m_ = crop_mask(m_padded, crop=struct_size)
# open to cut the real cells from the artifacts
area, radius = mean_blob_size(m_b)
struct_size = int(0.75 * radius)
struct_el = morph.disk(struct_size)
m_ = np.where(c_b & (~m_b), 0, m_)
m_padded = pad_mask(m_, pad=struct_size)
m_padded = morph.binary_opening(m_padded, selem=struct_el)
m_ = crop_mask(m_padded, crop=struct_size)
# join the connected cells with what we had at the beginning
m_ = np.where(m_b | m_, 1, 0)
m_ = ndi.binary_fill_holes(m_)
# drop all the cells that weren't present at least in 25% of area in the initial mask
m_ = drop_artifacts(m_, m_b, min_coverage=0.25)
return m_
def get_markers(m_b, c):
# threshold
c_thresh = threshold_otsu(c)
c_b = c > c_thresh
mk_ = np.where(c_b, 0, m_b)
area, radius = mean_blob_size(m_b)
struct_size = int(0.25 * radius)
struct_el = morph.disk(struct_size)
m_padded = pad_mask(mk_, pad=struct_size)
m_padded = morph.erosion(m_padded, selem=struct_el)
mk_ = crop_mask(m_padded, crop=struct_size)
mk_, _ = ndi.label(mk_)
return mk_
def get_distance(m_b):
distance = ndi.distance_transform_edt(m_b)
return distance
def add_dropped_water_blobs(water, mask_cleaned):
water_mask = (water > 0).astype(np.uint8)
dropped = mask_cleaned - water_mask
dropped, _ = ndi.label(dropped)
dropped = np.where(dropped, dropped + water.max(), 0)
water = water + dropped
return water
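# A minimal usage sketch (not part of the original code): one common way to combine the
# helpers above into instance labels is marker-controlled watershed. The inputs `m` and `c`
# (soft mask and soft contour maps) and the min_size threshold are assumed for illustration,
# and skimage's watershed is assumed to be available; adapt to the surrounding project.
def watershed_postprocess(m, c):
    from skimage.segmentation import watershed  # in older skimage: skimage.morphology.watershed
    cleaned = clean_mask(m, c)                  # binary foreground with holes filled
    markers = get_markers(cleaned, c)           # eroded, labeled seeds away from contours
    distance = get_distance(cleaned)            # Euclidean distance transform
    labels = watershed(-distance, markers, mask=cleaned)
    labels = add_dropped_water_blobs(labels, cleaned)  # re-add blobs the watershed dropped
    return drop_small(labels, min_size=20)      # min_size is an arbitrary choice here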
def fill_holes_per_blob(image):
image_cleaned = np.zeros_like(image)
for i in range(1, image.max() + 1):
mask = np.where(image == i, 1, 0)
mask = ndi.morphology.binary_fill_holes(mask)
image_cleaned = image_cleaned + mask * i
return image_cleaned
def drop_artifacts(mask_after, mask_pre, min_coverage=0.5):
connected, nr_connected = ndi.label(mask_after)
mask = np.zeros_like(mask_after)
for i in range(1, nr_connected + 1):
conn_blob = np.where(connected == i, 1, 0)
initial_space = np.where(connected == i, mask_pre, 0)
blob_size = np.sum(conn_blob)
initial_blob_size = np.sum(initial_space)
coverage = float(initial_blob_size) / float(blob_size)
if coverage > min_coverage:
mask = mask + conn_blob
else:
mask = mask + initial_space
return mask
def mean_blob_size(mask):
labels, labels_nr = ndi.label(mask)
if labels_nr < 2:
mean_area = 1
mean_radius = 1
else:
mean_area = int(itemfreq(labels)[1:, 1].mean())
mean_radius = int(np.round(np.sqrt(mean_area / np.pi)))
return mean_area, mean_radius
def pad_mask(mask, pad):
if pad <= 1:
pad = 2
h, w = mask.shape
h_pad = h + 2 * pad
w_pad = w + 2 * pad
mask_padded = np.zeros((h_pad, w_pad))
mask_padded[pad:pad + h, pad:pad + w] = mask
mask_padded[pad - 1, :] = 1
mask_padded[pad + h + 1, :] = 1
mask_padded[:, pad - 1] = 1
mask_padded[:, pad + w + 1] = 1
return mask_padded
def crop_mask(mask, crop):
if crop <= 1:
crop = 2
h, w = mask.shape
mask_cropped = mask[crop:h - crop, crop:w - crop]
return mask_cropped
def drop_small(img, min_size):
img = morph.remove_small_objects(img, min_size=min_size)
return relabel(img)
def tolabel(mask):
labeled, nr_true = ndi.label(mask)
return labeled
def relabel(img):
h, w = img.shape
relabel_dict = {}
for i, k in enumerate(np.unique(img)):
if k == 0:
relabel_dict[k] = 0
else:
relabel_dict[k] = i
for i, j in product(range(h), range(w)):
img[i, j] = relabel_dict[img[i, j]]
return img
def decompose(labeled):
nr_true = labeled.max()
masks = []
for i in range(1, nr_true + 1):
msk = labeled.copy()
msk[msk != i] = 0.
msk[msk == i] = 255.
masks.append(msk)
if not masks: return np.array([labeled])
else: return np.array(masks)
def mpostprocess_soft( softpred, line_width = 4 ):
'''
Process the per-pixel class scores (soft predictions)
'''
# assume the only output is an HWC array where C is the number
# of classes, H and W are the height and width of the image
# retain only the top class for each pixel
class_data = np.argmax(softpred, axis=2).astype('uint8')
# remember the classes we found
found_classes = np.unique(class_data)
fill_data = np.ndarray((class_data.shape[0], class_data.shape[1], 4), dtype='uint8')
for x in range(3):
fill_data[:, :, x] = class_data.copy()
# Assuming that class 0 is the background
mask = np.greater(class_data, 0)
fill_data[:, :, 3] = mask * 255
line_data = fill_data.copy()
seg_data = fill_data.copy()
# Black mask of non-segmented pixels
mask_data = np.zeros(fill_data.shape, dtype='uint8')
mask_data[:, :, 3] = (1 - mask) * 255
# Generate outlines around segmented classes
if len(found_classes) > 1:
# Assuming that class 0 is the background.
line_mask = np.zeros(class_data.shape, dtype=bool)
max_distance = np.zeros(class_data.shape, dtype=float) + 1
for c in (x for x in found_classes if x != 0):
c_mask = np.equal(class_data, c)
from agent.Agent import Agent
from agent.examples.crypto.PPFL_ServiceAgent import PPFL_ServiceAgent
from message.Message import Message
from util.util import log_print
from util.crypto.logReg import getWeights, reportStats
import util.crypto.diffieHellman as dh
import numpy as np
from os.path import exists
import pandas as pd
import random
# The PPFL_ClientAgent class inherits from the base Agent class. It implements
# a secure federated learning protocol with basic differential privacy plus
# secure multiparty communication.
class PPFL_ClientAgent(Agent):
def __init__(self, id, name, type, peer_list=None, iterations=4, multiplier=10000, secret_scale = 100000,
X_train = None, y_train = None, X_test = None, y_test = None, split_size = None,
learning_rate = None, clear_learning = None, num_clients = None, num_subgraphs = None,
epsilon = None, max_logreg_iterations = None, collusion = False, random_state=None):
# Base class init.
super().__init__(id, name, type, random_state)
# Store the client's peer list (subgraph, neighborhood) with which it should communicate.
self.peer_list = peer_list
# Initialize a tracking attribute for the initial peer exchange and record the subgraph size.
self.peer_exchange_complete = False
self.num_peers = len(self.peer_list)
# Record the total number of clients participating in the protocol and the number of subgraphs.
# Neither of these is part of the protocol or necessary for a real-world implementation, but they
# allow for convenient logging of progress and results in simulation.
self.num_clients = num_clients
self.num_subgraphs = num_subgraphs
# Record whether the clients should be recording information about the potential accuracy of
# peer data reconstruction via collusion among the clients.
self.collusion = collusion
# Record the number of protocol (federated learning) iterations the clients will perform.
self.no_of_iterations = iterations
# Record the multiplier that will be used to protect against floating point accuracy loss and
# the scale of the client shared secrets.
self.multiplier = multiplier
self.secret_scale = secret_scale
# Record the number of local iterations of logistic regression each client will run during
# each protocol iteration and what local learning rate will be used.
self.max_logreg_iterations = max_logreg_iterations
self.learning_rate = learning_rate
# Record whether clients will do federated learning in the clear (no privacy, no encryption)
# and, if needed, the epsilon value for differential privacy.
self.clear_learning = clear_learning
self.epsilon = epsilon
# Record the training and testing splits for the data set to be learned.
self.X_train = X_train
self.y_train = y_train
self.X_test = X_test
self.y_test = y_test
# Record the number of features in the data set.
self.no_of_weights = X_train.shape[1]
# Initialize an attribute to remember the shared weights returned from the server.
self.prevWeight = None
# Each client receives only a portion of the training data each protocol iteration.
self.split_size = split_size
# Initialize a dictionary to remember which peers we have heard from during peer exchange.
self.peers_received = {}
# Initialize a dictionary to accumulate this client's timing information by task.
self.elapsed_time = { 'DH_OFFLINE' : pd.Timedelta(0), 'DH_ONLINE' : pd.Timedelta(0),
'TRAINING' : pd.Timedelta(0), 'ENCRYPTION' : pd.Timedelta(0) }
# Pre-generate this client's local training data for each iteration (for the sake of simulation speed).
self.trainX = []
self.trainY = []
# This is a faster PRNG than the default, for times when we must select a large quantity of randomness.
self.prng = np.random.Generator(np.random.SFC64())
### Data randomly selected from total training set each iteration, simulating online behavior.
for i in range(iterations):
slice = self.prng.choice(range(self.X_train.shape[0]), size = split_size, replace = False)
# Pull together the current local training set.
self.trainX.append(self.X_train[slice].copy())
self.trainY.append(self.y_train[slice].copy())
# Create dictionaries to hold the public and secure keys for this client, and the public keys shared
# by its peers.
self.pubkeys = {}
self.seckeys = {}
self.peer_public_keys = {}
# Create dictionaries to hold the shared key for each peer each iteration and the seed for the
# following iteration.
self.r = {}
self.R = {}
# Specify the parameters used for generation of randomness.
self.px_reg = 1
self.px_epsilon = epsilon
self.px_min_rows = self.split_size
self.px_shape = 1 / ( self.num_peers + 1)
self.px_scale = 2 / (( self.num_peers + 1 ) * self.px_min_rows * self.px_reg * self.px_epsilon )
if self.id == 1: print (f"px_shape is {self.px_shape}")
if self.id == 1: print (f"px_scale is {self.px_scale}")
# Specify the required shape for vectorized generation of randomness.
self.px_dims = ( self.num_peers, self.no_of_iterations, self.no_of_weights )
# Iteration counter.
self.current_iteration = 0
### Simulation lifecycle messages.
def kernelStarting(self, startTime):
# Initialize custom state properties into which we will later accumulate results.
# To avoid redundancy, we allow only the first client to handle initialization.
if self.id == 1:
self.kernel.custom_state['dh_offline'] = pd.Timedelta(0)
self.kernel.custom_state['dh_online'] = pd.Timedelta(0)
self.kernel.custom_state['training'] = pd.Timedelta(0)
self.kernel.custom_state['encryption'] = pd.Timedelta(0)
# Find the PPFL service agent, so messages can be directed there.
self.serviceAgentID = self.kernel.findAgentByType(PPFL_ServiceAgent)
# Request a wake-up call as in the base Agent. Noise is kept small because
# the overall protocol duration is so short right now. (up to one microsecond)
super().kernelStarting(startTime + pd.Timedelta(self.random_state.randint(low = 0, high = 1000), unit='ns'))
def kernelStopping(self):
# Accumulate into the Kernel's "custom state" this client's elapsed times per category.
# Note that times which should be reported in the mean per iteration are already so computed.
# These will be output to the config (experiment) file at the end of the simulation.
self.kernel.custom_state['dh_offline'] += self.elapsed_time['DH_OFFLINE']
self.kernel.custom_state['dh_online'] += (self.elapsed_time['DH_ONLINE'] / self.no_of_iterations)
self.kernel.custom_state['training'] += (self.elapsed_time['TRAINING'] / self.no_of_iterations)
self.kernel.custom_state['encryption'] += (self.elapsed_time['ENCRYPTION'] / self.no_of_iterations)
super().kernelStopping()
### Simulation participation messages.
def wakeup (self, currentTime):
super().wakeup(currentTime)
# Record start of wakeup for real-time computation delay.
dt_wake_start = pd.Timestamp('now')
# Check if the clients are still performing the one-time peer exchange.
if not self.peer_exchange_complete:
# Generate DH keys.
if not self.clear_learning: self.pubkeys, self.seckeys = dh.dict_keygeneration( self.peer_list )
# Record elapsed wallclock for Diffie Hellman offline.
dt_wake_end = pd.Timestamp('now')
self.elapsed_time['DH_OFFLINE'] += dt_wake_end - dt_wake_start
# Set computation delay to elapsed wallclock time.
self.setComputationDelay(int((dt_wake_end - dt_wake_start).to_timedelta64()))
# Send generated values to peers.
if not self.clear_learning:
for idx, peer in enumerate(self.peer_list):
# We assume a star network configuration where all messages between peers must be forwarded
# through the server.
self.sendMessage(self.serviceAgentID, Message({ "msg" : "FWD_MSG", "msgToForward" : "PEER_EXCHANGE",
"sender": self.id, "recipient": peer, "pubkey" : self.pubkeys[peer] }))
if self.clear_learning:
self.peer_exchange_complete = True
self.setWakeup(currentTime + pd.Timedelta('1ns'))
else:
# We are waking up to start a new iteration of the protocol.
# (Peer exchange is done before all this.)
if (self.current_iteration == 0):
# During iteration 0 (only) we complete the key exchange and prepare the
# common key list, because at this point we know we have received keys
# from all peers.
# R is the common key dictionary (by peer agent id).
if not self.clear_learning: self.R = dh.dict_keyexchange(self.peer_list, self.id, self.pubkeys,
self.seckeys, self.peer_public_keys)
# Pre-generate all of this client's local differential privacy noise (for simulation speed).
# We will need one per weight per protocol iteration.
self.my_noise = np.random.laplace(scale = self.px_scale, size = (self.no_of_iterations, self.no_of_weights))
# The online Diffie-Hellman work (deriving fresh shared offsets from the common keys) is done in every iteration.
if not self.clear_learning:
for peer_id, common_key in self.R.items():
random.seed(common_key)
rand = random.getrandbits(512)
rand_b_raw = format(rand, '0512b')
rand_b_rawr = rand_b_raw[:256]
rand_b_rawR = rand_b_raw[256:]
# Keep the offset positive for peers with a lower id and negate it for peers with a
# higher id. This ensures each pairwise offset will be added once and subtracted once.
r = int(rand_b_rawr,2) % (2**32)
log_print ("SELECTED r: {}", r)
# Update dictionary of shared secrets for this iteration.
self.r[peer_id] = r if peer_id < self.id else -r
# Store the shared seeds for the next iteration.
self.R[peer_id] = int(rand_b_rawR,2)
# Record elapsed wallclock for Diffie Hellman online.
dt_online_complete = pd.Timestamp('now')
# For convenience of things indexed by iteration...
i = self.current_iteration
# Perform the local training for this client, using only its local (private) data. The configured learning
# rate might need to be increased if there are very many clients, each with very little data, otherwise
# convergence may take a really long time.
#
# max_iter controls how many iterations of gradient descent to perform on the logistic
# regression model. previous_weight should be passed as None for the first iteration.
weight = getWeights(previous_weight = self.prevWeight, max_iter = self.max_logreg_iterations, lr = self.learning_rate,
trainX = self.trainX[i], trainY = self.trainY[i], self_id = self.id)
# If in collusion analysis mode, write out the weights we will need to evaluate reconstruction.
if self.collusion:
with open('results/collusion_weights.csv', 'a') as results_file:
results_file.write(f"{self.id},{self.current_iteration},{','.join([str(x) for x in weight])}\n")
# Record elapsed wallclock for training model.
dt_training_complete = pd.Timestamp('now')
if not self.clear_learning:
# Add a random sample from Laplace to each of the weights.
noise = self.my_noise[i]
if self.collusion:
with open('results/collusion_selected_noise.csv', 'a') as results_file:
# Write out the noise added to each weight by this client.
results_file.write(f"{self.id},{self.current_iteration},{','.join([str(x) for x in noise])}\n")
log_print ("weight {}", weight)
log_print ("noise {}", noise)
if self.clear_learning: n = np.array(weight) * self.multiplier
else: n = (np.array(weight) + noise) * self.multiplier
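# Illustrative sketch (not part of the original agent): why the pairwise offsets above cancel
# when the server sums the clients' submissions. Client ids and the toy weights below are
# hypothetical; only the sign convention mirrors the code (+r for lower peer ids, -r for higher).
def _pairwise_mask_demo(num_clients=4, num_weights=3, seed=7):
    rng = np.random.default_rng(seed)
    weights = rng.normal(size=(num_clients, num_weights))
    # One shared offset per unordered pair, as if derived from the pair's common DH key.
    pair_r = {(i, j): rng.integers(1, 2**16, size=num_weights)
              for i in range(num_clients) for j in range(i + 1, num_clients)}
    masked = []
    for me in range(num_clients):
        mask = np.zeros(num_weights)
        for peer in range(num_clients):
            if peer == me: continue
            r = pair_r[(min(me, peer), max(me, peer))]
            mask += r if peer < me else -r          # same rule as self.r[peer_id]
        masked.append(weights[me] + mask)
    # The per-client masks hide individual weights, but they cancel in the aggregate.
    assert np.allclose(np.sum(masked, axis=0), weights.sum(axis=0))
    return np.sum(masked, axis=0)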
# http://github.com/timestocome
# Zipf's law on words
# log frequency = log c - s log rank
# http://greenteapress.com/complexity/Zipf.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
############################################################################
# read in data
from collections import Counter
from collections import OrderedDict
import operator
############################################################################
# read in text, clean it up, get word frequencies and sort
# open file and read in text
file = open('BothBooks.txt', encoding='utf-8')
data = file.read()
# convert all text to lower case and remove new line chars
data = data.lower()
data = data.replace('\n', ' ')
# not going to worry about punctuation yet
data = data.replace('--', ' ')
data = data.replace('.', '')
data = data.replace(';', '')
data = data.replace(',', '')
data = data.replace('!', '')
data = data.replace('\"', '')
data = data.replace('(', '')
data = data.replace(')', '')
data = data.replace('?', '')
data = data.replace(':', '')
data = data.replace(" '", ' ')
data = data.replace("' ", ' ')
data = data.replace('-', ' ')
data = data.replace('*', '')
#print(data)
file.close()
# break text into words
words = data.split()
number_of_words = len(words)
print ("Total words in text", len(words))
# find unique words
words_set = set(words)
unique_words = len(words_set)
print("Unique words", len(words_set))
# count words
count = Counter(words)
with open('word_count.csv', 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in count.items()]
# word frequency
frequency = {}
for k, v in count.items():
frequency[k] = v #/number_of_words * 100.
with open('word_frequency.csv', 'w') as f:
[f.write('{0},{1}\n'.format(key, value)) for key, value in frequency.items()]
# sorted
sorted_list = sorted(frequency.items(), key=operator.itemgetter(1), reverse=True)
#############################################################
words, counts = zip(*sorted_list)
# plot histogram
'''
words = np.array(words[0:50])
counts = np.array(counts[0:50])
indexes = np.arange(len(words))
bar_width = 0.2
plt.figure(figsize=(18,18))
plt.bar(indexes, counts)
plt.title("Alice in Wonderland, Through the Looking Glass")
plt.xticks(indexes + bar_width, words, rotation=45)
plt.savefig("histogram.png")
plt.show()
'''
# plot frequency vs ranks
# should be a straight line
# log counts = log c - s log ranks
words = np.array(words)
counts = np.array(counts)
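# A short sketch (not in the original script) of the Zipf check described above:
# fit log(count) = log c - s * log(rank) and report the slope s. The variable names
# and the output filename are new; `counts` is the sorted count array built above.
ranks = np.arange(1, len(counts) + 1)
slope, intercept = np.polyfit(np.log(ranks), np.log(counts), 1)
print("Zipf exponent s ~", -slope, " log c ~", intercept)
plt.figure()
plt.loglog(ranks, counts)
plt.xlabel('rank'); plt.ylabel('count')
plt.title('Word frequency vs rank (log-log)')
plt.savefig('zipf_loglog.png')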
import numpy as np
import matplotlib.pyplot as plt
def gradient(x):
"""
Computes the differential operator for a given set of points x.
"""
b=len(x)
dx = x[1] - x[0]
ax = ((np.tri(b, b, 0 , dtype=int) - np.tri(b, b, 1,dtype=int)))
ax = ax + (np.tri(b, b, -1, dtype=int) - np.tri(b, b, -2, dtype=int))
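# A self-contained sketch (not from the original file) of the same idea: a central-difference
# operator D such that D @ f approximates df/dx on the uniform grid x. Boundary rows use
# second-order one-sided stencils. The helper name is new.
def gradient_matrix(x):
    b = len(x)
    dx = x[1] - x[0]
    D = (np.eye(b, k=1) - np.eye(b, k=-1)) / (2 * dx)    # interior: (f[i+1] - f[i-1]) / (2*dx)
    D[0, :3]  = np.array([-3., 4., -1.]) / (2 * dx)      # forward one-sided stencil
    D[-1, -3:] = np.array([1., -4., 3.]) / (2 * dx)      # backward one-sided stencil
    return D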
# (C) 2020 University of Colorado AES-CCAR-SEDA (Space Environment Data Analysis) Group
# Written by <NAME>
import numpy as np
from numpy import (sin,cos,tan,arcsin,arccos,arctan2)
def _azifac(aziunit):
if aziunit=='hour':
return np.pi/12
elif aziunit=='deg':
return np.pi/180.
elif aziunit=='rad':
return 1.
else:
raise ValueError(('Invalid aziunit {}'.format(aziunit)
+' valid values are deg or hour or rad'))
def angle_difference(ang1,ang2,aziunit):
"""Difference between two angles in degrees or hours (ang2-ang1),
taking into account wrapping
PARAMETERS
----------
ang1 : float or np.ndarray
Angle(s) to subtract
ang2 : float or np.ndarray
Angle(s) to subtract from
aziunit : str
'deg' for input angles and result in degrees,
'hour' for input angles in hours, 'rad' for input angles in radians
RETURNS
-------
diff : float or np.ndarray
Difference (ang2-ang1)
"""
ang2rad = _azifac(aziunit)
y = np.sin(ang2*ang2rad-ang1*ang2rad)
x = np.cos(ang2*ang2rad-ang1*ang2rad)
diff = np.arctan2(y,x)/ang2rad
return diff
def angle_midpoint(ang1,ang2,aziunit):
"""
Midpoint between two angles in degrees or hours
"""
return ang1 + angle_difference(ang1,ang2,aziunit)/2.
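def _angle_wrap_example():
    """Quick sanity check (not part of the original module) of the wrap handling:
    going from 23 h to 1 h local time is a +2 h step, not -22 h, and the midpoint
    is midnight (returned here as 24 h)."""
    diff = angle_difference(23., 1., 'hour')   # ~ 2.0
    mid = angle_midpoint(23., 1., 'hour')      # ~ 24.0 (i.e. 0 h)
    return diff, mid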
def _great_circle_distance(location1,location2,lonorlt='lt'):
"""Return angular distance in radians between n-by-2 numpy arrays
location1, location2 (calculated row-wise so diff between
location1[0,] and location2[0,]
assuming that these arrays have the columns lat[deg],localtime[hours]
and that they are points on a sphere of constant radius
(the points are at the same altitude)
"""
azi2rad = np.pi/12. if lonorlt=='lt' else np.pi/180
wrappt = 24. if lonorlt=='lt' else 360.
#Bounds check
over = location1[:,1] > wrappt
under = location1[:,1] < 0.
location1[over,1]=location1[over,1]-wrappt
location1[under,1]=location1[under,1]+wrappt
if location1.ndim == 1 or location2.ndim == 1:
dphi = np.abs(location2[1]-location1[1])*azi2rad
a = (90-location1[0])/360*2*np.pi #get the colatitude in radians
b = (90-location2[0])/360*2*np.pi
C = np.pi - np.abs(dphi - np.pi)#get the angular distance in longitude in radians
else:
dphi = np.abs(location2[:,1]-location1[:,1])*azi2rad
a = (90-location1[:,0])/360*2*np.pi #get the colatitude in radians
b = (90-location2[:,0])/360*2*np.pi
C = np.pi - np.abs(dphi - np.pi)#get the angular distance in longitude in radians
return np.arccos(np.cos(a)*np.cos(b)+np.sin(a)*np.sin(b)*np.cos(C))
def _great_circle_distance_law_of_cosines(theta1,phi1,theta2,phi2):
"""Computes great circle distance between any number of paired
locations using the law of cosines. Warning:
can be inaccurate for short distances. All angles in radians.
Greek letter convention is theta for colatitude, phi for azimuth/longitude.
"""
a = theta1
b = theta2
dphi = np.abs(phi2-phi1)
C = np.pi - np.abs(dphi - np.pi)
# same spherical law of cosines used in _great_circle_distance above
return np.arccos(np.cos(a)*np.cos(b)+np.sin(a)*np.sin(b)*np.cos(C))
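# Optional alternative (not part of the original module): the haversine form is the usual
# remedy for the short-distance inaccuracy noted in the docstring above. Same convention:
# theta is colatitude, phi is azimuth/longitude, all angles in radians.
def _great_circle_distance_haversine(theta1, phi1, theta2, phi2):
    a = (np.sin((theta2 - theta1) / 2.)**2
         + np.sin(theta1) * np.sin(theta2) * np.sin((phi2 - phi1) / 2.)**2)
    return 2. * np.arcsin(np.sqrt(np.clip(a, 0., 1.)))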
#!/usr/bin/env python3
import numpy as np
class deck(object):
''' an object which holds a deck of cards. ace=1, 2=2, ..., q=12, k=13.
it can shuffle and deal n cards. '''
def __init__(self, start_shuffled = True):
self.suits = ['hearts', 'spades', 'clubs', 'diamonds']
self.values = np.arange(1,14)
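    # Hypothetical sketch (not from the original source) of the shuffle/deal behaviour the
    # class docstring describes, assuming a card is represented as a (value, suit) tuple.
    def shuffle(self):
        self.cards = [(v, s) for s in self.suits for v in self.values]
        np.random.shuffle(self.cards)   # in-place shuffle of the 52-card list
    def deal(self, n):
        dealt, self.cards = self.cards[:n], self.cards[n:]
        return dealt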
''' This module contains functions necessary to fit a negative binomial
using the maximum likelihood estimator and some numerical analysis
@author: <NAME> (based on original code by <NAME>)
'''
## Libraries
import numpy as np
from scipy.optimize import minimize
from scipy.stats import nbinom
import matplotlib.pyplot as plt
## Functions
def nu_sum(vec_element, k):
'''
This function efficiently computes the gamma function term of the NB log lik
by expanding the sum into a grid. Treats the gamma function as a logged
factorial because the data must be integer values.
@param vec_element: an element of the data vector
@param k: the value of the dispersion parameter
'''
nu = np.arange(0, vec_element, 1)
return np.sum(np.log(1 + nu / k))
def neg_log_lik(k, y_bar, vec, n):
'''
This function computes the negative log likelihood of the NB dist. using the
MLE estimate of the mean, y_bar, and a set version of the dispersion parameter.
This approach produces a biased estimate because it does not account for
the use of the unbiased estimator of the sample mean (y_bar) in the place
of the population mean.
@param k: the dispersion parameter
@param y_bar: the sample mean, an unbiased estimator of the population mean
@param vec: the data vector
@param n: the number of observations
'''
x = 0
for i in range(n):
x += nu_sum(vec[i], k)
log_lik = (x / n) + y_bar * np.log(y_bar) - (y_bar + k) * np.log(1 + y_bar / k)
return -log_lik
def plot_pmf(k_hat, y_bar, vec):
'''
plot the estimated pmf over the data
@param k_hat: the estimated value of the NB dispersion parameter
@param y_bar: the estimated value of the NB mean
'''
p_hat = (y_bar**2 / k_hat) / (y_bar + (y_bar**2 / k_hat))
n_hat = y_bar**2 / (y_bar**2 / k_hat)
x = np.arange(min(vec), max(vec + 1), 1)
y_tilde = nbinom(n = n_hat,
p = p_hat)
plt.hist(vec, alpha = .2)
plt.plot(y_tilde.pmf(x) * len(vec), color = 'blue')
return None
def neg_bin_fit(vec, init = 1, plot = False):
'''
The workhorse function to fit negative binomial dist. to data. Assumes that underdispersion
does not occur, which guarantees the score has at least one root in the positive reals.
Uses the mean and dispersion parameterization of the pmf common in ecology.
@param vec: The data vector used to fit the negative binomial distribution
@param init: The initial estimate for k, the dispersion parameter
@param plot: whether to plot the fitted distribution over the data
'''
#####
## Type and data checking
# Check the input is properly specified
if not isinstance(vec, np.ndarray):
raise TypeError("Argument 'vec' must be a numpy.ndarray")
if len(vec.shape) != 1:
raise TypeError("Argument 'vec' must be a vector with shape (n,)")
if (not np.issubdtype(vec.dtype, np.integer)):
raise ValueError("Numpy array elements must be of type int")
if type(plot) is not bool:
raise TypeError('Argument `plot` must be a boolean')
if (type(init) is not float) & (type(init) is not int):
raise TypeError('Argument `init` must be of type float or type int')
if init <= 0:
raise ValueError('Argument `init` must be greater than zero')
# Check the data
if np.sum(vec < 0) > 0:
raise ValueError("Data must all be greater than or equal to zero, negative number provided")
if np.mean(vec) > np.var(vec):
raise ValueError("Data are underdispersed; fitting method does not allow for underdispersion")
#####
## Fit the NB dist. to the vector
# MLE of the mean
y_bar = np.mean(vec)
# MLE of k
fit = minimize(fun = neg_log_lik,
x0 = 1,
args = (np.mean(vec), vec, len(vec),),
method = 'L-BFGS-B',
bounds = ((0.00001, None),))
mean = np.array([y_bar, fit.x[0]])
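# Hedged usage sketch (not part of the original module): simulate negative-binomial counts
# and recover the dispersion k with the same minimize() setup used in neg_bin_fit. The seed,
# sample size, and true parameters below are arbitrary.
def _example_nb_fit(k_true=2.0, mu_true=10.0, n_obs=500, seed=0):
    rng = np.random.default_rng(seed)
    p = k_true / (k_true + mu_true)          # numpy's negative_binomial is parameterized by (n, p)
    sample = rng.negative_binomial(k_true, p, size=n_obs)
    y_bar = np.mean(sample)
    fit = minimize(fun=lambda k: neg_log_lik(k[0], y_bar, sample, len(sample)),
                   x0=1.0, method='L-BFGS-B', bounds=((0.00001, None),))
    return y_bar, fit.x[0]                   # estimates of the mean and of the dispersion k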
import pytest
import pyCGM_Single.pycgmStatic as pycgmStatic
import numpy as np
from mock import patch
rounding_precision = 8
class TestPycgmStaticAxis():
"""
This class tests the axis functions in pycgmStatic.py:
staticCalculationHead
pelvisJointCenter
hipJointCenter
hipAxisCenter
kneeJointCenter
ankleJointCenter
footJointCenter
headJC
uncorrect_footaxis
rotaxis_footflat
rotaxis_nonfootflat
findJointC
"""
nan_3d = [np.nan, np.nan, np.nan]
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
@pytest.mark.parametrize(["head", "expected"], [
# Test from running sample data
([[[244.87227957886893, 326.0240255639856, 1730.4189843948805],
[243.89575702706503, 325.0366593474616, 1730.1515677531293],
[244.89086730509763, 324.80072493605866, 1731.1283433097797]],
[244.89547729492188, 325.0578918457031, 1730.1619873046875]],
0.25992807335420975),
# Test with zeros for all params
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][0]
([[[-1, 8, 9], [0, 0, 0], [0, 0, 0]], [0, 0, 0]],
1.5707963267948966),
# Testing when values are added to head[0][1]
([[[0, 0, 0], [7, 5, 7], [0, 0, 0]], [0, 0, 0]],
np.nan),
# Testing when values are added to head[0][2]
([[[0, 0, 0], [0, 0, 0], [3, -6, -2]], [0, 0, 0]],
0.0),
# Testing when values are added to head[0]
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [0, 0, 0]],
-1.3521273809209546),
# Testing when values are added to head[1]
([[[0, 0, 0], [0, 0, 0], [0, 0, 0]], [-4, 7, 8]],
0.7853981633974483),
# Testing when values are added to head
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of lists of ints
([[[-1, 8, 9], [7, 5, 7], [3, -6, -2]], [-4, 7, 8]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of ints
([np.array([[-1, 8, 9], [7, 5, 7], [3, -6, -2]], dtype='int'), np.array([-4, 7, 8], dtype='int')],
-0.09966865249116204),
# Testing that when head is composed of lists of floats
([[[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], [-4.0, 7.0, 8.0]],
-0.09966865249116204),
# Testing that when head is composed of numpy arrays of floats
([np.array([[-1.0, 8.0, 9.0], [7.0, 5.0, 7.0], [3.0, -6.0, -2.0]], dtype='float'), np.array([-4.0, 7.0, 8.0], dtype='float')],
-0.09966865249116204)])
def test_staticCalculationHead(self, head, expected):
"""
This test provides coverage of the staticCalculationHead function in pycgmStatic.py, defined as staticCalculationHead(frame, head)
This test takes 2 parameters:
head: array containing the head axis and head origin
expected: the expected result from calling staticCalculationHead on head
This function first calculates the x, y, z axes of the head by subtracting the head origin from the given
head axes. It then calls headoffCalc on this head axis and a global axis to find the head offset angles.
This test ensures that:
- the head axis and the head origin both have an effect on the final offset angle
- the resulting output is correct when head is composed of lists of ints, numpy arrays of ints, lists of
floats, and numpy arrays of floats.
"""
result = pycgmStatic.staticCalculationHead(None, head)
np.testing.assert_almost_equal(result, expected, rounding_precision)
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'RASI': np.array([357.90066528, 377.69210815, 1034.97253418]),
'LASI': np.array([145.31594849, 405.79052734, 1030.81445312]),
'RPSI': np.array([274.00466919, 205.64402771, 1051.76452637]),
'LPSI': np.array([189.15231323, 214.86122131, 1052.73486328])},
[np.array([251.60830688, 391.74131775, 1032.89349365]),
np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]),
np.array([231.57849121, 210.25262451, 1052.24969482])]),
# Test with zeros for all params
({'SACR': np.array([0, 0, 0]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([0, 0, 0])]),
# Testing when adding values to frame['RASI'] and frame['LASI']
({'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': np.array([0, 0, 0]),
'LPSI': np.array([0, 0, 0])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-7.44458106, -1.48072284, 2.32771179], [-6.56593805, -2.48907071, 1.86812391], [-6.17841206, -1.64617634, 2.93552855]]),
np.array([0, 0, 0])]),
# Testing when adding values to frame['RPSI'] and frame['LPSI']
({'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]), 'RPSI': np.array([1, 0, -4]),
'LPSI': np.array([7, -2, 2])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([4., -1.0, -1.0])]),
# Testing when adding values to frame['SACR']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([-4, 8, -5, ])]),
# Testing when adding values to frame['RASI'], frame['LASI'], frame['RPSI'] and frame['LPSI']
({'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': np.array([1, 0, -4]),
'LPSI': np.array([7, -2, 2])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-7.45825845, -1.47407957, 2.28472598], [-6.56593805, -2.48907071, 1.86812391], [-6.22180416, -1.64514566, 2.9494945]]),
np.array([4.0, -1.0, -1.0])]),
# Testing when adding values to frame['SACR'], frame['RASI'] and frame['LASI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]),
'RPSI': np.array([0, 0, 0]), 'LPSI': np.array([0, 0, 0])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing when adding values to frame['SACR'], frame['RPSI'] and frame['LPSI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([0, 0, 0]), 'LASI': np.array([0, 0, 0]),
'RPSI': np.array([1, 0, -4]), 'LPSI': np.array([7, -2, 2])},
[np.array([0, 0, 0]), np.array([nan_3d, nan_3d, nan_3d]), np.array([-4, 8, -5])]),
# Testing when adding values to frame['SACR'], frame['RASI'], frame['LASI'], frame['RPSI'] and frame['LPSI']
({'SACR': np.array([-4, 8, -5]), 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]),
'RPSI': np.array([1, 0, -4]), 'LPSI': np.array([7, -2, 2])},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of lists of ints
({'SACR': [-4, 8, -5], 'RASI': np.array([-6, 6, 3]), 'LASI': np.array([-7, -9, 1]), 'RPSI': [1, 0, -4],
'LPSI': [7, -2, 2]},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of numpy arrays of ints
({'SACR': np.array([-4, 8, -5], dtype='int'), 'RASI': np.array([-6, 6, 3], dtype='int'),
'LASI': np.array([-7, -9, 1], dtype='int'), 'RPSI': np.array([1, 0, -4], dtype='int'),
'LPSI': np.array([7, -2, 2], dtype='int')},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of lists of floats
({'SACR': [-4.0, 8.0, -5.0], 'RASI': np.array([-6.0, 6.0, 3.0]), 'LASI': np.array([-7.0, -9.0, 1.0]),
'RPSI': [1.0, 0.0, -4.0], 'LPSI': [7.0, -2.0, 2.0]},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])]),
# Testing that when frame is composed of numpy arrays of floats
({'SACR': np.array([-4.0, 8.0, -5.0], dtype='float'), 'RASI': np.array([-6.0, 6.0, 3.0], dtype='float'),
'LASI': np.array([-7.0, -9.0, 1.0], dtype='float'), 'RPSI': np.array([1.0, 0.0, -4.0], dtype='float'),
'LPSI': np.array([7.0, -2.0, 2.0], dtype='float')},
[np.array([-6.5, -1.5, 2.0]),
np.array([[-6.72928306, -1.61360872, 2.96670695], [-6.56593805, -2.48907071, 1.86812391], [-5.52887619, -1.59397972, 2.21928602]]),
np.array([-4, 8, -5])])])
def test_pelvisJointCenter(self, frame, expected):
"""
This test provides coverage of the pelvisJointCenter function in pycgmStatic.py, defined as pelvisJointCenter(frame)
This test takes 2 parameters:
frame: dictionary of marker lists
expected: the expected result from calling pelvisJointCenter on frame
This test is checking to make sure the pelvis joint center and axis are calculated correctly given the input
parameters. The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to frame['RASI'] and frame['LASI'], expected[0] and expected[1] should be updated
When values are added to frame['RPSI'] and frame['LPSI'], expected[2] should be updated
When values are added to frame['SACR'], expected[2] should be updated, and expected[1] should also be updated
if there are values for frame['RASI'] and frame['LASI']
Values produced from frame['SACR'] takes precedent over frame['RPSI'] and frame['LPSI']
If RPSI and LPSI are given, then the sacrum will be the midpoint of those two markers. If they are not given then the sacrum is already calculated / specified.
The origin of the pelvis is midpoint of the RASI and LASI markers.
The axis of the pelvis is calculated using LASI, RASI, origin, and sacrum in the Gram-Schmidt orthogonalization procedure (ref. Kadaba 1990).
Lastly, it checks that the resulting output is correct when frame is composed of lists of ints, numpy arrays of
ints, lists of floats, and numpy arrays of floats. frame['LASI'] and frame['RASI'] were kept as numpy arrays
every time, as lists would cause an error in the following line of pycgmStatic.py because lists cannot be divided by floats:
origin = (RASI+LASI)/2.0
"""
result = pycgmStatic.pelvisJointCenter(frame)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
@pytest.mark.parametrize(["pel_origin", "pel_x", "pel_y", "pel_z", "vsk", "expected"], [
# Test from running sample data
([251.608306884766, 391.741317749023, 1032.893493652344], [251.740636241119, 392.726947206848, 1032.788500732036], [250.617115540376, 391.872328624646, 1032.874106304030], [251.602953357582, 391.847951338178, 1033.887777624562],
{'MeanLegLength': 940.0, 'R_AsisToTrocanterMeasure': 72.512, 'L_AsisToTrocanterMeasure': 72.512, 'InterAsisDistance': 215.908996582031},
[[182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061]]),
# Basic test with zeros for all params
([0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[0, 0, 0], [0, 0, 0]]),
# Testing when values are added to pel_origin
([1, 0, -3], [0, 0, 0], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[-6.1387721, 0, 18.4163163], [8.53165418, 0, -25.59496255]]),
# Testing when values are added to pel_x
([0, 0, 0], [-5, -3, -6], [0, 0, 0], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[54.02442793, 32.41465676, 64.82931352], [54.02442793, 32.41465676, 64.82931352]]),
# Testing when values are added to pel_y
([0, 0, 0], [0, 0, 0], [4, -1, 2], [0, 0, 0],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[29.34085257, -7.33521314, 14.67042628], [-29.34085257, 7.33521314, -14.67042628]]),
# Testing when values are added to pel_z
([0, 0, 0], [0, 0, 0], [0, 0, 0], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[31.82533363, 84.86755635, 21.21688909], [31.82533363, 84.86755635, 21.21688909]]),
# Test when values are added to pel_x, pel_y, and pel_z
([0, 0, 0], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[115.19061413, 109.94699997, 100.71662889], [56.508909 , 124.61742625, 71.37577632]]),
# Test when values are added to pel_origin, pel_x, pel_y, and pel_z
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[109.05184203, 109.94699997, 119.13294518], [65.04056318, 124.61742625, 45.78081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[MeanLegLength]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[100.88576753, 97.85280235, 106.39612748], [61.83654463, 110.86920998, 41.31408931]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[R_AsisToTrocanterMeasure]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 0.0},
[[109.05184203, 109.94699997, 119.13294518], [-57.09307697, 115.44008189, 14.36512267]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[L_AsisToTrocanterMeasure]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 0.0},
[[73.42953032, 107.27027453, 109.97003528], [65.04056318, 124.61742625, 45.78081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and vsk[InterAsisDistance]
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 0.0, 'R_AsisToTrocanterMeasure': 0.0, 'L_AsisToTrocanterMeasure': 0.0, 'InterAsisDistance': 11.0},
[[125.55184203, 104.44699997, 146.63294518], [48.54056318, 130.11742625, 18.28081377]]),
# Test when values are added to pel_origin, pel_x, pel_y, pel_z, and all values in vsk
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are lists of ints and vsk values are ints
([1, 0, -3], [-5, -3, -6], [4, -1, 2], [3, 8, 2],
{'MeanLegLength': 15, 'R_AsisToTrocanterMeasure': -24, 'L_AsisToTrocanterMeasure': -7, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are numpy arrays of ints and vsk values are ints
(np.array([1, 0, -3], dtype='int'), np.array([-5, -3, -6], dtype='int'), np.array([4, -1, 2], dtype='int'),
np.array([3, 8, 2], dtype='int'),
{'MeanLegLength': 15, 'R_AsisToTrocanterMeasure': -24, 'L_AsisToTrocanterMeasure': -7, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are lists of floats and vsk values are floats
([1.0, 0.0, -3.0], [-5.0, -3.0, -6.0], [4.0, -1.0, 2.0], [3.0, 8.0, 2.0],
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11.0},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]]),
# Testing that when pel_origin, pel_x, pel_y, and pel_z are numpy arrays of floats and vsk values are floats
(np.array([1.0, 0.0, -3.0], dtype='float'), np.array([-5.0, -3.0, -6.0], dtype='float'),
np.array([4.0, -1.0, 2.0], dtype='float'), np.array([3.0, 8.0, 2.0], dtype='float'),
{'MeanLegLength': 15.0, 'R_AsisToTrocanterMeasure': -24.0, 'L_AsisToTrocanterMeasure': -7.0, 'InterAsisDistance': 11},
[[81.76345582, 89.67607691, 124.73321758], [-76.79709552, 107.19186562, -17.60160178]])])
def test_hipJointCenter(self, pel_origin, pel_x, pel_y, pel_z, vsk, expected):
"""
This test provides coverage of the hipJointCenter function in pycgmStatic.py, defined as hipJointCenter(frame, pel_origin, pel_x, pel_y, pel_z, vsk)
This test takes 6 parameters:
pel_origin: array of x,y,z position of origin of the pelvis
pel_x: array of x,y,z position of x-axis of the pelvis
pel_y: array of x,y,z position of y-axis of the pelvis
pel_z: array of x,y,z position of z-axis of the pelvis
vsk: dictionary containing subject measurements from a VSK file
expected: the expected result from calling hipJointCenter on pel_origin, pel_x, pel_y, pel_z, and vsk
This test is checking to make sure the hip joint center is calculated correctly given the input parameters.
The test checks to see that the correct values in expected are updated per each input parameter added. Any
parameter that is added should change the value of every value in expected.
The hip joint center axis and origin are calculated using the Hip Joint Center Calculation (ref. Davis_1991).
Lastly, it checks that the resulting output is correct when pel_origin, pel_x, pel_y, and pel_z are composed of
lists of ints, numpy arrays of ints, lists of floats, and numpy arrays of floats and vsk values are ints or floats.
"""
result = pycgmStatic.hipJointCenter(None, pel_origin, pel_x, pel_y, pel_z, vsk)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["l_hip_jc", "r_hip_jc", "pelvis_axis", "expected"], [
# Test from running sample data
([182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061],
[np.array([251.60830688, 391.74131775, 1032.89349365]), np.array([[251.74063624, 392.72694721, 1032.78850073], [250.61711554, 391.87232862, 1032.8741063], [251.60295336, 391.84795134, 1033.88777762]]), np.array([231.57849121, 210.25262451, 1052.24969482])],
[[245.47574167208043, 331.1178713574418, 936.7593959314677], [[245.60807102843359, 332.10350081526684, 936.6544030111602], [244.48455032769033, 331.2488822330648, 936.7400085831541], [245.47038814489719, 331.22450494659665, 937.7536799036861]]]),
# Basic test with zeros for all params
([0, 0, 0], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0, 0, 0], [[0, 0, 0], [0, 0, 0], [0, 0, 0]]]),
# Testing when values are added to l_hip_jc
([1, -3, 2], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0.5, -1.5, 1], [[0.5, -1.5, 1], [0.5, -1.5, 1], [0.5, -1.5, 1]]]),
# Testing when values are added to r_hip_jc
([0, 0, 0], [-8, 1, 4],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[-4, 0.5, 2], [[-4, 0.5, 2], [-4, 0.5, 2], [-4, 0.5, 2]]]),
# Testing when values are added to l_hip_jc and r_hip_jc
([8, -3, 7], [5, -2, -1],
[np.array([0, 0, 0]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[6.5, -2.5, 3], [[6.5, -2.5, 3], [6.5, -2.5, 3], [6.5, -2.5, 3]]]),
# Testing when values are added to pelvis_axis[0]
([0, 0, 0], [0, 0, 0],
[np.array([1, -3, 6]), np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), np.array(rand_coor)],
[[0, 0, 0], [[-1, 3, -6], [-1, 3, -6], [-1, 3, -6]]]),
# Testing when values are added to pelvis_axis[1]
([0, 0, 0], [0, 0, 0],
[np.array([0, 0, 0]), np.array([[1, 0, 5], [-2, -7, -3], [9, -2, 7]]), np.array(rand_coor)],
[[0, 0, 0], [[1, 0, 5], [-2, -7, -3], [9, -2, 7]]]),
# Testing when values are added to pelvis_axis[0] and pelvis_axis[1]
([0, 0, 0], [0, 0, 0],
[np.array([-3, 0, 5]), np.array([[-4, 5, -2], [0, 0, 0], [8, 5, -1]]), np.array(rand_coor)],
[[0, 0, 0], [[-1, 5, -7], [3, 0, -5], [11, 5, -6]]]),
# Testing when values are added to all params
([-5, 3, 8], [-3, -7, -1],
[np.array([6, 3, 9]), np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]]), np.array(rand_coor)],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of lists of ints
([-5, 3, 8], [-3, -7, -1],
[[6, 3, 9], [[5, 4, -2], [0, 0, 0], [7, 2, 3]], rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of numpy arrays of ints
(np.array([-5, 3, 8], dtype='int'), np.array([-3, -7, -1], dtype='int'),
[np.array([6, 3, 9], dtype='int'), np.array([[5, 4, -2], [0, 0, 0], [7, 2, 3]], dtype='int'), rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of lists of floats
([-5.0, 3.0, 8.0], [-3.0, -7.0, -1.0],
[[6.0, 3.0, 9.0], [[5.0, 4.0, -2.0], [0.0, 0.0, 0.0], [7.0, 2.0, 3.0]], rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]]),
# Testing that when l_hip_jc, r_hip_jc, and pelvis_axis are composed of numpy arrays of floats
(np.array([-5.0, 3.0, 8.0], dtype='float'), np.array([-3.0, -7.0, -1.0], dtype='float'),
[np.array([6.0, 3.0, 9.0], dtype='float'),
np.array([[5.0, 4.0, -2.0], [0.0, 0.0, 0.0], [7.0, 2.0, 3.0]], dtype='float'), rand_coor],
[[-4, -2, 3.5], [[-5, -1, -7.5], [-10, -5, -5.5], [-3, -3, -2.5]]])])
def test_hipAxisCenter(self, l_hip_jc, r_hip_jc, pelvis_axis, expected):
"""
This test provides coverage of the hipAxisCenter function in pycgmStatic.py, defined as hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
This test takes 4 parameters:
l_hip_jc: array of left hip joint center x,y,z position
r_hip_jc: array of right hip joint center x,y,z position
pelvis_axis: array of pelvis origin and axis
expected: the expected result from calling hipAxisCenter on l_hip_jc, r_hip_jc, and pelvis_axis
This test is checking to make sure the hip axis center is calculated correctly given the input parameters.
The test checks to see that the correct values in expected are updated per each input parameter added:
When values are added to l_hip_jc or r_hip_jc, every value in expected should be updated
When values are added to pelvis_axis, expected[1] should be updated
The hip axis center is calculated using the midpoint of the right and left hip joint centers.
Then, the given pelvis_axis variable is converted into x,y,z axis format.
The pelvis axis is then translated to the shared hip center by calculating the sum of:
pelvis_axis axis component + hip_axis_center axis component
Lastly, it checks that the resulting output is correct when l_hip_jc, r_hip_jc, and pelvis_axis are composed of
lists of ints, numpy arrays of ints, lists of floats, and numpy arrays of floats.
"""
result = pycgmStatic.hipAxisCenter(l_hip_jc, r_hip_jc, pelvis_axis)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["frame", "hip_JC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RTHI': np.array([426.50338745, 262.65310669, 673.66247559]),
'LTHI': np.array([51.93867874, 320.01849365, 723.03186035]),
'RKNE': np.array([416.98687744, 266.22558594, 524.04089355]),
'LKNE': np.array([84.62355804, 286.69122314, 529.39819336])},
[[182.57097863, 339.43231855, 935.52900126], [308.38050472, 322.80342417, 937.98979061]],
{'RightKneeWidth': 105.0, 'LeftKneeWidth': 105.0},
[np.array([364.17774614, 292.17051722, 515.19181496]), np.array([143.55478579, 279.90370346, 524.78408753])],
[[[426.50338745, 262.65310669, 673.66247559], [308.38050472, 322.80342417, 937.98979061], [416.98687744, 266.22558594, 524.04089355], 59.5],
[[51.93867874, 320.01849365, 723.03186035], [182.57097863, 339.43231855, 935.52900126], [84.62355804, 286.69122314, 529.39819336], 59.5]],
[np.array([364.17774614, 292.17051722, 515.19181496]),
np.array([143.55478579, 279.90370346, 524.78408753]),
np.array([[[364.61959153, 293.06758353, 515.18513093], [363.29019771, 292.60656648, 515.04309095], [364.04724541, 292.24216264, 516.18067112]],
[[143.65611282, 280.88685896, 524.63197541], [142.56434499, 280.01777943, 524.86163553], [143.64837987, 280.04650381, 525.76940383]]])]),
# Test with zeros for all params
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to frame
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [0, 0, 0], [8, -4, 5], 7.0], [[-1, 0, 8], [0, 0, 0], [8, -8, 5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to hip_JC
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [1, -9, 2], [0, 0, 0], 7.0], [[0, 0, 0], [-8, 8, -2], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, [0.10783277, -0.97049496, 0.21566555]],
[nan_3d, nan_3d, [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to vsk
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 11.5], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 4.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]])]),
# Testing when values are added to mockReturnVal
({'RTHI': np.array([0, 0, 0]), 'LTHI': np.array([0, 0, 0]), 'RKNE': np.array([0, 0, 0]), 'LKNE': np.array([0, 0, 0])},
[[0, 0, 0], [0, 0, 0]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0], [[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[nan_3d, nan_3d, [-4.56314797, -4.56314797, -8.21366635]],
[nan_3d, nan_3d, [2.64143142, -5.28286283, -4.4023857]]])]),
# Testing when values are added to frame and hip_JC
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 0.0, 'LeftKneeWidth': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 7.0], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[-0.0512465, -0.22206816, -0.97368348], [0.99284736, 0.09394289, -0.07368069], [0.10783277, -0.97049496, 0.21566555]],
[[-0.68318699, -0.71734633, -0.1366374 ], [-0.22001604, 0.02378552, 0.97520623], [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to frame, hip_JC, and vsk
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]),
'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[[-0.0512465, -0.22206816, -0.97368348], [0.99284736 ,0.09394289, -0.07368069], [0.10783277, -0.97049496, 0.21566555]],
[[-0.68318699, -0.71734633, -0.1366374 ], [-0.22001604, 0.02378552, 0.97520623], [-0.69631062, 0.69631062, -0.17407766]]])]),
# Testing when values are added to frame, hip_JC, vsk, and mockReturnVal
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of lists of ints and vsk values are ints
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8, 8, -2], [1, -9, 2]],
{'RightKneeWidth': 9, 'LeftKneeWidth': -6},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of numpy arrays of ints and vsk values are ints
({'RTHI': np.array([1, 2, 4], dtype='int'), 'LTHI': np.array([-1, 0, 8], dtype='int'),
'RKNE': np.array([8, -4, 5], dtype='int'), 'LKNE': np.array([8, -8, 5], dtype='int')},
np.array([[-8, 8, -2], [1, -9, 2]], dtype='int'),
{'RightKneeWidth': 9, 'LeftKneeWidth': -6},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of lists of floats and vsk values are floats
({'RTHI': np.array([1, 2, 4]), 'LTHI': np.array([-1, 0, 8]), 'RKNE': np.array([8, -4, 5]), 'LKNE': np.array([8, -8, 5])},
[[-8.0, 8.0, -2.0], [1.0, -9.0, 2.0]],
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])]),
# Testing that when hip_JC is composed of numpy arrays of floats and vsk values are floats
({'RTHI': np.array([1.0, 2.0, 4.0], dtype='float'), 'LTHI': np.array([-1.0, 0.0, 8.0], dtype='float'),
'RKNE': np.array([8.0, -4.0, 5.0], dtype='float'), 'LKNE': np.array([8.0, -8.0, 5.0], dtype='float')},
np.array([[-8.0, 8.0, -2.0], [1.0, -9.0, 2.0]], dtype='int'),
{'RightKneeWidth': 9.0, 'LeftKneeWidth': -6.0},
[np.array([-5, -5, -9]), np.array([3, -6, -5])],
[[[1, 2, 4], [1, -9, 2], [8, -4, 5], 11.5], [[-1, 0, 8], [-8, 8, -2], [8, -8, 5], 4.0]],
[np.array([-5, -5, -9]), np.array([3, -6, -5]),
np.array([[[-5.65539698, -5.75053525, -8.91543265], [-4.39803462, -5.58669523, -9.54168847], [-4.54382845, -5.30411437, -8.16368549]],
[[2.57620655, -6.14126448, -5.89467506], [2.32975119, -6.6154814, -4.58533245], [2.39076635, -5.22461171, -4.83384537]]])])])
def test_kneeJointCenter(self, frame, hip_JC, vsk, mockReturnVal, expectedMockArgs, expected):
"""
This test provides coverage of the kneeJointCenter function in pycgmStatic.py, defined as kneeJointCenter(frame, hip_JC, delta, vsk)
This test takes 6 parameters:
frame: dictionary of marker lists
hip_JC: array of hip_JC containing the x,y,z axes marker positions of the hip joint center
vsk: dictionary containing subject measurements from a VSK file
mockReturnVal: the value to be returned by the mock for findJointC
expectedMockArgs: the expected arguments used to call the mocked function, findJointC
expected: the expected result from calling kneeJointCenter on frame, hip_JC, vsk, and mockReturnVal
This test is checking to make sure the knee joint center and axis are calculated correctly given the input
        parameters. This test mocks findJointC to make sure the correct parameters are being passed into it given the
parameters passed into kneeJointCenter, and to also ensure that kneeJointCenter returns the correct value considering
the return value of findJointC, mockReturnVal.
        For each direction (L or R) D, the D knee joint center is calculated using DTHI, the D hip joint center, and
        DKNE in the Rodrigues' rotation formula; the knee width for each knee is applied after the rotation.
        Each knee joint center and the RKNE / LKNE markers are then used in the knee axis calculation
        (ref. Clinical Gait Analysis handbook, Baker 2013).
        Lastly, it checks that the resulting output is correct when hip_JC is composed of lists of ints, numpy
        arrays of ints, lists of floats, and numpy arrays of floats, and when the vsk values are ints or floats.
        The values in frame were kept as numpy arrays, as lists would cause an error in the following lines of
        pycgmStatic.py because lists cannot be subtracted from each other:
thi_kne_R = RTHI-RKNE
thi_kne_L = LTHI-LKNE
"""
with patch.object(pycgmStatic, 'findJointC', side_effect=mockReturnVal) as mock_findJointC:
result = pycgmStatic.kneeJointCenter(frame, hip_JC, None, vsk)
# Asserting that there were only 2 calls to findJointC
np.testing.assert_equal(mock_findJointC.call_count, 2)
# Asserting that the correct params were sent in the 1st (right) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[0][0], mock_findJointC.call_args_list[0][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][1], mock_findJointC.call_args_list[0][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][2], mock_findJointC.call_args_list[0][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[0][3], mock_findJointC.call_args_list[0][0][3], rounding_precision)
# Asserting that the correct params were sent in the 2nd (left) call to findJointC
np.testing.assert_almost_equal(expectedMockArgs[1][0], mock_findJointC.call_args_list[1][0][0], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][1], mock_findJointC.call_args_list[1][0][1], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][2], mock_findJointC.call_args_list[1][0][2], rounding_precision)
np.testing.assert_almost_equal(expectedMockArgs[1][3], mock_findJointC.call_args_list[1][0][3], rounding_precision)
        # Asserting that kneeJointCenter returned the correct result given the return value of the mocked findJointC
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
np.testing.assert_almost_equal(result[2], expected[2], rounding_precision)
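    # Editor's note (sketch, not part of the original pyCGM test suite): in every
    # parametrized case above, the fourth argument expected for each findJointC
    # call is the measured knee width halved plus a fixed 7.0 mm marker offset,
    # e.g. RightKneeWidth 9 -> 9 / 2.0 + 7.0 = 11.5. The helper below only
    # restates that relation for readability.
    @staticmethod
    def _expected_knee_mock_offset(knee_width):
        return knee_width / 2.0 + 7.0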
@pytest.mark.parametrize(["frame", "knee_JC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RTIB': np.array([433.97537231, 211.93408203, 273.3008728 ]), 'LTIB': np.array([50.04016495, 235.90718079, 364.32226562]),
'RANK': np.array([422.77005005, 217.74053955, 92.86152649]), 'LANK': np.array([58.57380676, 208.54806519, 86.16953278])},
[np.array([364.17774614, 292.17051722, 515.19181496]), np.array([143.55478579, 279.90370346, 524.78408753]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 70.0, 'LeftAnkleWidth': 70.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([393.76181608, 247.67829633, 87.73775041]), np.array([98.74901939, 219.46930221, 80.6306816])],
[[[433.97537231, 211.93408203, 273.3008728 ], [364.17774614, 292.17051722, 515.19181496], [422.77005005, 217.74053955, 92.86152649], 42.0],
[[50.04016495, 235.90718079, 364.32226562], [143.55478579, 279.90370346, 524.78408753], [58.57380676, 208.54806519, 86.16953278], 42.0]],
[np.array([393.76181608, 247.67829633, 87.73775041]), np.array([98.74901939, 219.46930221, 80.6306816]),
[[np.array([394.48171575, 248.37201348, 87.715368]),
np.array([393.07114384, 248.39110006, 87.61575574]),
np.array([393.69314056, 247.78157916, 88.73002876])],
[np.array([98.47494966, 220.42553803, 80.52821783]),
np.array([97.79246671, 219.20927275, 80.76255901]),
np.array([98.84848169, 219.60345781, 81.61663775])]]]),
# Test with zeros for all params
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to frame
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]),
'LANK': np.array([2, -4, -5])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [0, 0, 0], [1, 0, -5], 7.0],
[[0, 2, -1], [0, 0, 0], [2, -4, -5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to knee_JC
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [-7, 1, 2], [0, 0, 0], 7.0],
[[0, 0, 0], [9, -8, 9], [0, 0, 0], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array(nan_3d), np.array(nan_3d), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to vsk
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], -12.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 16.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)],
[np.array(nan_3d), np.array(nan_3d), np.array(nan_3d)]]]),
# Testing when values are added to mockReturnVal
({'RTIB': np.array([0, 0, 0]), 'LTIB': np.array([0, 0, 0]), 'RANK': np.array([0, 0, 0]), 'LANK': np.array([0, 0, 0])},
[np.array([0, 0, 0]), np.array([0, 0, 0]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], 7.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array(nan_3d), np.array(nan_3d), np.array([1.7018576 , -4.25464401, 3.40371521])],
[np.array(nan_3d), np.array(nan_3d), np.array([7.07001889, -2.65125708, 0.88375236])]]]),
# Testing when values are added to frame and knee_JC
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': 0.0, 'LeftAnkleWidth': 0.0, 'RightTibialTorsion': 0.0, 'LeftTibialTorsion': 0.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], 7.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 7.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array([-0.26726124, -0.80178373, -0.53452248]), np.array([0.14547859, -0.58191437, 0.80013226]), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array([0.79317435, 0.49803971, -0.35047239]), np.array([-0.11165737, 0.68466825, 0.72025136]), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to frame, knee_JC, and vsk
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([0, 0, 0]), np.array([0, 0, 0]),
[[np.array([-0.30428137, -0.41913816, -0.85541572]), np.array([-0.00233238, -0.89766624, 0.4406698]), np.array([-0.95257934, 0.13608276, 0.27216553])],
[np.array([0.7477279, 0.63929183, -0.1794685]), np.array([-0.287221, 0.55508569, 0.7806305]), np.array([0.59867109, -0.53215208, 0.59867109])]]]),
# Testing when values are added to frame, knee_JC, vsk and mockReturnVal
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[np.array([-7, 1, 2]), np.array([9, -8, 9]),
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38.0, 'LeftAnkleWidth': 18.0, 'RightTibialTorsion': 29.0, 'LeftTibialTorsion': -13.0},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997 ]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of lists of ints and vsk values are ints
({'RTIB': np.array([-9, 6, -9]), 'LTIB': np.array([0, 2, -1]), 'RANK': np.array([1, 0, -5]), 'LANK': np.array([2, -4, -5])},
[[-7, 1, 2], [9, -8, 9],
np.array([[rand_coor, rand_coor, rand_coor], [rand_coor, rand_coor, rand_coor]])],
{'RightAnkleWidth': -38, 'LeftAnkleWidth': 18, 'RightTibialTorsion': 29, 'LeftTibialTorsion': -13},
[np.array([2, -5, 4]), np.array([8, -3, 1])],
[[[-9, 6, -9], [-7, 1, 2], [1, 0, -5], -12.0],
[[0, 2, -1], [9, -8, 9], [2, -4, -5], 16.0]],
[np.array([2, -5, 4]), np.array([8, -3, 1]),
[[np.array([1.48891678, -5.83482493, 3.7953997]), np.array([1.73661348, -5.07447603, 4.96181124]), np.array([1.18181818, -4.45454545, 3.81818182])],
[np.array([8.87317138, -2.54514024, 1.17514093]), np.array([7.52412119, -2.28213872, 1.50814815]), np.array([8.10540926, -3.52704628, 1.84327404])]]]),
# Testing that when knee_JC is composed of numpy arrays of ints and vsk values are ints
({'RTIB':
|
np.array([-9, 6, -9])
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create the data for entailment
"""
import os
import sys
import argparse
import numpy as np
import h5py
import itertools
from collections import defaultdict
class Indexer:
def __init__(self, symbols = ["<blank>","<unk>","<s>","</s>"]):
self.vocab = defaultdict(int)
self.PAD = symbols[0]
self.UNK = symbols[1]
self.BOS = symbols[2]
self.EOS = symbols[3]
self.d = {self.PAD: 1, self.UNK: 2, self.BOS: 3, self.EOS: 4}
def add_w(self, ws):
for w in ws:
if w not in self.d:
self.d[w] = len(self.d) + 1
def convert(self, w):
return self.d[w] if w in self.d else self.d['<oov' + str(np.random.randint(1,100)) + '>']
def convert_sequence(self, ls):
return [self.convert(l) for l in ls]
def clean(self, s):
s = s.replace(self.PAD, "")
s = s.replace(self.BOS, "")
s = s.replace(self.EOS, "")
return s
def write(self, outfile):
out = open(outfile, "w")
        items = [(v, k) for k, v in self.d.items()]
items.sort()
for v, k in items:
            print(k, v, file=out)
out.close()
def prune_vocab(self, k, cnt=False):
        vocab_list = [(word, count) for word, count in self.vocab.items()]
if cnt:
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list if pair[1] > k}
else:
vocab_list.sort(key = lambda x: x[1], reverse=True)
k = min(k, len(vocab_list))
self.pruned_vocab = {pair[0]:pair[1] for pair in vocab_list[:k]}
for word in self.pruned_vocab:
if word not in self.d:
self.d[word] = len(self.d) + 1
def load_vocab(self, vocab_file):
self.d = {}
for line in open(vocab_file, 'r'):
v, k = line.strip().split()
self.d[v] = int(k)
def pad(ls, length, symbol, pad_back = True):
if len(ls) >= length:
return ls[:length]
if pad_back:
return ls + [symbol] * (length -len(ls))
else:
return [symbol] * (length -len(ls)) + ls
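# Editor's sketch (helper not in the original preprocessing script): how pad()
# behaves for a short token list -- truncate to `length`, or fill with `symbol`
# on the right (default) or on the left.
def _pad_example():
    assert pad(['a', 'b'], 4, '<blank>') == ['a', 'b', '<blank>', '<blank>']
    assert pad(['a', 'b'], 4, '<blank>', pad_back=False) == ['<blank>', '<blank>', 'a', 'b']
    assert pad(['a', 'b', 'c'], 2, '<blank>') == ['a', 'b']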
def get_glove_words(f):
glove_words = set()
for line in open(f, "r"):
word = line.split()[0].strip()
glove_words.add(word)
return glove_words
def get_data(args):
word_indexer = Indexer(["<blank>","<unk>","<s>","</s>"])
label_indexer = Indexer(["<blank>","<unk>","<s>","</s>"])
label_indexer.d = {}
glove_vocab = get_glove_words(args.glove)
for i in range(1,101): #hash oov words to one of 100 random embeddings, per Parikh et al. 2016
oov_word = '<oov'+ str(i) + '>'
word_indexer.vocab[oov_word] += 1
def make_vocab(srcfile, targetfile, labelfile, seqlength):
num_sents = 0
for _, (src_orig, targ_orig, label_orig) in \
                enumerate(zip(open(srcfile,'r'),
open(targetfile,'r'), open(labelfile, 'r'))):
src_orig = word_indexer.clean(src_orig.strip())
targ_orig = word_indexer.clean(targ_orig.strip())
targ = targ_orig.strip().split()
src = src_orig.strip().split()
label = label_orig.strip().split()
if len(targ) > seqlength or len(src) > seqlength or len(targ) < 1 or len(src) < 1:
continue
num_sents += 1
for word in targ:
if word in glove_vocab:
word_indexer.vocab[word] += 1
for word in src:
if word in glove_vocab:
word_indexer.vocab[word] += 1
for word in label:
label_indexer.vocab[word] += 1
return num_sents
def convert(srcfile, targetfile, labelfile, batchsize, seqlength, outfile, num_sents,
max_sent_l=0, shuffle=0):
newseqlength = seqlength + 1 #add 1 for BOS
targets = np.zeros((num_sents, newseqlength), dtype=int)
sources = np.zeros((num_sents, newseqlength), dtype=int)
labels = np.zeros((num_sents,), dtype =int)
source_lengths = np.zeros((num_sents,), dtype=int)
target_lengths = np.zeros((num_sents,), dtype=int)
both_lengths = np.zeros(num_sents, dtype = {'names': ['x','y'], 'formats': ['i4', 'i4']})
dropped = 0
sent_id = 0
for _, (src_orig, targ_orig, label_orig) in \
                enumerate(zip(open(srcfile,'r'), open(targetfile,'r')
,open(labelfile,'r'))):
src_orig = word_indexer.clean(src_orig.strip())
targ_orig = word_indexer.clean(targ_orig.strip())
targ = [word_indexer.BOS] + targ_orig.strip().split()
src = [word_indexer.BOS] + src_orig.strip().split()
label = label_orig.strip().split()
max_sent_l = max(len(targ), len(src), max_sent_l)
if len(targ) > newseqlength or len(src) > newseqlength or len(targ) < 2 or len(src) < 2:
dropped += 1
continue
targ = pad(targ, newseqlength, word_indexer.PAD)
targ = word_indexer.convert_sequence(targ)
targ = np.array(targ, dtype=int)
src = pad(src, newseqlength, word_indexer.PAD)
src = word_indexer.convert_sequence(src)
src = np.array(src, dtype=int)
targets[sent_id] = np.array(targ,dtype=int)
target_lengths[sent_id] = (targets[sent_id] != 1).sum()
sources[sent_id] = np.array(src, dtype=int)
source_lengths[sent_id] = (sources[sent_id] != 1).sum()
labels[sent_id] = label_indexer.d[label[0]]
both_lengths[sent_id] = (source_lengths[sent_id], target_lengths[sent_id])
sent_id += 1
if sent_id % 100000 == 0:
print("{}/{} sentences processed".format(sent_id, num_sents))
print(sent_id, num_sents)
if shuffle == 1:
rand_idx = np.random.permutation(sent_id)
targets = targets[rand_idx]
sources = sources[rand_idx]
source_lengths = source_lengths[rand_idx]
target_lengths = target_lengths[rand_idx]
labels = labels[rand_idx]
both_lengths = both_lengths[rand_idx]
#break up batches based on source/target lengths
source_lengths = source_lengths[:sent_id]
source_sort = np.argsort(source_lengths)
both_lengths = both_lengths[:sent_id]
sorted_lengths = np.argsort(both_lengths, order = ('x', 'y'))
sources = sources[sorted_lengths]
targets = targets[sorted_lengths]
labels = labels[sorted_lengths]
target_l = target_lengths[sorted_lengths]
source_l = source_lengths[sorted_lengths]
curr_l_src = 0
curr_l_targ = 0
l_location = [] #idx where sent length changes
for j,i in enumerate(sorted_lengths):
if source_lengths[i] > curr_l_src or target_lengths[i] > curr_l_targ:
curr_l_src = source_lengths[i]
curr_l_targ = target_lengths[i]
l_location.append(j+1)
l_location.append(len(sources))
#get batch sizes
curr_idx = 1
batch_idx = [1]
batch_l = []
target_l_new = []
source_l_new = []
for i in range(len(l_location)-1):
while curr_idx < l_location[i+1]:
curr_idx = min(curr_idx + batchsize, l_location[i+1])
batch_idx.append(curr_idx)
for i in range(len(batch_idx)-1):
batch_l.append(batch_idx[i+1] - batch_idx[i])
source_l_new.append(source_l[batch_idx[i]-1])
target_l_new.append(target_l[batch_idx[i]-1])
# Write output
f = h5py.File(outfile, "w")
f["source"] = sources
f["target"] = targets
f["target_l"] = np.array(target_l_new, dtype=int)
f["source_l"] =
|
np.array(source_l_new, dtype=int)
|
numpy.array
|
# Copyright (C) 2021-2022, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Dict, List, Optional, Tuple
import cv2
import numpy as np
from scipy.optimize import linear_sum_assignment
from unidecode import unidecode
__all__ = ['TextMatch', 'box_iou', 'box_ioa', 'mask_iou', 'polygon_iou',
'nms', 'LocalizationConfusion', 'OCRMetric', 'DetectionMetric']
def string_match(word1: str, word2: str) -> Tuple[bool, bool, bool, bool]:
"""Performs string comparison with multiple levels of tolerance
Args:
word1: a string
word2: another string
Returns:
a tuple with booleans specifying respectively whether the raw strings, their lower-case counterparts, their
unidecode counterparts and their lower-case unidecode counterparts match
"""
raw_match = (word1 == word2)
caseless_match = (word1.lower() == word2.lower())
unidecode_match = (unidecode(word1) == unidecode(word2))
# Warning: the order is important here otherwise the pair ("EUR", "€") cannot be matched
unicase_match = (unidecode(word1).lower() == unidecode(word2).lower())
return raw_match, caseless_match, unidecode_match, unicase_match
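# Editor's sketch (not part of doctr): the four tolerance levels returned by
# string_match, illustrated on a made-up accented, mixed-case pair. Only the
# lower-cased unidecode comparison matches here.
def _string_match_example() -> Tuple[bool, bool, bool, bool]:
    matches = string_match("Cafe", "café")
    assert matches == (False, False, False, True)
    return matches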
class TextMatch:
r"""Implements text match metric (word-level accuracy) for recognition task.
The raw aggregated metric is computed as follows:
.. math::
\forall X, Y \in \mathcal{W}^N,
TextMatch(X, Y) = \frac{1}{N} \sum\limits_{i=1}^N f_{Y_i}(X_i)
with the indicator function :math:`f_{a}` defined as:
.. math::
\forall a, x \in \mathcal{W},
f_a(x) = \left\{
\begin{array}{ll}
1 & \mbox{if } x = a \\
0 & \mbox{otherwise.}
\end{array}
\right.
where :math:`\mathcal{W}` is the set of all possible character sequences,
:math:`N` is a strictly positive integer.
Example::
>>> from doctr.utils import TextMatch
>>> metric = TextMatch()
>>> metric.update(['Hello', 'world'], ['hello', 'world'])
>>> metric.summary()
"""
def __init__(self) -> None:
self.reset()
def update(
self,
gt: List[str],
pred: List[str],
) -> None:
"""Update the state of the metric with new predictions
Args:
            gt: list of ground-truth character sequences
pred: list of predicted character sequences
"""
if len(gt) != len(pred):
raise AssertionError("prediction size does not match with ground-truth labels size")
for gt_word, pred_word in zip(gt, pred):
_raw, _caseless, _unidecode, _unicase = string_match(gt_word, pred_word)
self.raw += int(_raw)
self.caseless += int(_caseless)
self.unidecode += int(_unidecode)
self.unicase += int(_unicase)
self.total += len(gt)
def summary(self) -> Dict[str, float]:
"""Computes the aggregated metrics
Returns:
a dictionary with the exact match score for the raw data, its lower-case counterpart, its unidecode
counterpart and its lower-case unidecode counterpart
"""
if self.total == 0:
raise AssertionError("you need to update the metric before getting the summary")
return dict(
raw=self.raw / self.total,
caseless=self.caseless / self.total,
unidecode=self.unidecode / self.total,
unicase=self.unicase / self.total,
)
def reset(self) -> None:
self.raw = 0
self.caseless = 0
self.unidecode = 0
self.unicase = 0
self.total = 0
def box_iou(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:
"""Computes the IoU between two sets of bounding boxes
Args:
boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)
boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)
Returns:
the IoU matrix of shape (N, M)
"""
iou_mat = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)
if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:
l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)
l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)
left = np.maximum(l1, l2.T)
top = np.maximum(t1, t2.T)
right = np.minimum(r1, r2.T)
bot = np.minimum(b1, b2.T)
        intersection = np.clip(right - left, 0, np.inf) * np.clip(bot - top, 0, np.inf)
union = (r1 - l1) * (b1 - t1) + ((r2 - l2) * (b2 - t2)).T - intersection
iou_mat = intersection / union
return iou_mat
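# Editor's sketch (not part of doctr): a tiny worked example of box_iou. Two
# 1 x 1 boxes overlapping over half their width intersect in 0.5, their union
# is 1.5, so the IoU is 1/3.
def _box_iou_example() -> np.ndarray:
    boxes_a = np.array([[0.0, 0.0, 1.0, 1.0]])
    boxes_b = np.array([[0.5, 0.0, 1.5, 1.0]])
    iou = box_iou(boxes_a, boxes_b)
    np.testing.assert_allclose(iou, [[1.0 / 3.0]], rtol=1e-6)
    return iou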
def box_ioa(boxes_1: np.ndarray, boxes_2: np.ndarray) -> np.ndarray:
"""Computes the IoA (intersection over area) between two sets of bounding boxes:
ioa(i, j) = inter(i, j) / area(i)
Args:
boxes_1: bounding boxes of shape (N, 4) in format (xmin, ymin, xmax, ymax)
boxes_2: bounding boxes of shape (M, 4) in format (xmin, ymin, xmax, ymax)
Returns:
the IoA matrix of shape (N, M)
"""
ioa_mat = np.zeros((boxes_1.shape[0], boxes_2.shape[0]), dtype=np.float32)
if boxes_1.shape[0] > 0 and boxes_2.shape[0] > 0:
l1, t1, r1, b1 = np.split(boxes_1, 4, axis=1)
l2, t2, r2, b2 = np.split(boxes_2, 4, axis=1)
left = np.maximum(l1, l2.T)
top = np.maximum(t1, t2.T)
right = np.minimum(r1, r2.T)
bot = np.minimum(b1, b2.T)
        intersection = np.clip(right - left, 0, np.inf) * np.clip(bot - top, 0, np.inf)
area = (r1 - l1) * (b1 - t1)
ioa_mat = intersection / area
return ioa_mat
def mask_iou(masks_1: np.ndarray, masks_2: np.ndarray) -> np.ndarray:
"""Computes the IoU between two sets of boolean masks
Args:
masks_1: boolean masks of shape (N, H, W)
masks_2: boolean masks of shape (M, H, W)
Returns:
the IoU matrix of shape (N, M)
"""
if masks_1.shape[1:] != masks_2.shape[1:]:
raise AssertionError("both boolean masks should have the same spatial shape")
iou_mat = np.zeros((masks_1.shape[0], masks_2.shape[0]), dtype=np.float32)
if masks_1.shape[0] > 0 and masks_2.shape[0] > 0:
axes = tuple(range(2, masks_1.ndim + 1))
intersection = np.logical_and(masks_1[:, None, ...], masks_2[None, ...]).sum(axis=axes)
union = np.logical_or(masks_1[:, None, ...], masks_2[None, ...]).sum(axis=axes)
iou_mat = intersection / union
return iou_mat
def polygon_iou(
polys_1: np.ndarray,
polys_2: np.ndarray,
mask_shape: Tuple[int, int],
use_broadcasting: bool = False
) -> np.ndarray:
"""Computes the IoU between two sets of rotated bounding boxes
Args:
polys_1: rotated bounding boxes of shape (N, 4, 2)
polys_2: rotated bounding boxes of shape (M, 4, 2)
mask_shape: spatial shape of the intermediate masks
use_broadcasting: if set to True, leverage broadcasting speedup by consuming more memory
Returns:
the IoU matrix of shape (N, M)
"""
if polys_1.ndim != 3 or polys_2.ndim != 3:
raise AssertionError("expects boxes to be in format (N, 4, 2)")
iou_mat = np.zeros((polys_1.shape[0], polys_2.shape[0]), dtype=np.float32)
if polys_1.shape[0] > 0 and polys_2.shape[0] > 0:
if use_broadcasting:
masks_1 = rbox_to_mask(polys_1, shape=mask_shape)
masks_2 = rbox_to_mask(polys_2, shape=mask_shape)
iou_mat = mask_iou(masks_1, masks_2)
else:
# Save memory by doing the computation for each pair
for idx, b1 in enumerate(polys_1):
m1 = _rbox_to_mask(b1, mask_shape)
for _idx, b2 in enumerate(polys_2):
m2 = _rbox_to_mask(b2, mask_shape)
iou_mat[idx, _idx] = np.logical_and(m1, m2).sum() / np.logical_or(m1, m2).sum()
return iou_mat
def _rbox_to_mask(box: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
"""Converts a rotated bounding box to a boolean mask
Args:
box: rotated bounding box of shape (4, 2)
shape: spatial shapes of the output masks
Returns:
the boolean mask of the specified shape
"""
mask = np.zeros(shape, dtype=np.uint8)
# Get absolute coords
if box.dtype != int:
abs_box = box.copy()
abs_box[:, 0] = abs_box[:, 0] * shape[1]
abs_box[:, 1] = abs_box[:, 1] * shape[0]
abs_box = abs_box.round().astype(int)
else:
abs_box = box
abs_box[2:] = abs_box[2:] + 1
cv2.fillPoly(mask, [abs_box - 1], 1)
return mask.astype(bool)
def rbox_to_mask(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
"""Converts rotated bounding boxes to boolean masks
Args:
boxes: rotated bounding boxes of shape (N, 4, 2)
shape: spatial shapes of the output masks
Returns:
the boolean masks of shape (N, H, W)
"""
masks = np.zeros((boxes.shape[0], *shape), dtype=np.uint8)
if boxes.shape[0] > 0:
# Get absolute coordinates
        if boxes.dtype != int:
abs_boxes = boxes.copy()
abs_boxes[:, :, 0] = abs_boxes[:, :, 0] * shape[1]
abs_boxes[:, :, 1] = abs_boxes[:, :, 1] * shape[0]
            abs_boxes = abs_boxes.round().astype(int)
else:
abs_boxes = boxes
abs_boxes[:, 2:] = abs_boxes[:, 2:] + 1
# TODO: optimize slicing to improve vectorization
for idx, _box in enumerate(abs_boxes):
cv2.fillPoly(masks[idx], [_box - 1], 1)
return masks.astype(bool)
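# Editor's sketch (not part of doctr, boxes below are made up): the greedy NMS
# defined next keeps the highest-scoring box, suppresses remaining boxes whose
# IoU with it exceeds `thresh`, then repeats. With one heavy overlap and one
# disjoint box, the expected surviving indices are [0, 2].
def _nms_example():
    boxes = np.array([
        [0.0, 0.0, 10.0, 10.0, 0.9],    # highest score, kept first
        [1.0, 1.0, 10.0, 10.0, 0.8],    # IoU with the first box is 0.81 -> suppressed
        [20.0, 20.0, 30.0, 30.0, 0.7],  # no overlap -> kept
    ])
    return nms(boxes, thresh=0.5)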
def nms(boxes: np.ndarray, thresh: float = .5) -> List[int]:
"""Perform non-max suppression, borrowed from <https://github.com/rbgirshick/fast-rcnn>`_.
Args:
boxes: np array of straight boxes: (*, 5), (xmin, ymin, xmax, ymax, score)
thresh: iou threshold to perform box suppression.
Returns:
A list of box indexes to keep
"""
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
scores = boxes[:, 4]
areas = (x2 - x1) * (y2 - y1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 =
|
np.maximum(y1[i], y1[order[1:]])
|
numpy.maximum
|
import numpy as np
from .pointmap import PointMap, EyeToImagePointMap
PI = np.pi
MYFLOAT = np.float64
def solve_quadratic(a, b, c):
"""
b is b/2 and computed D is D/4
so solutions are (-b +/- sqrt(D))/a
the solutions with the lowest absolute
values are returned
a,b,c can be arrays
"""
D = b * b - a * c
# select the minimum of the 2 roots
# so the closest intersection point will be chosen
l12 = np.array([(-b + np.sqrt(D)) / a,
(-b - np.sqrt(D)) / a]).reshape(2, -1)
indxl = np.argmin(abs(l12), axis=0)
indyl = np.indices(indxl.shape)[0]
# return the solution(s) with the lowest absolute value
return l12[indxl, indyl]
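# Editor's sketch (not part of the original module): solving x**2 - 3x + 2 = 0
# with solve_quadratic. Since the function expects b/2, we pass -1.5 instead of
# -3; of the two roots (1 and 2) it keeps the one with the smaller magnitude.
def _solve_quadratic_example():
    roots = solve_quadratic(np.array([1.0]), np.array([-1.5]), np.array([2.0]))
    assert np.allclose(roots, [1.0])
    return roots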
class AlbersProjectionMap(PointMap):
""" https://en.wikipedia.org/wiki/Albers_projection
Compared to the entry of 8/21/2015
(numbers were chosen to have simpler formulas)
phi1 = 90, phi2 = 0, phi0 = 90, lambda0 = 90
lambda <- 2n (lambda - lambda0) (doubles the area)
=>
n = 0.5
rho0 = 0, C = 1, rho = 2sqrt(1-sin(phi))
"""
def __init__(self, r, eulerangles=None):
""" r: radious of the sphere from which the projection is made
"""
self._r = r
self.rotationmap = EulerAnglesMap(eulerangles)
self.x_rotationmap = EulerAnglesMap(eulerangles=[0, np.pi / 2, 0])
def map(self, elev, azim):
""" Returns (nan, nan) if point cannot be mapped else (x, y)
arguments can be numpy arrays or scalars
"""
elev, azim = self.rotationmap.invmap(elev, azim)
elev, azim = self.x_rotationmap.invmap(elev, azim)
rxy = self._r * np.sqrt(1 - np.sin(elev))
x = -rxy * np.cos(azim)
y = -rxy * np.sin(azim)
return (x, y)
def invmap(self, x, y):
""" Returns (nan, nan) if point cannot be mapped else
(elevation, azimuth)
arguments can be numpy arrays or scalars
"""
r = self._r
rxy = np.sqrt(x * x + y * y)
elev = np.arcsin(1 - (rxy / r)**2)
azim = np.arctan2(-y, -x)
# this angle is nan if rxy = 0 but can be anything
try:
azim[
|
np.isnan(azim)
|
numpy.isnan
|
import numpy as np
import numpy.testing as npt
import pytest
from pyfar import Signal
def test_signal_init():
"""Test to init Signal without optional parameters."""
signal = Signal(np.array([1., 2., 3.]), 44100)
assert isinstance(signal, Signal)
def test_signal_init_list():
signal = Signal([1, 2, 3], 44100)
assert isinstance(signal, Signal)
def test_signal_init_default_parameter():
# using all defaults
signal = Signal([1, 2, 3], 44100)
assert signal.domain == 'time'
assert signal.fft_norm == 'none'
assert signal.comment == 'none'
assert signal.fft_norm == 'none'
def test_signal_comment():
signal = Signal([1, 2, 3], 44100, comment='Bla')
assert signal.comment == 'Bla'
signal.comment = 'Blub'
assert signal.comment == 'Blub'
def test_domain_getter_freq():
signal = Signal(np.array([1]), 44100)
signal._domain = 'freq'
assert signal.domain == 'freq'
def test_domain_getter_time():
signal = Signal(np.array([1]), 44100)
signal._domain = 'time'
assert signal.domain == 'time'
def test_domain_setter_error():
signal = Signal(np.array([1]), 44100)
with pytest.raises(ValueError, match='Incorrect domain'):
signal.domain = 'quark'
def test_domain_setter_freq_when_freq():
signal = Signal(np.array([1]), 44100)
domain = 'freq'
signal._domain = domain
signal.domain = domain
assert signal.domain == domain
def test_domain_setter_freq_when_time(sine_stub):
signal = Signal(
sine_stub.time, sine_stub.sampling_rate, domain='time',
fft_norm=sine_stub.fft_norm)
domain = 'freq'
signal.domain = domain
assert signal.domain == domain
npt.assert_allclose(
signal._data, sine_stub.freq, rtol=1e-10, atol=1e-10)
def test_domain_setter_time_when_time():
signal = Signal(np.array([1]), 44100)
domain = 'time'
signal._domain = domain
signal.domain = domain
assert signal.domain == domain
def test_domain_setter_time_when_freq(sine_stub):
signal = Signal(
sine_stub.freq, sine_stub.sampling_rate, domain='freq',
fft_norm=sine_stub.fft_norm)
domain = 'time'
signal.domain = domain
assert signal.domain == domain
npt.assert_allclose(
signal._data, sine_stub.time, atol=1e-10, rtol=1e-10)
def test_signal_init_val():
"""Test to init Signal with complete parameters."""
signal = Signal([1, 2, 3], 44100, domain='time', fft_norm='none')
assert isinstance(signal, Signal)
def test_n_samples():
"""Test for number of samples."""
signal = Signal([1, 2, 3], 44100, domain='time')
assert signal.n_samples == 3
def test_n_bins():
"""Test for number of freq bins."""
signal = Signal([1, 2, 3], 44100, domain='time')
assert signal.n_bins == 2
signal = Signal([1, 2, 3, 4], 44100, domain='time')
assert signal.n_bins == 3
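# Editor's note (sketch, not part of the pyfar test suite): pyfar stores the
# one-sided spectrum of a real signal, so the bin counts asserted above follow
# n_bins = n_samples // 2 + 1 (3 samples -> 2 bins, 4 samples -> 3 bins).
def _expected_n_bins(n_samples):
    return n_samples // 2 + 1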
def test_times():
"""Test for the time instances."""
signal = Signal([1, 2, 3, 4], 2, domain='time')
npt.assert_allclose(signal.times, [0., 0.5, 1., 1.5])
def test_getter_time():
"""Test if attribute time is accessed correctly."""
signal = Signal([1, 2, 3], 44100, domain='time')
signal._domain = 'time'
signal._data = np.array([[1., 2., 3.]])
npt.assert_allclose(signal.time, np.array([[1., 2., 3.]]))
def test_setter_time():
"""Test if attribute time is set correctly."""
signal = Signal([1, 2, 3], 44100, domain='time')
signal.time = np.array([1., 2., 3.])
assert signal._domain == 'time'
npt.assert_allclose(signal._data, np.array([[1., 2., 3.]]))
def test_getter_freq():
"""Test if attribute freq is accessed correctly."""
signal = Signal([1, 2, 3], 44100, fft_norm='rms')
signal._domain = 'freq'
signal._data = np.array([[1., 2., 3.]])
npt.assert_allclose(signal.freq, np.array([[1., 2., 3.]]))
def test_setter_freq():
"""Test if attribute freq is set correctly."""
signal = Signal([1, 2, 3], 44100, fft_norm='rms')
signal.freq = np.array([[1., 2., 3.]])
assert signal.domain == 'freq'
npt.assert_allclose(signal._data, np.array([[1., 2., 3.]]))
def test_re_setter_freq():
"""Test the warning for estimating the number of samples from n_bins."""
signal = Signal([1, 2, 3], 44100, domain='freq', n_samples=4)
with pytest.warns(UserWarning):
signal.freq = [1, 2, 3, 4]
def test_getter_sampling_rate():
"""Test if attribute sampling rate is accessed correctly."""
signal = Signal([1, 2, 3], 44100)
signal._sampling_rate = 1000
assert signal.sampling_rate == 1000
def test_setter_sampling_rate():
"""Test if attribute sampling rate is set correctly."""
signal = Signal([1, 2, 3], 44100)
signal.sampling_rate = 1000
assert signal._sampling_rate == 1000
def test_getter_signal_type():
"""Test if attribute signal type is accessed correctly."""
signal = Signal([1, 2, 3], 44100, fft_norm='none')
npt.assert_string_equal(signal.signal_type, 'energy')
signal = Signal([1, 2, 3], 44100, fft_norm='rms')
npt.assert_string_equal(signal.signal_type, 'power')
def test_getter_fft_norm():
signal = Signal([1, 2, 3], 44100, fft_norm='psd')
assert signal.fft_norm == 'psd'
def test_setter_fft_norm():
spec_power_unitary = np.atleast_2d([1, 2, 1])
spec_power_amplitude = np.atleast_2d([1/4, 2/4, 1/4])
signal = Signal(
spec_power_unitary, 44100, n_samples=4, domain='freq',
fft_norm='unitary')
# changing the fft_norm also changes the spectrum
signal.fft_norm = 'amplitude'
assert signal.fft_norm == 'amplitude'
npt.assert_allclose(signal.freq, spec_power_amplitude, atol=1e-15)
# changing the fft norm in the time domain does not change the time data
signal.domain = 'time'
time_power_amplitude = signal._data.copy()
signal.fft_norm = 'unitary'
npt.assert_allclose(signal.time, time_power_amplitude)
npt.assert_allclose(signal.freq, spec_power_unitary)
# setting an invalid fft_norm
with pytest.raises(ValueError):
signal.fft_norm = 'bullshit'
def test_dtype():
"""Test for the getter of dtype."""
dtype = float
signal = Signal([1, 2, 3], 44100, dtype=dtype)
assert signal.dtype == dtype
def test_signal_length():
"""Test for the signal length."""
signal = Signal([1, 2, 3, 4], 2)
assert signal.signal_length == 1.5
def test_cshape():
"""Test the attribute cshape."""
time = np.arange(2 * 3 * 4).reshape((2, 3, 4))
signal = Signal(time, 44100)
assert signal.cshape == (2, 3)
def test_magic_getitem():
"""Test slicing operations by the magic function __getitem__."""
time = np.arange(2 * 3 * 4).reshape((2, 3, 4))
signal = Signal(time, 44100, domain='time')
npt.assert_allclose(signal[0]._data, time[0])
def test_magic_getitem_slice():
"""Test slicing operations by the magic function __getitem__."""
time = np.arange(2 * 3 * 4).reshape((2, 3, 4))
signal = Signal(time, 44100, domain='time')
npt.assert_allclose(signal[:1]._data, time[:1])
def test_magic_getitem_allslice():
"""Test slicing operations by the magic function __getitem__."""
time = np.arange(2 * 3 * 4).reshape((2, 3, 4))
signal = Signal(time, 44100, domain='time')
npt.assert_allclose(signal[:]._data, time[:])
def test_magic_setitem():
"""Test the magic function __setitem__."""
signal = Signal([1, 2, 3], 44100)
set_signal = Signal([2, 3, 4], 44100)
signal[0] = set_signal
npt.assert_allclose(signal._data, set_signal._data)
def test_magic_setitem_wrong_sr():
"""Test the magic function __setitem__."""
signal = Signal([1, 2, 3], 44100)
set_signal = Signal([1, 2, 3], 48000)
with pytest.raises(ValueError, match='sampling rates do not match'):
signal[0] = set_signal
def test_magic_setitem_wrong_norm():
"""Test the magic function __setitem__."""
signal = Signal([1, 2, 3], 44100, fft_norm='none')
set_signal = Signal([1, 2, 3], 44100, fft_norm='rms')
with pytest.raises(ValueError, match='FFT norms do not match'):
signal[0] = set_signal
def test_magic_setitem_wrong_n_samples():
"""Test the magic function __setitem__."""
signal = Signal([1, 2, 3, 4], 44100)
set_signal = Signal([1, 2, 3], 44100)
with pytest.raises(ValueError, match='number of samples does not match'):
signal[0] = set_signal
def test_magic_len():
"""Test the magic function __len__."""
signal = Signal([1, 2, 3], 44100)
assert len(signal) == 3
def test_find_nearest_time():
sampling_rate = 100
signal = Signal(np.zeros(100), sampling_rate)
actual = signal.find_nearest_time(0.5)
expected = 50
assert actual == expected
actual = signal.find_nearest_time([0.5, 0.75])
expected = [50, 75]
|
npt.assert_allclose(actual, expected)
|
numpy.testing.assert_allclose
|
#
# Copyright (c) 2021 The Markovflow Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module containing the integration tests for the `CVIGaussianProcess` class."""
import numpy as np
import tensorflow as tf
from gpflow.likelihoods import Gaussian
from markovflow.kernels import Matern12
from markovflow.models import (
GaussianProcessRegression,
SparseCVIGaussianProcess,
SparseVariationalGaussianProcess,
)
from markovflow.ssm_natgrad import SSMNaturalGradient
OUT_DIM = 1
LENGTH_SCALE = 2.0
VARIANCE = 2.25
NUM_DATA = 6
batch_shape = ()
output_dim = 1
def scvi_gpr_setup(num_inducing):
"""
Creates a GPR model and a matched Sparse VGP model (z=x),
and optimize the later (single step)
"""
time_points, observations, kernel, variance = _setup()
inducing_points = (
np.linspace(np.min(time_points.numpy()), np.max(time_points.numpy()), num_inducing) + 1e-10
)
chol_obs_covariance = tf.eye(output_dim, dtype=tf.float64) * tf.sqrt(variance)
input_data = (time_points, observations)
gpr = GaussianProcessRegression(
kernel=kernel,
input_data=input_data,
chol_obs_covariance=chol_obs_covariance,
mean_function=None,
)
likelihood = Gaussian(variance=variance)
scvi = SparseCVIGaussianProcess(
kernel=kernel,
inducing_points=tf.constant(inducing_points),
likelihood=likelihood,
learning_rate=1.0,
)
# do not train any hyper-parameters for these tests
for t in likelihood.trainable_variables + kernel.trainable_variables:
t._trainable = False
return scvi, gpr, (time_points, observations)
def _setup():
""" Data, kernel and likelihood setup """
time_points = np.linspace(0, 1, NUM_DATA)
observations = (np.cos(20.0 * time_points) + np.random.randn(*time_points.shape)).reshape(-1, 1)
time_points = tf.constant(time_points)
observations = tf.constant(observations)
kernel = Matern12(lengthscale=LENGTH_SCALE, variance=VARIANCE, output_dim=output_dim)
observation_noise = 1.0
variance = tf.constant(observation_noise, dtype=tf.float64)
return time_points, observations, kernel, variance
def test_scvi_unchanged_at_optimum(with_tf_random_seed):
"""Test that the update does not change sites at the optimum"""
scvi, _, data = scvi_gpr_setup(NUM_DATA)
scvi.update_sites(data)
with tf.GradientTape() as g:
g.watch(scvi.trainable_variables)
elbo = scvi.classic_elbo(data)
grad_elbo = g.gradient(elbo, scvi.trainable_variables)
for g in grad_elbo:
np.testing.assert_allclose(g, 0.0, atol=1e-9)
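# Editor's note (sketch, not part of the original tests): the manual site check
# in test_optimal_sites below uses the natural parameters of a Gaussian
# likelihood term, nat1 = y / sigma^2 and nat2 = -1 / (2 * sigma^2); with unit
# observation noise this reduces to the gpr_nat1 / gpr_nat2 values computed there.
def _gaussian_site_nat_params(y, noise_variance):
    return y / noise_variance, -0.5 / noise_variance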
def test_optimal_sites(with_tf_random_seed):
"""Test that the optimal value of the exact sites match the true sites """
scvi, gpr, data = scvi_gpr_setup(NUM_DATA)
scvi.update_sites(data)
sd = scvi.kernel.state_dim
# for z = x, the sites are 2 sd x 2 sd but half empty
# one part must match the GPR site
scvi_nat1 = scvi.nat1.numpy()[:-1, sd:]
scvi_nat2 = scvi.nat2.numpy()[:-1, sd:, sd:]
# manually compute the optimal sites
s2 = gpr._chol_obs_covariance.numpy()
gpr_nat1 = gpr.observations / s2
gpr_nat2 = -0.5 / s2 * np.ones_like(scvi_nat2)
|
np.testing.assert_allclose(scvi_nat1, gpr_nat1)
|
numpy.testing.assert_allclose
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
import sklearn.metrics.pairwise as sk_metric
from pfbayes.common.distributions import KDE
import torch
from pfbayes.common.cmd_args import cmd_args
from numpy import linalg as LA
import pickle
import os
def square_mmd_fine(p_samples, q_samples, n_p, n_q, kernel_type):
"""
n_p: number of samples from true distribution p
assume n_p >> n_q
"""
kernel_dict = {
'gaussian': sk_metric.rbf_kernel,
'laplacian': sk_metric.laplacian_kernel,
'sigmoid': sk_metric.sigmoid_kernel,
'polynomial': sk_metric.polynomial_kernel,
'cosine': sk_metric.cosine_similarity,
}
kernel = kernel_dict[kernel_type]
p_samples = np.array(p_samples)
q_samples = np.array(q_samples)
k_xi_xj = kernel(p_samples, p_samples)
k_yi_yj = kernel(q_samples, q_samples)
k_xi_yj = kernel(p_samples, q_samples)
off_diag_k_xi_xj = (np.sum(k_xi_xj) - np.sum(np.diag(k_xi_xj))) / n_p / (n_p - 1)
off_diag_k_yi_yj = (np.sum(k_yi_yj) - np.sum(np.diag(k_yi_yj))) / n_q / (n_q - 1)
sum_k_xi_yj = np.sum(k_xi_yj) * 2 / n_p / n_q
return off_diag_k_xi_xj + off_diag_k_yi_yj - sum_k_xi_yj
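# Editor's sketch (illustrative only, sample sizes are arbitrary): estimating
# the squared MMD between two Gaussian samples with the RBF kernel. The value
# hovers near zero when both sets come from the same distribution and grows as
# the distributions separate.
def _square_mmd_example(seed=0):
    rng = np.random.RandomState(seed)
    p = rng.randn(200, 2)          # samples standing in for the true distribution
    q = rng.randn(50, 2) + 3.0     # shifted samples, so the estimate should be large
    return square_mmd_fine(p, q, 200, 50, 'gaussian')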
def e_p_log_q(p_samples, q_samples):
q_samples = torch.Tensor(q_samples)
p_samples = torch.Tensor(p_samples)
kde = KDE(q_samples)
log_score = kde.log_pdf(p_samples)
return torch.mean(log_score)
class EvalMetric(object):
"""
using numpy
"""
def __init__(self, particles, true_mean, true_cov, dim, num_true_samples=None):
self.dim = dim
self.particles = np.array(particles).reshape(-1, dim)
self.n_particles = self.particles.shape[0]
self.true_mean = np.array(true_mean).reshape(dim)
self.true_cov = np.array(true_cov).reshape(dim, dim)
if num_true_samples is None:
self.n_samples = max(5000, 10 * cmd_args.num_particles)
else:
self.n_samples = num_true_samples
def square_mmd(self, kernel_type='gaussian'):
p_particles = np.random.multivariate_normal(self.true_mean.astype(np.float64), self.true_cov.astype(np.float64), self.n_samples)
return square_mmd_fine(p_particles, self.particles, self.n_samples, self.n_particles, kernel_type)
def cross_entropy(self):
p_particles = np.random.multivariate_normal(self.true_mean.astype(np.float64), self.true_cov.astype(np.float64), self.n_samples)
return -np.array(e_p_log_q(p_particles, self.particles).cpu())
def integral_eval(self, test_function):
full_path = os.path.realpath(__file__)
path = os.path.dirname(full_path)
filename = path+'/test_function/test_function_'+str(cmd_args.gauss_dim)+'.pkl'
with open(filename, 'rb') as f:
matrix_aa, matrix_a, matrix_b, a, b = pickle.load(f)
if test_function == 'x':
return self.dist_of_mean()
elif test_function == 'xAx':
return self.distance_of_xax(matrix_aa)
elif test_function == 'quadratic':
return self.distance_of_quadratic(matrix_a, a, matrix_b, b)
else:
print('test function not supported')
def dist_of_mean(self, q_samples=None):
if q_samples is None:
q_samples = self.particles
else:
q_samples = np.array(q_samples).reshape(-1, self.dim)
q_mean = np.mean(q_samples, 0)
return LA.norm(q_mean-self.true_mean)
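    # Editor's note (sketch, not part of the original class): distance_of_xax
    # below relies on the identity E[x' A x] = trace(A @ cov) + mean' A mean for
    # x ~ N(mean, cov); the helper restates it for a single matrix A.
    @staticmethod
    def _gaussian_quadratic_mean(matrix_a, mean, cov):
        mean = np.asarray(mean).reshape(-1, 1)
        return np.trace(np.matmul(matrix_a, cov)) + np.matmul(mean.T, np.matmul(matrix_a, mean)).item()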
def distance_of_xax(self, matrix_a, q_samples=None):
"""||E_q[x'Ax] - E_p[x'Ax]||"""
if q_samples is None:
q_samples = self.particles
else:
q_samples = np.array(q_samples).reshape(-1, self.dim)
mean = self.true_mean.reshape(1, self.dim)
true_val = np.trace(np.matmul(matrix_a, self.true_cov))
true_val += np.sum(np.dot(mean, np.matmul(matrix_a, mean.T)))
est_ax =
|
np.matmul(matrix_a, q_samples.T)
|
numpy.matmul
|
import os, glob, json, datetime, sys, pprint, subprocess, io, threading, configparser
from traits.api import HasTraits, Button, Instance, List, Str, Enum, Float, File, Int, Range, Bool, on_trait_change, \
Color, Directory, Array
from traitsui.api import View, Item, VGroup, HSplit, HGroup, FileEditor, ListEditor, DirectoryEditor
from tvtk.pyface.scene_editor import SceneEditor
from tvtk.pyface.scene import Scene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.core.api import PipelineBase, Source
from mayavi import mlab
import astropy.io.fits as fits
import numpy as np
from collections import namedtuple
from scipy.interpolate import splrep, splev
from tvtk.api import tvtk
from pyface.image_resource import ImageResource
class StellarMovementSimulation(HasTraits):
FitsFileData = namedtuple('FitsFileData', 'xend yend zend data_min data_max')
fits_file_data = FitsFileData(10,10,10,0.0,10)
xstart_low = Int(0)
xstart_high = Int(fits_file_data.xend)
xstart = Range('xstart_low', 'xstart_high', 0, mode='slider')
xend_low = Int(0)
xend_high = Int(fits_file_data.xend)
xend = Range('xend_low', 'xend_high', fits_file_data.xend, mode='slider')
ystart_low = Int(0)
ystart_high = Int(fits_file_data.yend)
ystart = Range('ystart_low', 'ystart_high', mode='slider')
yend_low = Int(0)
yend_high = Int(fits_file_data.yend)
    yend = Range('yend_low', 'yend_high', fits_file_data.yend, mode='slider')
zstart_low = Int(0)
zstart_high = Int(fits_file_data.zend)
zstart = Range('zstart_low', 'zstart_high', mode='slider')
zend_low = Int(0)
zend_high = Int(fits_file_data.zend)
    zend = Range('zend_low', 'zend_high', fits_file_data.zend, mode='slider')
for i in range(1, 11):
if i == 1:
exec('is_star_curve_visible_' + str(i) + ' = Bool(True)')
else:
exec('is_star_curve_visible_' + str(i) + ' = Bool(False)')
exec('radius_' + str(i) + " = Range(0.0, 15.0, 0.0, mode='slider')")
exec('angle0_' + str(i) + " = Range(0,360, mode='slider')")
exec('rot_vel_' + str(i) + " = Range(-25.0, 25.0, 0.0, mode='slider')")
exec('x0_' + str(i) + ' = Float(0.0)')
exec('y0_' + str(i) + ' = Float(0.0)')
exec('z0_' + str(i) + ' = Float(0.0)')
exec('z_vel_' + str(i) + ' = Float(0.0)')
exec('color_' + str(i) + " = Color('red')")
exec('initial_tube_radius_' + str(i) + ' = Float(5)')
exec('is_star_curve_display_' + str(i) + ' = Bool(True)')
starCurveGroups = []
for i in range(1, 11):
starCurveGroup = VGroup(HGroup(
Item('radius_' + str(i), tooltip=u"Curve Radius", show_label=True, width=4),
Item('angle0_' + str(i), tooltip=u"Initial Rotation Angle", show_label=True, width=4),
Item('rot_vel_' + str(i), tooltip=u"Rotation Velocity", show_label=True, width=4),
),
HGroup(
Item('x0_' + str(i), tooltip=u"Initial X Axis Position", show_label=True),
Item('y0_' + str(i), tooltip=u"Initial Y Axis Position", show_label=True),
Item('z0_' + str(i), tooltip=u"Initial Z Axis Position", show_label=True),
Item('z_vel_' + str(i), tooltip=u"Z Axis Velocity", show_label=True),
Item('color_' + str(i), tooltip=u"Curve Color", show_label=True),
Item('initial_tube_radius_' + str(i), label='TB', tooltip=u"Initial Tube Radius in cone approximation", show_label=True),
Item('is_star_curve_display_' + str(i), tooltip=u"Toggle display of star curve", label='Display')),
label='Star Curve #' + str(i), visible_when='is_star_curve_visible_' + str(i) + ' == True') #
starCurveGroups.append(starCurveGroup)
stars_curves_number = Range(1, 10, mode='spinner')
use_letters = Bool(True)
text_prefix = Str('Star_')
show_star_curves_text = Bool(False)
plot_curves_button = Button(u"Plot Curves")
show_cone_approximation = Bool(False)
fitsfile = File('test.fits', filter=[u"*.fits"])
plotbutton = Button(u"Plot")
clearbutton = Button(u"Clear")
scene = Instance(MlabSceneModel, ())
scene_x = Instance(MlabSceneModel, ())
scene_y = Instance(MlabSceneModel, ())
scene_z = Instance(MlabSceneModel, ())
is_scene_x = Bool(False)
is_scene_y = Bool(False)
is_scene_z = Bool(False)
x_plane_low = Int(0)
x_plane_high = Int(fits_file_data.xend)
x_plane = Range('x_plane_low', 'x_plane_high', 0, mode='slider')
x_plane_slice_position = Int(0)
y_plane_low = Int(0)
    y_plane_high = Int(fits_file_data.yend)
y_plane = Range('y_plane_low', 'y_plane_high', 0, mode='slider')
y_plane_slice_position = Int(0)
z_plane_low = Int(0)
    z_plane_high = Int(fits_file_data.zend)
z_plane = Range('z_plane_low', 'z_plane_high', 0, mode='slider')
z_plane_slice_position = Int(0)
ipw_3d_x = Instance(PipelineBase)
ipw_3d_y = Instance(PipelineBase)
ipw_3d_z = Instance(PipelineBase)
azimuth_x = Range(0, 360, 0, mode='spinner')
elevation_x = Range(0, 360, 270, mode='spinner')
distance_x = Int(-45)
setview_x_button = Button(u"Set View (X)")
azimuth_y = Range(0, 360, 270, mode='spinner')
elevation_y = Range(0, 360, 90, mode='spinner')
distance_y = Int(45)
setview_y_button = Button(u"Set View (Y)")
azimuth_z = Range(0, 360, 0, mode='spinner')
elevation_z = Range(0, 360, 0, mode='spinner')
distance_z = Int(-45)
setview_z_button = Button(u"Set View (Z)")
_axis_names = dict(x=0, y=1, z=2)
save_the_scene_button = Button(u"Save Scene")
save_in_file = Str("test.json")
load_scene_file = File('test.json', filter=[u"*.json"])
load_the_scene_button = Button('Load Scene')
result = Str('Saved Successfully')
display_success_result = Bool(False)
display_fail_result = Bool(False)
save_backup = Bool(True)
record = Button(u"Record Video")
stop_record = Button(u"Stop Recording")
spin = Button(u"Spin")
num_pics = Range(1,1000,200)
quality = Range(0,20,20,mode='spinner')
fps = Range(0,60,40,mode='spinner')
delay = Int(0)
angle = Int(360)
record_video_result = Str('Saved Successfully')
display_success_record_video_result = Bool(False)
display_fail_record_video_result = Bool(False)
is_recording_video = Bool(False)
record_video_state = Str('Testing')
video_name = Str()
save_video_dir = Directory()
    data_min = round(float(fits_file_data.data_min), 2)
    data_max = round(float(fits_file_data.data_max), 2)
minDT = Float(data_min)
maxDT = Float(data_max)
opacity = Range(0.0, 1.0, 0.1)
azimuth = Range(0, 360, 180, mode='slider')
elevation = Range(0, 180, 0, mode='slider')
distance = Int(-350)
setview_button = Button(u"Set View")
update_view = Bool(False)
display_arrows = Bool(True)
arrows_density = Range(1,100,5, mode='spinner')
factor = Float(1.5)
dx = Range(-256,256,0,mode='slider')
dy = Range(-256,256,0,mode='slider')
dz = Range(-256,256,0,mode='slider')
plot_colormap = Enum("Earth", "Rainbow", "Gray")
plot_colormap_button1 = Button(u"Set Colormap")
plot_colormap_button2 = Button(u"Set Colormap")
plot_colormap_button3 = Button(u"Set Colormap")
background_color = Color((127,127,127))
blank_gap = Str(''.ljust(10))
intensity_units = Str('Jy')
is_debug_mode = Bool(False)
view = View(
HSplit(
HGroup(
VGroup(
HGroup(Item("fitsfile", label=u"FITS Datacube:", show_label=True,
editor=FileEditor(dialog_style='open')),
Item("plotbutton", show_label=False, width=3),
Item("clearbutton", show_label=False, width=3), show_border=True),
VGroup(
HGroup(Item('xstart', tooltip=u"starting pixel in X axis", show_label=True, springy=True),
Item('xend', tooltip=u"ending pixel in X axis", show_label=True, springy=True)),
HGroup(Item('ystart', tooltip=u"starting pixel in Y axis", show_label=True, springy=True),
Item('yend', tooltip=u"ending pixel in Y axis", show_label=True, springy=True)),
HGroup(Item('zstart', tooltip=u"starting pixel in Z axis", show_label=True, springy=True),
Item('zend', tooltip=u"ending pixel in Z axis", show_label=True, springy=True)),
HGroup(Item('minDT', tooltip=u"Minimum display threshold", label="Min DT", show_label=True,
springy=True),
Item('maxDT', tooltip=u"Maximum display threshold", label="Max DT", show_label=True,
springy=True),
Item('opacity', tooltip=u"Opacity of the scene", show_label=True), show_labels=False),
show_border=True, label='Datacube View Configuration'),
HGroup(Item('display_arrows', tooltip='Toggle display of arrows', label='Display Arrows'),
Item('arrows_density', tooltip='Reduce number of arrows so that only 1 out of every \'Arrows Density\' arrows are displayed', show_label=True, style_sheet='*{width:20}'),
show_border=True, label='Velocity Arrows View Configuration'),
VGroup(HGroup(
Item('num_pics', tooltip=u"Number of pictures making up the video", label='Pics #', style_sheet='*{width:30}'),
Item('fps', tooltip=u"Frames per sec of video file", label='FPS', style_sheet='*{width:15}'),
Item('quality', tooltip=u"Quality of plots, 0 is worst, 8 is good, 20 is perfect", show_label=True, style_sheet='*{width:15}'),
Item('angle', tooltip=u"Angle the cube spins", show_label=True, springy=True),
Item("spin", tooltip=u"Spin 360 degrees", show_label=False, springy=True),
Item("record", tooltip="Make a MP4 video", show_label=False, springy=True),
Item("stop_record", tooltip="Stop recording video", show_label=False, springy=True, visible_when='is_recording_video == True')),
# HGroup(
# ,
# ),
HGroup(Item('video_name', tooltip=u"Video file name", label='Video File Name', style_sheet='*{width:50}'),
Item("save_video_dir", label=u"Save Video File Folder", tooltip=u"Folder where video file will be saved", editor=DirectoryEditor(dialog_style='open'), springy=True),
),
HGroup(
Item('record_video_state', style='readonly', label='State:', visible_when='is_recording_video == True', style_sheet='*{font-weight:bold}'),
Item('record_video_result', style='readonly', visible_when='display_success_record_video_result == True',
style_sheet="*{color:'green'}"),
Item('record_video_result', style='readonly', visible_when='display_fail_record_video_result == True',
style_sheet="*{color:'red'}"),
),
show_border=True, label='Spinning View'),
HGroup(Item('azimuth', tooltip=u"Angle on the x-y plane which varies from 0-360 degrees",
show_label=True, springy=True),
Item('elevation', tooltip=u"Angle from the z axis and varies from 0-180 degrees",
show_label=True, springy=True),
Item('distance', tooltip=u"Radius of the sphere centered around the visualization",
show_label=True, springy=True),
Item('update_view', tooltip='Toggle live updating of view', label='Update View'),
Item("setview_button", show_label=False, width=3),
show_border=True, label='Viewing Angles and Distance'),
HGroup(
Item('background_color', tooltip='Change 3d plot background color', label='Background Color'),
Item("plot_colormap", tooltip=u"Choose the colormap for the 3d plot", label='Choose Colormap'),
Item("intensity_units", tooltip=u"Legend colorscale's intensity units", label='Intensity Units'),
# Item('plot_colormap_button1', show_label=False, visible_when="plot_colormap=='Earth'"),
# Item('plot_colormap_button2', show_label=False, visible_when="plot_colormap=='Rainbow'"),
# Item('plot_colormap_button3', show_label=False, visible_when="plot_colormap=='Gray'"),
show_border=True, label='Color and Legend Settings'
),
HGroup(Item('show_star_curves_text', tooltip=u"Show star curves names in the 3D plot next to the curve's start",
label='Show Star Curve Names'),
Item('text_prefix', tooltip=u"Star curve names prefix",
label='Star Curve Name Prefix'),
                   Item('use_letters', tooltip=u"Use letters instead of numbers to differentiate stars",
label='Use Letters'),
show_border=True, label='Star Curves Optional'),
VGroup(
HGroup(Item("save_the_scene_button", label="Save View Settings & Star Curves",
tooltip=u"Save current scene in a JSON file"),
Item("save_in_file", tooltip=u"JSON filename to save", show_label=False),
Item('save_backup', tooltip='Save backup of existing file in format FILENAME_YEAR.MONTH.DAY_HOUR.MINUTE.SEC.json', label='Save Backup')),
HGroup(Item("load_scene_file", label=u"JSON filename to load:", show_label=True,
editor=FileEditor(dialog_style='open')),
Item("load_the_scene_button", tooltip=u"JSON file name to load", show_label=False)),
Item('result', style='readonly', visible_when='display_success_result == True',
style_sheet="*{color:'green'}"),
Item('result', style='readonly', visible_when='display_fail_result == True',
style_sheet="*{color:'red'}"),
show_border=True, label='Save and Load Scenes'),
label="View Settings",
show_labels=False
),
VGroup(
HGroup(
Item('stars_curves_number', label='Stars #', style_sheet='*{width:15}'),
Item('show_cone_approximation', tooltip='Toggle approximation of stellar wind as a cone like shape', label='Cones'),
# Item('plot_curves_button', show_label=False),
Item('factor', tooltip='Star curves dimensions scale factor', show_label=True),
Item('dx', tooltip='Displacement of star curves in X axis', label='DX'),
Item('dy', tooltip='Displacement of star curves in Y axis', label='DY'),
Item('dz', tooltip='Displacement of star curves in Z axis', label='DZ'),
),
# HGroup(
#
# ),
starCurveGroups, label="Star Curves"),
VGroup(
VGroup(
HGroup(
Item('x_plane', tooltip=u"X plane position in X axis", label='X Plane Index', springy=True),
Item('is_scene_x', label='3D Display')),
HGroup(
VGroup(
HGroup(
Item('azimuth_x', tooltip=u"Angle on the x-y plane which varies from 0-360 degrees",
label='Azimuth (X)', style_sheet='*{width:25}')),
HGroup(
Item('elevation_x', tooltip=u"Angle from the z axis and varies from 0-180 degrees",
label='Elevation (X)', style_sheet='*{width:25}')),
HGroup(Item('distance_x',
tooltip=u"Radius of the sphere centered around the visualization",
label='Distance (X)', style_sheet='*{width:15}')),
Item("setview_x_button", show_label=False, width=30), visible_when='is_debug_mode == True'),
Item('blank_gap', style='readonly', show_label=False, ),
Item('scene_x', editor=SceneEditor(scene_class=Scene), show_label=False),
Item('blank_gap', style='readonly', show_label=False, )),
),
VGroup(
HGroup(
Item('y_plane', tooltip=u"Y plane position in Y axis", label='Y Plane Index', springy=True),
Item('is_scene_y', label='3D Display')),
HGroup(
VGroup(
HGroup(
Item('azimuth_y', tooltip=u"Angle on the x-y plane which varies from 0-360 degrees",
label='Azimuth (Y)', style_sheet='*{width:25}')),
HGroup(
Item('elevation_y', tooltip=u"Angle from the z axis and varies from 0-180 degrees",
label='Elevation (Y)', style_sheet='*{width:25}')),
HGroup(Item('distance_y',
tooltip=u"Radius of the sphere centered around the visualization",
label='Distance (Y)', style_sheet='*{width:15}')),
Item("setview_y_button", show_label=False, width=30), visible_when='is_debug_mode == True'),
Item('blank_gap', style='readonly', show_label=False, ),
Item('scene_y', editor=SceneEditor(scene_class=Scene), show_label=False),
Item('blank_gap', style='readonly', show_label=False, )),
),
VGroup(
HGroup(
Item('z_plane', tooltip=u"Z plane position in Z axis", label='Z Plane Index', springy=True),
Item('is_scene_z', label='3D Display')),
HGroup(
VGroup(
HGroup(
Item('azimuth_z', tooltip=u"Angle on the x-y plane which varies from 0-360 degrees",
label='Azimuth (Z)', style_sheet='*{width:25}')),
HGroup(
Item('elevation_z', tooltip=u"Angle from the z axis and varies from 0-180 degrees",
label='Elevation (Z)', style_sheet='*{width:25}')),
HGroup(Item('distance_z',
tooltip=u"Radius of the sphere centered around the visualization",
label='Distance (Z)', style_sheet='*{width:15}')),
Item("setview_z_button", show_label=False, width=30), visible_when='is_debug_mode == True'),
Item('blank_gap', style='readonly', show_label=False, ),
Item('scene_z', editor=SceneEditor(scene_class=Scene), show_label=False),
Item('blank_gap', style='readonly', show_label=False, )),
),
label="Plane Slices"),
# VGroup(Item('logText', show_label=False, style='custom')),
layout="tabbed"
),
VGroup(
Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
resizable=True,
height=600,
width=600
), show_labels=False
)
),
resizable=True,
title=u"Stellar Movement Simulation",
icon=ImageResource('shooting_star2.png')
)
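# __init__ (below) seeds default values for all traits: the per-star curve parameters for
# stars 1-10, scene extents, viewing angles, video-recording options and, when a conf.ini
# file is present, the ffmpeg path, default FITS file name and minimum intensity threshold.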
def __init__(self):
self.colormap = 'gist_earth'
self.is_loading = True
for i in range(1, 11):
if i == 1:
setattr(self, 'is_star_curve_visible_' + str(i), True)
else:
setattr(self, 'is_star_curve_visible_' + str(i), False)
setattr(self, 'radius_' + str(i), 0.0)
setattr(self, 'angle0_' + str(i), 0)
setattr(self, 'rot_vel_' + str(i), 0.0)
setattr(self, 'x0_' + str(i), 0.0)
setattr(self, 'y0_' + str(i), 0.0)
setattr(self, 'z0_' + str(i), 0.0)
setattr(self, 'z_vel_' + str(i), 0.0)
setattr(self, 'color_' + str(i), (255,0,0))
setattr(self, 'initial_tube_radius_' + str(i), 5)
setattr(self, 'is_star_curve_display_' + str(i), True)
self.star_curve_1 = None
self.star_curve_2 = None
self.star_curve_3 = None
self.star_curve_4 = None
self.star_curve_5 = None
self.star_curve_6 = None
self.star_curve_7 = None
self.star_curve_8 = None
self.star_curve_9 = None
self.star_curve_10 = None
self.star_curve_cone_1 = [None] * 100
self.star_curve_cone_2 = [None] * 100
self.star_curve_cone_3 = [None] * 100
self.star_curve_cone_4 = [None] * 100
self.star_curve_cone_5 = [None] * 100
self.star_curve_cone_6 = [None] * 100
self.star_curve_cone_7 = [None] * 100
self.star_curve_cone_8 = [None] * 100
self.star_curve_cone_9 = [None] * 100
self.star_curve_cone_10 = [None] * 100
self.star_curve_start_1 = None
self.star_curve_start_2 = None
self.star_curve_start_3 = None
self.star_curve_start_4 = None
self.star_curve_start_5 = None
self.star_curve_start_6 = None
self.star_curve_start_7 = None
self.star_curve_start_8 = None
self.star_curve_start_9 = None
self.star_curve_start_10 = None
self.star_curve_end_1 = None
self.star_curve_end_2 = None
self.star_curve_end_3 = None
self.star_curve_end_4 = None
self.star_curve_end_5 = None
self.star_curve_end_6 = None
self.star_curve_end_7 = None
self.star_curve_end_8 = None
self.star_curve_end_9 = None
self.star_curve_end_10 = None
self.star_curve_text_1 = None
self.star_curve_text_2 = None
self.star_curve_text_3 = None
self.star_curve_text_4 = None
self.star_curve_text_5 = None
self.star_curve_text_6 = None
self.star_curve_text_7 = None
self.star_curve_text_8 = None
self.star_curve_text_9 = None
self.star_curve_text_10 = None
self.letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
self.xstart_low = 0
self.xstart_high = 10
self.xstart = 0
self.xend_low = 0
self.xend_high = 10
self.xend = 10
self.ystart_low = 0
self.ystart_high = 10
self.ystart = 0
self.yend_low = 0
self.yend_high = 10
self.yend = 10
self.zstart_low = 0
self.zstart_high = 10
self.zstart = 0
self.zend_low = 0
self.zend_high = 10
self.zend = 10
self.minDT = 0.0
self.maxDT = 10.0
self.opacity = 0.1
self.display_arrows = True
self.arrows_density = 5
self.num_pics = 200
self.fps = 40
self.quality = 20
self.angle = 360
self.video_name = 'test.mp4'
self.save_video_dir = os.path.join(os.getcwd(), 'Screenshots')
self.azimuth = 180
self.elevation = 0
self.distance = -350
self.update_view = False
self.background_color = (127,127,127)
self.plot_colormap = 'Earth'
self.show_star_curves_text = False
self.text_prefix = 'Star_'
self.use_letters = True
self.save_in_file = 'test.json'
self.load_scene_file = 'test.json'
self.save_backup = True
self.stars_curves_number = 1
self.factor = 1.5
self.dx = -256
self.dy = -256
self.dz = -256
self.ffmpeg_path = 'C:\\ffmpeg\\bin\\ffmpeg'
self.fitsfile = 'test.fits'
self.min_itensity_for_velocity = 0.12
if os.path.exists('conf.ini'):
print('loading from conf.ini')
config = configparser.ConfigParser()
config.read('conf.ini')
self.ffmpeg_path = config['DEFAULT']['FFMPEG_PATH']
self.fitsfile = config['DEFAULT']['DEFAULT_FITS_FILE_NAME']
self.min_itensity_for_velocity = float(config['DEFAULT']['MIN_INTENSITY_FOR_VELOCITY'])
print(self.ffmpeg_path, self.fitsfile)
self.ipw_3d_x = None
self.ipw_3d_y = None
self.ipw_3d_z = None
self.ipw_x = None
self.ipw_y = None
self.ipw_z = None
self.ipw_axes_x = None
self.ipw_axes_y = None
self.ipw_axes_z = None
self.data = []
self.data_src = None
self.data_src_x = None
self.data_src_y = None
self.data_src_z = None
self.is_loading = False
def _background_color_changed(self):
# if self.fig is None:
# return
color = self.background_color.getRgb()[:-1]
color_temp = (color[0] / 255, color[1] / 255, color[2] / 255)
mlab.gcf().scene.background = color_temp
def _plot_colormap_changed(self):
if self.obj is None:
return
colormap = ''
if self.plot_colormap == 'Earth':
colormap = 'gist_earth'
elif self.plot_colormap == 'Rainbow':
colormap = 'gist_rainbow'
elif self.plot_colormap == 'Gray':
colormap = 'gist_gray'
self.obj.module_manager.scalar_lut_manager.lut_mode = colormap
def _azimuth_changed(self):
if self.update_view:
mlab.view(self.azimuth, self.elevation, self.distance)
def _elevation_changed(self):
if self.update_view:
mlab.view(self.azimuth, self.elevation, self.distance)
def _distance_changed(self):
if self.update_view:
mlab.view(self.azimuth, self.elevation, self.distance)
def _arrows_density_changed(self):
self.obj2.glyph.mask_points.on_ratio = self.arrows_density
mlab.draw()
def _display_arrows_changed(self):
self.obj2.actor.property.opacity = 1 if self.display_arrows else 0
def _stars_curves_number_changed(self):
for i in range(1, 11):
# self.is_star_curve_visible_1 = True
# print(self['is_star_curve_visible_' + str(i)])
if i <= self.stars_curves_number:
setattr(self, 'is_star_curve_visible_' + str(i), True)
else:
setattr(self, 'is_star_curve_visible_' + str(i), False)
self.handle_star_curves_changes(-1)
def _load_the_scene_button_fired(self):
try:
print('_load_the_scene_fired')
print('Loading settings from ' + self.load_scene_file)
scene_json_file = open(self.load_scene_file, 'r')
scene_json_str = scene_json_file.read()
scene_json_file.close()
scene = json.loads(scene_json_str)
print(scene)
self.is_loading = True
self.fitsfile = scene['scene']['sceneSettings']['fits_file']
self.xstart = scene['scene']['sceneSettings']['scene_settings']['xstart']
self.xend = scene['scene']['sceneSettings']['scene_settings']['xend']
self.ystart = scene['scene']['sceneSettings']['scene_settings']['ystart']
self.yend = scene['scene']['sceneSettings']['scene_settings']['yend']
self.zstart = scene['scene']['sceneSettings']['scene_settings']['zstart']
self.zend = scene['scene']['sceneSettings']['scene_settings']['zend']
self.minDT = scene['scene']['sceneSettings']['scene_settings']['minDT']
self.maxDT = scene['scene']['sceneSettings']['scene_settings']['maxDT']
self.opacity = scene['scene']['sceneSettings']['scene_settings']['opacity']
self.display_arrows = scene['scene']['sceneSettings']['velocityArrowsSettings']['display_arrows']
self.arrows_density = scene['scene']['sceneSettings']['velocityArrowsSettings']['arrows_density']
self.azimuth = scene['scene']['sceneSettings']['viewSettings']['azimuth']
self.elevation = scene['scene']['sceneSettings']['viewSettings']['elevation']
self.distance = scene['scene']['sceneSettings']['viewSettings']['distance']
self.background_color = tuple(map(int, scene['scene']['sceneSettings']['viewSettings']['background_color'].replace('(', '').replace(')', '').split(', ')))
self.plot_colormap = scene['scene']['sceneSettings']['viewSettings']['plot_colormap']
self.num_pics = scene['scene']['sceneSettings']['spinningViewsSettings']['num_pics']
self.quality = scene['scene']['sceneSettings']['spinningViewsSettings']['quality']
self.fps = scene['scene']['sceneSettings']['spinningViewsSettings']['fps']
self.angle = scene['scene']['sceneSettings']['spinningViewsSettings']['angle']
self.video_name = scene['scene']['sceneSettings']['spinningViewsSettings']['video_name']
self.save_video_dir = scene['scene']['sceneSettings']['spinningViewsSettings']['save_video_dir']
self.stars_curves_number = scene['scene']['starCurvesSettings']['starCurvesNumber']
self.show_cone_approximation = scene['scene']['starCurvesSettings']['show_cone_approximation']
self.factor = scene['scene']['starCurvesSettings']['factor']
self.dx = scene['scene']['starCurvesSettings']['dx']
self.dy = scene['scene']['starCurvesSettings']['dy']
self.dz = scene['scene']['starCurvesSettings']['dz']
self.show_star_curves_text = scene['scene']['starCurvesSettings']['show_star_curves_text']
self.text_prefix = scene['scene']['starCurvesSettings']['text_prefix']
self.use_letters = scene['scene']['starCurvesSettings']['use_letters']
starCurves = scene['scene']['starCurvesSettings']['starCurves']
for starCurve in starCurves:
num = starCurve['num']
setattr(self, 'radius_' + str(num), starCurve['r'])
print(starCurve['angle0'])
setattr(self, 'angle0_' + str(num), starCurve['angle0'])
setattr(self, 'rot_vel_' + str(num), starCurve['rot_vel'])
setattr(self, 'x0_' + str(num), starCurve['x0'])
setattr(self, 'y0_' + str(num), starCurve['y0'])
setattr(self, 'z0_' + str(num), starCurve['z0'])
setattr(self, 'z_vel_' + str(num), starCurve['z_vel'])
setattr(self, 'color_' + str(num),
tuple(map(int, starCurve['color'].replace('(', '').replace(')', '').split(', '))))
setattr(self, 'initial_tube_radius_' + str(num), starCurve['init_tb'])
self.is_loading = False
self.display_success_result = True
self.display_fail_result = False
self.result = 'Loaded Successfully'
self.update_display('view_configuration')
# time.sleep(5)
self.handle_star_curves_changes(-1)
except Exception:
err_type, err_value, traceback = sys.exc_info()
self.display_success_result = False
self.display_fail_result = True
self.result = 'Load Failed: %s - %s' % (err_type, err_value)
raise
def _save_the_scene_button_fired(self):
try:
starCurvesParams = self.get_star_curves_params()
scene = {
'scene': {
'sceneSettings': {
'fits_file': self.fitsfile,
'scene_settings': {
'xstart': self.xstart,
'xend': self.xend,
'ystart': self.ystart,
'yend': self.yend,
'zstart': self.zstart,
'zend': self.zend,
'minDT': self.minDT,
'maxDT': self.maxDT,
'opacity': self.opacity,
},
'velocityArrowsSettings': {
'display_arrows': self.display_arrows,
'arrows_density': self.arrows_density,
},
'spinningViewsSettings': {
'num_pics': self.num_pics,
'quality': self.quality,
'fps': self.fps,
'angle': self.angle,
'video_name': self.video_name,
'save_video_dir': self.save_video_dir
},
'viewSettings': {
'azimuth': self.azimuth,
'elevation': self.elevation,
'distance': self.distance,
'background_color': str(self.background_color.getRgb()[:-1]),
'plot_colormap': self.plot_colormap
},
},
'starCurvesSettings': {
'starCurvesNumber': self.stars_curves_number,
'show_cone_approximation': self.show_cone_approximation,
'factor': self.factor,
'dx': self.dx,
'dy': self.dy,
'dz': self.dz,
'show_star_curves_text': self.show_star_curves_text,
'text_prefix': self.text_prefix,
'use_letters': self.use_letters,
'starCurves': []
}
}
}
for i in range(0, 10):
scene['scene']['starCurvesSettings']['starCurves'].append({
'num': i + 1,
'r': starCurvesParams[i].r,
'angle0': starCurvesParams[i].angle0,
'rot_vel': starCurvesParams[i].rot_vel,
'x0': starCurvesParams[i].x0,
'y0': starCurvesParams[i].y0,
'z0': starCurvesParams[i].z0,
'z_vel': starCurvesParams[i].z_vel,
'color': str(starCurvesParams[i].color),
'init_tb': starCurvesParams[i].init_tb,
})
print(scene)
scene_json_str = json.dumps(scene, indent=4)
if self.save_backup:
filename = self.save_in_file
filename = filename.replace('.json', '_%s.json' % datetime.datetime.now().strftime("%Y.%m.%d_%H.%M.%S"))
print('Renaming existing json file \'%s\' to \'%s\'' % (self.save_in_file, filename))
if os.path.exists(self.save_in_file):
os.rename(self.save_in_file, filename)
print('Saving settings to ' + self.save_in_file)
scene_json_file = open(self.save_in_file, 'w')
scene_json_file.write(scene_json_str)
scene_json_file.close()
self.display_success_result = True
self.display_fail_result = False
self.result = 'Saved Successfully at ' + datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
except Exception:
err_type, err_value, traceback = sys.exc_info()
self.display_success_result = False
self.display_fail_result = True
self.result = 'Save Failed: %s - %s' % (err_type, err_value)
raise
def _stop_record_fired(self):
self.is_recording_video = False
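# _record_fired (below) renders num_pics frames, rotating the camera by angle/num_pics
# degrees per frame, saves them as Screenshots/image_NNN.png, and then stitches them into
# a video with the external ffmpeg binary configured via conf.ini (FFMPEG_PATH).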
def _record_fired(self):
try:
print('_record_fired')
self.display_success_record_video_result = False
self.display_fail_record_video_result = False
screenshots_dir = 'Screenshots'
if os.path.exists(screenshots_dir) is False:
print('Creating screenshots directory')
os.mkdir(screenshots_dir)
print('Clearing screenshots directory of existing png files')
png_files = [f for f in os.listdir(screenshots_dir) if os.path.isfile(os.path.join(screenshots_dir, f)) and f.endswith('.png')]
if len(png_files) > 0:
for i in range(len(png_files)):
os.remove(os.path.join(screenshots_dir, png_files[i]))
self.is_recording_video = True
## Quality of the movie: 0 is the worst, 8 is ok.
self.obj.scene.anti_aliasing_frames = self.quality
self.obj.scene.disable_render = True
for i in range(0, self.num_pics):
if self.is_recording_video is not True:
break
self.record_video_state = 'Saving Image %d/%d' % (i+1, self.num_pics)
mlab.savefig(os.path.join(screenshots_dir, 'image_' + str(i).zfill(3) + '.png'))
self.obj.scene.camera.azimuth(self.angle / self.num_pics)
self.obj.scene.render()
if self.is_recording_video is not True:
return
self.record_video_state = 'Running ffmpeg'
self.obj.scene.disable_render = False
image_path = os.path.join(os.getcwd(), screenshots_dir, 'image_%03d.png')
output_path = os.path.join(self.save_video_dir, self.video_name)
ffmpeg_command_with_args = self.ffmpeg_path + ' -r ' + str(self.fps) + ' -f image2 -s 964x954 -i ' + image_path + ' -vcodec libx264 -crf 0 -pix_fmt yuv420p ' + output_path
print(ffmpeg_command_with_args)
if os.path.exists(output_path):
os.remove(output_path)
completed = subprocess.run(ffmpeg_command_with_args.split(' '), stdout=subprocess.PIPE)
print('returncode:', completed.returncode)
print('{} bytes in stdout:\n{}'.format(len(completed.stdout),completed.stdout.decode('utf-8')))
self.is_recording_video = False
if completed.returncode == 0:
self.record_video_result = 'Recorded Video Successfully to ' + output_path
self.display_success_record_video_result = True
self.display_fail_record_video_result = False
else:
self.record_video_result = 'Record Video Failed - ffmpeg ERROR'
self.display_success_record_video_result = False
self.display_fail_record_video_result = True
except Exception:
self.is_recording_video = False
err_type, err_value, traceback = sys.exc_info()
self.display_success_record_video_result = False
self.display_fail_record_video_result = True
self.record_video_result = 'Record Video Failed: %s - %s' % (err_type, err_value)
raise
def _spin_fired(self):
self.obj.scene.disable_render = True
@mlab.animate
def anim():
i = 0
while i < self.num_pics:
self.obj.scene.camera.azimuth(360 / self.num_pics)
self.obj.scene.render()
i += 1
yield
a = anim()
self.obj.scene.disable_render = False
def _clearbutton_fired(self):
print('_clearbutton_fired')
mlab.clf()
self.sregion = None
self.region = None
self.vol = None
self.v_opt = None
self.obj = None
self.obj2 = None
self.ax = None
self.__init__()
def _plotbutton_fired(self):
print('_plotbutton_fired')
self.load_fits_file(self.fitsfile)
def _setview_button_fired(self):
mlab.view(self.azimuth, self.elevation, self.distance)
def _xstart_changed(self):
self.update_display(change_source='view_configuration')
def _xend_changed(self):
self.update_display(change_source='view_configuration')
def _ystart_changed(self):
self.update_display(change_source='view_configuration')
def _yend_changed(self):
self.update_display(change_source='view_configuration')
def _zstart_changed(self):
self.update_display(change_source='view_configuration')
def _zend_changed(self):
self.update_display(change_source='view_configuration')
def _minDT_changed(self):
self.update_display(change_source='view_configuration')
def _maxDT_changed(self):
self.update_display(change_source='view_configuration')
def _opacity_changed(self):
self.update_display(change_source='view_configuration')
def _factor_changed(self):
self.handle_star_curves_changes(-1)
def _dx_changed(self):
self.handle_star_curves_changes(-1)
def _dy_changed(self):
self.handle_star_curves_changes(-1)
def _dz_changed(self):
self.handle_star_curves_changes(-1)
def _show_star_curves_text_changed(self):
self.handle_star_curves_changes(-1)
def _text_prefix_changed(self):
self.handle_star_curves_changes(-1)
def _use_letters_changed(self):
self.handle_star_curves_changes(-1)
def _show_cone_approximation_changed(self):
self.handle_star_curves_changes(-1)
def get_star_curves_params(self):
StarCurveParams = namedtuple('StarCurveParams', 'r angle0 rot_vel x0 y0 z0 z_vel color init_tb')
starCurvesParams = []
starCurvesParams.append(
StarCurveParams(self.radius_1, self.angle0_1, self.rot_vel_1, self.x0_1, self.y0_1, self.z0_1, self.z_vel_1,
self.color_1.getRgb()[:-1], self.initial_tube_radius_1))
starCurvesParams.append(
StarCurveParams(self.radius_2, self.angle0_2, self.rot_vel_2, self.x0_2, self.y0_2, self.z0_2, self.z_vel_2,
self.color_2.getRgb()[:-1], self.initial_tube_radius_2))
starCurvesParams.append(
StarCurveParams(self.radius_3, self.angle0_3, self.rot_vel_3, self.x0_3, self.y0_3, self.z0_3, self.z_vel_3,
self.color_3.getRgb()[:-1], self.initial_tube_radius_3))
starCurvesParams.append(
StarCurveParams(self.radius_4, self.angle0_4, self.rot_vel_4, self.x0_4, self.y0_4, self.z0_4, self.z_vel_4,
self.color_4.getRgb()[:-1], self.initial_tube_radius_4))
starCurvesParams.append(
StarCurveParams(self.radius_5, self.angle0_5, self.rot_vel_5, self.x0_5, self.y0_5, self.z0_5, self.z_vel_5,
self.color_5.getRgb()[:-1], self.initial_tube_radius_5))
starCurvesParams.append(
StarCurveParams(self.radius_6, self.angle0_6, self.rot_vel_6, self.x0_6, self.y0_6, self.z0_6, self.z_vel_6,
self.color_6.getRgb()[:-1], self.initial_tube_radius_6))
starCurvesParams.append(
StarCurveParams(self.radius_7, self.angle0_7, self.rot_vel_7, self.x0_7, self.y0_7, self.z0_7, self.z_vel_7,
self.color_7.getRgb()[:-1], self.initial_tube_radius_7))
starCurvesParams.append(
StarCurveParams(self.radius_8, self.angle0_8, self.rot_vel_8, self.x0_8, self.y0_8, self.z0_8, self.z_vel_8,
self.color_8.getRgb()[:-1], self.initial_tube_radius_8))
starCurvesParams.append(
StarCurveParams(self.radius_9, self.angle0_9, self.rot_vel_9, self.x0_9, self.y0_9, self.z0_9, self.z_vel_9,
self.color_9.getRgb()[:-1], self.initial_tube_radius_9))
starCurvesParams.append(
StarCurveParams(self.radius_10, self.angle0_10, self.rot_vel_10, self.x0_10, self.y0_10, self.z0_10,
self.z_vel_10, self.color_10.getRgb()[:-1], self.initial_tube_radius_10))
return starCurvesParams
def get_star_curve_name(self, i):
if self.use_letters:
return self.letters[i-1]
else:
return str(i)
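# handle_star_curves_changes(curve_num) redraws the star-curve geometry: it hides the
# curves above stars_curves_number, then rebuilds either a single curve (curve_num >= 1)
# or all visible curves (curve_num == -1), including the optional cone approximation,
# the start/end glyphs and the 3D name labels.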
def handle_star_curves_changes(self, curve_num):
if self.is_loading:
return 0
print('handle_star_curves_changes', curve_num)
StarCurveParams = namedtuple('StarCurveParams', 'r angle0 rot_vel x0 y0 z0 z_vel color initial_tube_radius')
starCurvesParams = self.get_star_curves_params()
for i in range(self.stars_curves_number + 1, 11):
# exec('self.is_star_curve_visible_' + str(i) + ' = True')
star_curve = getattr(self, 'star_curve_' + str(i))
star_curve_start = getattr(self, 'star_curve_start_' + str(i))
star_curve_end = getattr(self, 'star_curve_end_' + str(i))
star_curve_text = getattr(self, 'star_curve_text_' + str(i))
star_curve_cone_parts = getattr(self, 'star_curve_cone_' + str(i))
for j in range(len(star_curve_cone_parts)):
if star_curve_cone_parts[j] is not None:
star_curve_cone_parts[j].actor.property.opacity = 0
if star_curve is not None:
star_curve.actor.property.opacity = 0
if star_curve_start is not None:
star_curve_start.actor.property.opacity = 0
if star_curve_end is not None:
star_curve_end.actor.property.opacity = 0
if star_curve_text is not None:
star_curve_text.actor.property.opacity = 0
min_range = curve_num
max_range = curve_num + 1
if curve_num == -1:
min_range = 1
max_range = self.stars_curves_number + 1
text_opacity_val = 1 if self.show_star_curves_text else 0
self.scene.disable_render = True
for j in range(min_range, max_range):
print('STAR', j)
star_curve = getattr(self, 'star_curve_' + str(j))
star_curve_start = getattr(self, 'star_curve_start_' + str(j))
star_curve_end = getattr(self, 'star_curve_end_' + str(j))
star_curve_text = getattr(self, 'star_curve_text_' + str(j))
star_curve_cone_parts = getattr(self, 'star_curve_cone_' + str(j))
is_star_curve_display = getattr(self, 'is_star_curve_display_' + str(j))
opacity_val = 1 if is_star_curve_display else 0
for k in range(len(star_curve_cone_parts)):
if star_curve_cone_parts[k] is not None:
star_curve_cone_parts[k].actor.property.opacity = opacity_val
if star_curve is not None:
star_curve.actor.property.opacity = opacity_val
if star_curve_start is not None:
star_curve_start.actor.property.opacity = 0
if star_curve_end is not None:
star_curve_end.actor.property.opacity = 0
if star_curve_text is not None:
star_curve_text.actor.property.opacity = text_opacity_val
if is_star_curve_display is False:
continue
# draw each curve
r = starCurvesParams[j - 1].r
angle0 = starCurvesParams[j - 1].angle0
rot_vel = starCurvesParams[j - 1].rot_vel
x0 = starCurvesParams[j - 1].x0
y0 = starCurvesParams[j - 1].y0
z0 = starCurvesParams[j - 1].z0
z_vel = starCurvesParams[j - 1].z_vel
color = starCurvesParams[j - 1].color
mid_point_x = 0
mid_point_y = 0
mid_point_z = 0
xx = 0
yy = 0
zz = 0
x = []
y = []
z = []
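# Sample the parametric star path: a circular orbit of radius r about (x0, y0) with
# initial phase angle0 (in degrees) and angular velocity rot_vel, plus a constant drift
# z_vel along z, evaluated at 500 points over t in [0, 20].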
for i in np.linspace(0, 20, 500):
angle = np.sign(rot_vel) * (np.abs(rot_vel) * i + angle0) * np.pi / 180
xx = x0 + r * np.cos(angle)
yy = y0 + r * np.sin(angle)
zz = z0 + z_vel * i
x.append(xx)
y.append(yy)
z.append(zz)
if i == 0:
print(j, r, rot_vel, angle0, x0, y0, z0, xx, yy, zz, z_vel)
if i == 10:
mid_point_x = xx
mid_point_y = yy
mid_point_z = zz
x = np.array(x)
y = np.array(y)
z = np.array(z)
color_temp = (color[0] / 255, color[1] / 255, color[2] / 255)
final_tube_radius = 0.25
initial_tube_radius = getattr(self, 'initial_tube_radius_' + str(j))
fff = 20
inc = (1.5 - initial_tube_radius) / len(x) * fff
# plot star cone
if (self.show_cone_approximation == True):
for i in range(0, len(x), fff):
if star_curve_cone_parts[int(i / fff)] is not None:
star_curve_cone_parts[int(i / fff)].remove()
star_curve_cone_parts[int(i / fff)] = mlab.plot3d(self.factor * x[i:i + fff - 1] + self.dx, -self.factor * y[i:i + fff - 1] + self.dy, -self.factor * z[i:i + fff - 1] + self.dz,
tube_radius=initial_tube_radius + i / fff * inc, color=color_temp)
else:
for i in range(0, len(x), fff):
if star_curve_cone_parts[int(i / fff)] is not None:
star_curve_cone_parts[int(i / fff)].remove()
star_curve_cone_parts[int(i / fff)] = None
# plot star curve
if star_curve is None:
setattr(self, 'star_curve_' + str(j),
mlab.plot3d(self.factor * x + self.dx, -self.factor * y + self.dy, -self.factor * z + self.dz, tube_radius=final_tube_radius,
color=color_temp)) # colormap='Spectral')
else:
star_curve.mlab_source.reset(x=self.factor * x + self.dx, y=-self.factor * y + self.dy, z=-self.factor * z + self.dz)
star_curve.actor.property.color = color_temp
# plot star start and stop glyphs
if star_curve_start is not None:
star_curve_start.mlab_source.set(x=self.factor * x[0] + self.dx, y=-self.factor * y[0] + self.dy, z=-self.factor * z[0] + self.dz)
else:
setattr(self, 'star_curve_start_' + str(j),
mlab.points3d(self.factor * x[0] + self.dx, -self.factor * y[0] + self.dy, -self.factor * z[0] + self.dz, color=color_temp, mode='cube'))
if star_curve_end is not None:
star_curve_end.mlab_source.set(x=self.factor * x[len(x) - 1] + self.dx, y=-self.factor * y[len(y) - 1] + self.dy, z=-self.factor * z[len(z) - 1] + self.dz)
else:
setattr(self, 'star_curve_end_' + str(j),
mlab.points3d(self.factor * x[len(x) - 1] + self.dx, -self.factor * y[len(y) - 1] + self.dy, -self.factor * z[len(z) - 1] + self.dz, color=color_temp, mode='sphere'))
if text_opacity_val:
if star_curve_text is not None:
star_curve_text.remove()
star_curve_text = mlab.text3d(self.factor * x[0] + self.dx, -self.factor * y[0] + self.dy - 3, -self.factor * z[0] + self.dz, self.text_prefix + self.get_star_curve_name(j),
color=color_temp, scale=1.2)
star_curve_text.actor.property.edge_color = color_temp
star_curve_text.actor.property.edge_visibility = True
setattr(self, 'star_curve_text_' + str(j), star_curve_text)
self.scene.disable_render = False
def _radius_1_changed(self):
self.handle_star_curves_changes(1)
def _angle0_1_changed(self):
self.handle_star_curves_changes(1)
def _rot_vel_1_changed(self):
self.handle_star_curves_changes(1)
def _x0_1_changed(self):
self.handle_star_curves_changes(1)
def _y0_1_changed(self):
self.handle_star_curves_changes(1)
def _z0_1_changed(self):
self.handle_star_curves_changes(1)
def _z_vel_1_changed(self):
self.handle_star_curves_changes(1)
def _radius_2_changed(self):
self.handle_star_curves_changes(2)
def _angle0_2_changed(self):
self.handle_star_curves_changes(2)
def _rot_vel_2_changed(self):
self.handle_star_curves_changes(2)
def _x0_2_changed(self):
self.handle_star_curves_changes(2)
def _y0_2_changed(self):
self.handle_star_curves_changes(2)
def _z0_2_changed(self):
self.handle_star_curves_changes(2)
def _z_vel_2_changed(self):
self.handle_star_curves_changes(2)
def _radius_3_changed(self):
self.handle_star_curves_changes(3)
def _angle0_3_changed(self):
self.handle_star_curves_changes(3)
def _rot_vel_3_changed(self):
self.handle_star_curves_changes(3)
def _x0_3_changed(self):
self.handle_star_curves_changes(3)
def _y0_3_changed(self):
self.handle_star_curves_changes(3)
def _z0_3_changed(self):
self.handle_star_curves_changes(3)
def _z_vel_3_changed(self):
self.handle_star_curves_changes(3)
def _radius_4_changed(self):
self.handle_star_curves_changes(4)
def _angle0_4_changed(self):
self.handle_star_curves_changes(4)
def _rot_vel_4_changed(self):
self.handle_star_curves_changes(4)
def _x0_4_changed(self):
self.handle_star_curves_changes(4)
def _y0_4_changed(self):
self.handle_star_curves_changes(4)
def _z0_4_changed(self):
self.handle_star_curves_changes(4)
def _z_vel_4_changed(self):
self.handle_star_curves_changes(4)
def _radius_5_changed(self):
self.handle_star_curves_changes(5)
def _angle0_5_changed(self):
self.handle_star_curves_changes(5)
def _rot_vel_5_changed(self):
self.handle_star_curves_changes(5)
def _x0_5_changed(self):
self.handle_star_curves_changes(5)
def _y0_5_changed(self):
self.handle_star_curves_changes(5)
def _z0_5_changed(self):
self.handle_star_curves_changes(5)
def _z_vel_5_changed(self):
self.handle_star_curves_changes(5)
def _radius_6_changed(self):
self.handle_star_curves_changes(6)
def _angle0_6_changed(self):
self.handle_star_curves_changes(6)
def _rot_vel_6_changed(self):
self.handle_star_curves_changes(6)
def _x0_6_changed(self):
self.handle_star_curves_changes(6)
def _y0_6_changed(self):
self.handle_star_curves_changes(6)
def _z0_6_changed(self):
self.handle_star_curves_changes(6)
def _z_vel_6_changed(self):
self.handle_star_curves_changes(6)
def _radius_7_changed(self):
self.handle_star_curves_changes(7)
def _angle0_7_changed(self):
self.handle_star_curves_changes(7)
def _rot_vel_7_changed(self):
self.handle_star_curves_changes(7)
def _x0_7_changed(self):
self.handle_star_curves_changes(7)
def _y0_7_changed(self):
self.handle_star_curves_changes(7)
def _z0_7_changed(self):
self.handle_star_curves_changes(7)
def _z_vel_7_changed(self):
self.handle_star_curves_changes(7)
def _radius_8_changed(self):
self.handle_star_curves_changes(8)
def _angle0_8_changed(self):
self.handle_star_curves_changes(8)
def _rot_vel_8_changed(self):
self.handle_star_curves_changes(8)
def _x0_8_changed(self):
self.handle_star_curves_changes(8)
def _y0_8_changed(self):
self.handle_star_curves_changes(8)
def _z0_8_changed(self):
self.handle_star_curves_changes(8)
def _z_vel_8_changed(self):
self.handle_star_curves_changes(8)
def _radius_9_changed(self):
self.handle_star_curves_changes(9)
def _angle0_9_changed(self):
self.handle_star_curves_changes(9)
def _rot_vel_9_changed(self):
self.handle_star_curves_changes(9)
def _x0_9_changed(self):
self.handle_star_curves_changes(9)
def _y0_9_changed(self):
self.handle_star_curves_changes(9)
def _z0_9_changed(self):
self.handle_star_curves_changes(9)
def _z_vel_9_changed(self):
self.handle_star_curves_changes(9)
def _radius_10_changed(self):
self.handle_star_curves_changes(10)
def _angle0_10_changed(self):
self.handle_star_curves_changes(10)
def _rot_vel_10_changed(self):
self.handle_star_curves_changes(10)
def _x0_10_changed(self):
self.handle_star_curves_changes(10)
def _y0_10_changed(self):
self.handle_star_curves_changes(10)
def _z0_10_changed(self):
self.handle_star_curves_changes(10)
def _z_vel_10_changed(self):
self.handle_star_curves_changes(10)
def _color_1_changed(self):
self.handle_star_curves_changes(1)
def _color_2_changed(self):
self.handle_star_curves_changes(2)
def _color_3_changed(self):
self.handle_star_curves_changes(3)
def _color_4_changed(self):
self.handle_star_curves_changes(4)
def _color_5_changed(self):
self.handle_star_curves_changes(5)
def _color_6_changed(self):
self.handle_star_curves_changes(6)
def _color_7_changed(self):
self.handle_star_curves_changes(7)
def _color_8_changed(self):
self.handle_star_curves_changes(8)
def _color_9_changed(self):
self.handle_star_curves_changes(9)
def _color_10_changed(self):
self.handle_star_curves_changes(10)
def _initial_tube_radius_1_changed(self):
self.handle_star_curves_changes(1)
def _initial_tube_radius_2_changed(self):
self.handle_star_curves_changes(2)
def _initial_tube_radius_3_changed(self):
self.handle_star_curves_changes(3)
def _initial_tube_radius_4_changed(self):
self.handle_star_curves_changes(4)
def _initial_tube_radius_5_changed(self):
self.handle_star_curves_changes(5)
def _initial_tube_radius_6_changed(self):
self.handle_star_curves_changes(6)
def _initial_tube_radius_7_changed(self):
self.handle_star_curves_changes(7)
def _initial_tube_radius_8_changed(self):
self.handle_star_curves_changes(8)
def _initial_tube_radius_9_changed(self):
self.handle_star_curves_changes(9)
def _initial_tube_radius_10_changed(self):
self.handle_star_curves_changes(10)
def _is_star_curve_display_1_changed(self):
self.handle_star_curves_changes(1)
def _is_star_curve_display_2_changed(self):
self.handle_star_curves_changes(2)
def _is_star_curve_display_3_changed(self):
self.handle_star_curves_changes(3)
def _is_star_curve_display_4_changed(self):
self.handle_star_curves_changes(4)
def _is_star_curve_display_5_changed(self):
self.handle_star_curves_changes(5)
def _is_star_curve_display_6_changed(self):
self.handle_star_curves_changes(6)
def _is_star_curve_display_7_changed(self):
self.handle_star_curves_changes(7)
def _is_star_curve_display_8_changed(self):
self.handle_star_curves_changes(8)
def _is_star_curve_display_9_changed(self):
self.handle_star_curves_changes(9)
def _is_star_curve_display_10_changed(self):
self.handle_star_curves_changes(10)
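# scf_scene() makes the main 3D scene the engine's current scene again; handlers that touch
# the side-view scenes call it so that subsequent mlab calls target the right figure.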
def scf_scene(self):
mlab.get_engine().current_scene = self.scene.mayavi_scene
def handle_plane_changed(self, axis_name):
pos = getattr(self, axis_name + '_plane')
print('handle_plane_changed', axis_name, pos)
ipw_3d_axis = getattr(self, 'ipw_3d_' + axis_name)
is_scene_axis = getattr(self, 'is_scene_' + axis_name)
if ipw_3d_axis is not None and is_scene_axis:
ipw_3d_axis.ipw.slice_index = pos # - getattr(self, axis_name + 'start')
ipw_axis = getattr(self, 'ipw_' + axis_name)
if ipw_axis is not None:
ipw_axis.ipw.slice_index = pos
self.handle_setview_axis_fired(axis_name) # unclear bug forces me to run this
self.scf_scene()
def _x_plane_changed(self):
self.handle_plane_changed('x')
def _y_plane_changed(self):
self.handle_plane_changed('y')
def _z_plane_changed(self):
self.handle_plane_changed('z')
def handle_is_scene_axis_changed(self, axis_name):
print('handle_is_scene_axis_changed', axis_name)
is_scene_axis = getattr(self, 'is_scene_' + axis_name)
ipw_3d_axis = getattr(self, 'ipw_3d_' + axis_name)
if ipw_3d_axis is not None:
ipw_3d_axis.remove()
setattr(self, 'ipw_3d_' + axis_name, None)
if is_scene_axis:
self.load_image_plane(axis_name)
self.scf_scene()
def _is_scene_x_changed(self):
self.handle_is_scene_axis_changed('x')
def _is_scene_y_changed(self):
self.handle_is_scene_axis_changed('y')
def _is_scene_z_changed(self):
self.handle_is_scene_axis_changed('z')
def load_image_planes(self):
self.x_plane_high = self.data.shape[0]
self.y_plane_high = self.data.shape[1]
self.z_plane_high = self.data.shape[2]
self.x_plane = int(self.x_plane_high / 2)
self.y_plane = int(self.y_plane_high / 2)
self.z_plane = int(self.z_plane_high / 2)
print('load_image_planes', self.data.shape)
for axis_name in self._axis_names:
if getattr(self, 'is_scene_' + axis_name) == False:
continue
self.load_image_plane(axis_name)
self.scf_scene()
def load_image_plane(self, axis_name):
pos = getattr(self, axis_name + '_plane')
print('load_image_plane', axis_name, getattr(self, axis_name + '_plane'), getattr(self, axis_name + '_plane_slice_position'), getattr(self, 'ipw_' + axis_name).ipw.slice_index)
ipw3d = mlab.pipeline.image_plane_widget(self.data_src, plane_orientation='%s_axes' % axis_name, opacity=0.1,
transparent=True,
slice_index=pos,
reset_zoom= False,
colormap=self.colormap, vmin=self.minDT, vmax=self.maxDT, figure=self.scene.mayavi_scene)
ipw3d.ipw.margin_size_x = 0
ipw3d.ipw.margin_size_y = 0
ipw3d.ipw.sync_trait('slice_index', self, axis_name + '_plane_slice_position')
setattr(self, 'ipw_3d_' + axis_name, ipw3d)
# self.make_side_view(axis_name)
self.scf_scene()
def make_side_view(self, axis_name):
print('make_side_view', axis_name)
scene = getattr(self, 'scene_' + axis_name)
ipw = mlab.pipeline.image_plane_widget(getattr(self,'data_src_' + axis_name),
plane_orientation='%s_axes' % axis_name, opacity=0.1,
transparent=True,
slice_index=getattr(self, axis_name + '_plane'),
reset_zoom=True,
colormap=self.colormap, vmin=self.minDT, vmax=self.maxDT, figure=scene.mayavi_scene)
ipw.ipw.margin_size_x = 0
ipw.ipw.margin_size_y = 0
ipw.ipw.interaction = False
ipw.ipw.display_text = False
if axis_name == 'x':
vertical_size = ipw.ipw.point1[1] - 0.5
horizontal_size = ipw.ipw.point2[2] - 0.5
elif axis_name == 'y':
vertical_size = ipw.ipw.point1[2] - 0.5
horizontal_size = ipw.ipw.point2[0] - 0.5
else:
vertical_size = ipw.ipw.point1[0] - 0.5
horizontal_size = ipw.ipw.point2[1] - 0.5
# print(axis_name, vertical_size, horizontal_size)
sign = np.sign(getattr(self, 'distance_' + axis_name))
if sign == 0:
sign = 1
new_distance = 45
if vertical_size >= horizontal_size:
new_distance = sign*int(0.714*vertical_size)
else:
new_distance = sign*int(0.684*horizontal_size)
setattr(self, 'distance_' + axis_name, new_distance)
print(axis_name, 'vert', vertical_size, 'horz', horizontal_size, 'new_distance', new_distance)
setattr(self, 'ipw_' + axis_name, ipw)
ax = mlab.axes(nb_labels=5, figure=scene.mayavi_scene)
ax.axes.property.color = (0, 0, 0)
ax.axes.axis_title_text_property.color = (0, 0, 0)
ax.axes.axis_title_text_property.italic = 0
ax.axes.axis_label_text_property.color = (0, 0, 0)
ax.axes.axis_label_text_property.italic = 0
ax.axes.label_format = '%.0f'
ax.axes.font_factor = 2.0
if axis_name == 'x':
ax.axes.x_axis_visibility = False
elif axis_name == 'y':
if vertical_size >= horizontal_size:
ax.axes.z_axis_visibility = False
else:
ax.axes.x_axis_visibility = False
elif axis_name == 'z':
ax.axes.y_axis_visibility = False
setattr(self, 'ipw_axes_' + axis_name, ax)
azimuth = getattr(self, 'azimuth_' + axis_name)
elevation = getattr(self, 'elevation_' + axis_name)
distance = getattr(self, 'distance_' + axis_name)
scene.scene.interactor.interactor_style = tvtk.InteractorStyleImage()
scene.scene.parallel_projection = True
scene.scene.background = (1, 1, 1)
scene.mlab.view(azimuth, elevation, distance, figure=scene.mayavi_scene)
scene.camera.parallel_scale = distance
self.scf_scene()
def handle_setview_axis_fired(self, axis_name):
print('handle_setview_axis_fired', axis_name)
azimuth = getattr(self, 'azimuth_' + axis_name)
elevation = getattr(self, 'elevation_' + axis_name)
distance = getattr(self, 'distance_' + axis_name)
scene = getattr(self, 'scene_' + axis_name)
# print(scene.mayavi_scene.name)
scene.mlab.view(azimuth, elevation, distance, figure=scene.mayavi_scene)
scene.camera.parallel_scale = distance
self.scf_scene()
def _setview_x_button_fired(self):
self.handle_setview_axis_fired('x')
def _setview_y_button_fired(self):
self.handle_setview_axis_fired('y')
def _setview_z_button_fired(self):
self.handle_setview_axis_fired('z')
def handle_axis_plane_slice_position_changed(self, axis_name):
pos = getattr(self, axis_name + '_plane_slice_position')
print('handle_axis_plane_slice_position_changed', axis_name, pos) #- getattr(self, axis_name + 'start'))
setattr(self, axis_name + '_plane', int(round(pos))) # - getattr(self, axis_name + 'start'))
def _x_plane_slice_position_changed(self):
self.handle_axis_plane_slice_position_changed('x')
def _y_plane_slice_position_changed(self):
self.handle_axis_plane_slice_position_changed('y')
def _z_plane_slice_position_changed(self):
self.handle_axis_plane_slice_position_changed('z')
def dump(self, obj):
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
def load_fits_file(self, fits_file):
print('load_fits_file', fits_file)
self.is_loading = True
img = fits.open(fits_file) # Read the fits data
dat = img[0].data
hdr = img[0].header
## The three axes loaded by fits are: velo, dec, ra so swap the axes, RA<->velo
print(dat.shape)
data = np.swapaxes(dat, 0, 2)
#!/usr/bin/env python3
# Author: <NAME>
# Ver 0.1
# Description: Calculate the top and bottom heights for glyphs for horizontal shifts of dX pixels
import os
import cv2 as cv
import glob
import numpy as np
import matplotlib.pyplot as plt
BASE_DIR = "C:/Ligatures"
nBins = 11
minHeight = 417
maxHeight = 1400
shiftX = 200
scaling = 4.9 #2048 units per em font assumption, images are 300 dpi
DESC_RATE = 0.2 # per x-unit rate at which the bottom height descends where there is no glyph data (extrapolation)
ASC_RATE_1 = 4.0 # Ascent rate for small glyphs that are smaller than shiftX*11 units
ASC_RATE_2 = 8.0 # Ascent rate for very small glyphs. These glyphs should
# be prevented from kerning too much as the glyphs behind them can collide with
# the glyphs at the front
HAROOF_BOTTOM_DEFAULT = 300 #default height at bottom for haroof for the purpose of extrapolation
HAROOF_TOP_DEFAULT = 370 #default top height for haroof
HAROOF_TOP_EXCEPTION = 440 #exception default height
SHORT_GLYPH_BOUNDARY = 61 #boundary for definition of short glyphs
ASC_RATE_EXCEPTION = 4
ASC_RATE_HAROOF = 7
ASC_RATE_DEFAULT_HAROOF = 4
SYMBOL_TOP_DEFAULT = 460 #top default height for symbols,
# it controls how much the symbols will kern outside symbol boundary
SYMBOL_BOTTOM_DEFAULT = 100
NUM_PIXEL_SCAN = 9 #number of horizontal pixels to scan
dXValid = [10,20,40,50,100] #valid values for dX
LookUp = {} #instantiate an empty dictionary
GlyphHeight = []
haroofExceptions = ["alef.png","alefwah"] #define exception haroof here, these will have a different default extrapolation height
animation = "|/-\\"
def set_base_dir(loc):
global BASE_DIR
BASE_DIR = loc
def calc_glyph_heights(baseDir,dX,enableKasheeda):
if dX not in dXValid:
print("dX value is not correct. It should be one of the folowing values: ")
print(dXValid)
print("Program exiting now")
return 0
numGlyphs = 0
haroofDir = baseDir+"/Haroof_Regular/"
ligatureDir = baseDir+"/Ligatures_Regular/"
symbolDir = baseDir+"/Symbols/"
kasheedaDir = baseDir+"/Ligatures_Kashida/"
kasheedaHaroofDir = baseDir+"/Haroof_Kashida/"
numGlyphs = regular_glyphs(ligatureDir,dX,GlyphHeight)
if numGlyphs == 0:
print("Please check the input folder. Exiting now ")
return 0
else:
print("Number of glyphs processed: " + str(numGlyphs))
# regular Haroof processing
numGlyphs = haroof_glyphs(haroofDir,dX,GlyphHeight)
if numGlyphs == 0:
print("Please check the input folder. Exiting now ")
return 0
else:
print("Number of glyphs processed: " + str(numGlyphs))
numGlyphs = symbol_glyphs(symbolDir,dX,GlyphHeight)
if numGlyphs == 0:
print("Please check the input folder. Exiting now ")
return 0
else:
print("Number of glyphs processed: " + str(numGlyphs))
if enableKasheeda[0] == 1:
numGlyphs = regular_glyphs(kasheedaDir,dX,GlyphHeight)
if numGlyphs == 0:
print("Please check the input folder. Exiting now ")
return 0
else:
print("Number of glyphs processed: " + str(numGlyphs))
# kasheeda Haroof processing
numGlyphs = haroof_glyphs(kasheedaHaroofDir,dX,GlyphHeight)
if numGlyphs == 0:
print("Please check the input folder. Exiting now ")
return 0
else:
print("Number of glyphs processed: " + str(numGlyphs))
if numGlyphs == 0:
print("Zero glyphs processed. Please check if the images are placed in correct directories.\n")
return 0
else:
print("All glyphs processed successfully")
return LookUp
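# Hedged usage sketch (assumes the folder layout named above exists under the base directory):
# lookup = calc_glyph_heights("C:/Ligatures", 50, [1])
# if lookup:
#     for name, extents in lookup.items():
#         print(name, extents.shape)  # (nBins, 2): column 0 = bottom heights, column 1 = top heights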
def regular_glyphs(dir,dX,GlyphHeight):
numGlyphs = 0
idx = 0
for filepath in glob.iglob(dir + '**/*.png', recursive=True):
filename = os.path.basename(filepath)
if numGlyphs%10 == 0:
print(animation[idx % len(animation)], end="\r")
idx += 1
if numGlyphs%100 == 0:
print("%d glyphs processed \n"%numGlyphs)
im = cv.imread(filepath, cv.IMREAD_GRAYSCALE)
if im is None:
print("Unsuccessful in reading image (check if glyph directory is not empty)\n exiting now\n")
return 0
H, W = im.shape
if len(GlyphHeight) == 0:
GlyphHeight.append(H)
if H < minHeight or H > maxHeight or H != GlyphHeight[0]:
print("Image height should be between "+str(minHeight)+" pixels and "+str(maxHeight)+" pixels. All images should have the exact same height")
print("Program exiting now")
return 0
numGlyphs += 1
nbinsTemp = nBins*dX
shiftXTemp = shiftX/dX
divisor = int(np.floor(shiftXTemp/scaling))
nPixelScan = NUM_PIXEL_SCAN
nW = int(np.floor((W-nPixelScan)/divisor) + 1)
if nW < nPixelScan:
nPixelScan = nW - 2
if nW > nbinsTemp:
nW = nbinsTemp
starting = np.ones((H,nW),dtype=np.int8) #bottom height of the glyph strokes
ending = np.ones((H,nW),dtype=np.int8) #top height of the glyph strokes
start_h = np.zeros((nbinsTemp,),dtype=int)
end_h = np.fix(H*0.5)*np.ones((nbinsTemp,),dtype=int)
extents = np.zeros((nBins,2),dtype=int)
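# For each of the nW horizontal bins, AND together NUM_PIXEL_SCAN neighbouring pixel
# columns: 'starting' scans in from the right edge of the glyph image, 'ending' in from
# the left. Rows that stay 0 (black) across the scanned columns mark glyph strokes; the
# bottom-most such row gives start_h and a top-most one gives end_h, both converted to
# heights measured from the image bottom as H - row index.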
for j in range(0,nW):
for i in range(0,nPixelScan):
starting[0:H,j] = starting[0:H,j] & im[0:H,W - j*divisor - i - 1]
ending[0:H,j] = ending[0:H,j] & im[0:H,i + j*divisor]
for j in range(0,nW):
a = np.argwhere(starting[0:H,j] == 0)
if not a.any():
start_h[j] = -1
else:
start_h[j] = a[-1]
start_h[j] = H - start_h[j]
a = np.argwhere(ending[0:H,j] == 0)
if not a.any():
end_h[j] = -1
else:
end_h[j] = a[1]
end_h[j] = H - end_h[j]
hmax = start_h[0]
for k in range(1,nbinsTemp):
if start_h[k] > hmax:
start_h[k] = hmax
else:
hmax = start_h[k]
if nW < SHORT_GLYPH_BOUNDARY:
rate = ASC_RATE_2
else:
rate = ASC_RATE_1
hmax = start_h[nW-1]
for k in range(nW,nbinsTemp):
if(hmax - (k-nW)*DESC_RATE < 0):
start_h[k] = 0
else:
start_h[k] = int(hmax - (k-nW)*DESC_RATE)
hmin = end_h[0]
for k in range(1,nbinsTemp):
if end_h[k] < hmin:
end_h[k] = hmin
else:
hmin = end_h[k]
hmin = end_h[nW-1]
for k in range(nW,nbinsTemp):
if(hmin + (k-nW)*rate > H):
end_h[k] = H
else:
end_h[k] = int(hmin + (k-nW)*rate)
extents[0:nBins,0] = start_h[0::dX]*scaling # multiplying by scaling converts from pixels to points
extents[0:nBins,1] = end_h[0::dX]*scaling
LookUp[filename[0:-4]] = extents
return numGlyphs
def haroof_glyphs(dir,dX,GlyphHeight):
numGlyphs = 0
for filepath in glob.iglob(dir + '**/*.png', recursive=True):
exceptionFlag = 0
filename = os.path.basename(filepath)
print(animation[numGlyphs % len(animation)], end="\r")
im = cv.imread(filepath, cv.IMREAD_GRAYSCALE)
if im is None:
print("Unsuccessful in reading image (check if glyph directory is not empty)\n exiting now\n")
return 0
H, W = im.shape
if H < minHeight or H > maxHeight or H != GlyphHeight[0]:
print("Image height should be between "+str(minHeight)+" pixels and "+str(maxHeight)+" pixels. All images should have the exact same height")
print("Program exiting now")
return 0
numGlyphs += 1
nbinsTemp = nBins*dX
shiftXTemp = shiftX/dX
divisor = int(np.floor(shiftXTemp/scaling))
nPixelScan = NUM_PIXEL_SCAN
nW = int(np.floor((W-nPixelScan)/divisor) + 1)
if nW > nbinsTemp:
nW = nbinsTemp
starting = np.ones((H,nW),dtype=np.int8) #bottom
ending = np.ones((H,nW),dtype=np.int8) #top
start_h = HAROOF_BOTTOM_DEFAULT*np.ones((nbinsTemp,),dtype=int)
extents = np.zeros((nBins,2),dtype=int)
if(filename[0:-4] in haroofExceptions):
end_h = HAROOF_TOP_EXCEPTION*np.ones((nbinsTemp,),dtype=int)
import math
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('dark_background')
def f(x):
#return x * math.sin(1/x)
#return x**3
return x**3 - x
#return math.sin(x)
def g(x):
#return -math.sqrt((x**3)**2)
return 2 * x**2
def result(x_values, q):
result = []
for x in x_values:
try:
result.append(q(x))
except ValueError as e:
# the point here is to indicate that
# the undefined roots should be made
# visible somehow
result.append(10)
result = np.array(result)
return result
x_values = np.arange(-4, 4, 0.001)
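# Hedged sketch of how these pieces would typically be combined (the plotting calls are
# assumed, not part of the original snippet):
# plt.plot(x_values, result(x_values, f))
# plt.plot(x_values, result(x_values, g))
# plt.show()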
#!/usr/bin/env python
#
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
# Copyright (c) 2020 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# Phylanx K-Means algorithm example in Python. Iteratively clusters 250
# randomly generated points into 3 clusters for the specified number of
# iterations.
#
# Code adapted from: http://flothesof.github.io/k-means-numpy.html
# Original source code is BSD-licensed
#
# \param iterations Number of iterations
# \returns the cluster centroids
# flake8: noqa
from phylanx import Phylanx
import argparse
import numpy as np
import time
@Phylanx
def closest_centroid(points, centroids):
points_x = np.expand_dims(np.slice_column(points, 0), -1)
points_y = np.expand_dims(np.slice_column(points, 1), -1)
centroids_x = np.slice_column(centroids, 0)
centroids_y = np.slice_column(centroids, 1)
return np.argmin(np.sqrt(
np.power(points_x - centroids_x, 2) +
np.power(points_y - centroids_y, 2)), axis=1)
import typing
from abc import ABC, abstractmethod
import numpy as np
from scipy.stats import kendalltau, pearsonr, spearmanr
from sklearn.metrics import accuracy_score, mean_squared_error, pairwise_distances
class Metric(ABC):
def __init__(self) -> None:
pass
def __call__(
self, X: np.ndarray, Y: np.ndarray
) -> typing.Union[np.float, np.ndarray]:
if X.ndim == 1:
X = X.reshape(-1, 1)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if any(y.ndim != 2 for y in [X, Y]):
raise ValueError("X and Y must be 1D or 2D arrays.")
if X.shape[0] != Y.shape[0]:
raise ValueError("X and Y must have the same number of samples.")
if self.__class__.__name__ not in ["RepresentationalSimilarity", "LinearCKA"]:
if X.shape[1] != Y.shape[1]:
raise ValueError("X and Y must have the same number of dimensions.")
return self._apply_metric(X, Y)
@abstractmethod
def _apply_metric(
self, X: np.ndarray, Y: np.ndarray
) -> typing.Union[np.float, np.ndarray]:
raise NotImplementedError("Handled by subclass.")
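# Two metric families follow: VectorMetric scores X and Y column by column and optionally
# reduces the per-column scores (np.mean by default), while MatrixMetric compares the two
# matrices as a whole (rank accuracy, representational similarity, linear CKA).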
class VectorMetric(Metric):
def __init__(self, reduction: typing.Callable = np.mean) -> None:
if reduction:
if not callable(reduction):
raise TypeError("Reduction argument must be callable.")
self._reduction = reduction
super().__init__()
def _apply_metric(
self, X: np.ndarray, Y: np.ndarray
) -> typing.Union[np.float, np.ndarray]:
scores = np.zeros(X.shape[1])
for i in range(scores.size):
scores[i] = self._score(X[:, i], Y[:, i])
if self._reduction:
return self._reduction(scores)
return scores
@staticmethod
@abstractmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
raise NotImplementedError("Handled by subclass.")
class MatrixMetric(Metric):
def __init__(self) -> None:
super().__init__()
def _apply_metric(self, X, Y):
score = self._score(X, Y)
return score
@abstractmethod
def _score(self, X: np.ndarray, Y: np.ndarray) -> np.float:
raise NotImplementedError("Handled by subclass.")
class PearsonR(VectorMetric):
@staticmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
r, _ = pearsonr(x, y)
return r
class SpearmanRho(VectorMetric):
@staticmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
rho, _ = spearmanr(x, y)
return rho
class KendallTau(VectorMetric):
@staticmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
tau, _ = kendalltau(x, y)
return tau
class FisherCorr(VectorMetric):
@staticmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
r, _ = pearsonr(x, y)
corr = np.arctanh(r)
return corr
class RMSE(VectorMetric):
@staticmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
loss = mean_squared_error(x, y, squared=False)
return loss
class ClassificationAccuracy(VectorMetric):
@staticmethod
def _score(x: np.ndarray, y: np.ndarray) -> np.float:
score = accuracy_score(x, y, normalize=True)
return score
class RankAccuracy(MatrixMetric):
def __init__(self, distance: str = "euclidean") -> None:
self._distance = distance
super().__init__()
def _score(self, X: np.ndarray, Y: np.ndarray) -> np.float:
distances = pairwise_distances(X, Y, metric=self._distance)
scores = (distances.T > np.diag(distances)).sum(axis=0) / (
distances.shape[1] - 1
)
return scores.mean()
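# Hedged usage sketch (the random arrays are purely illustrative):
# X, Y = np.random.rand(100, 8), np.random.rand(100, 8)
# PearsonR()(X, Y)       # mean per-column Pearson r
# RankAccuracy()(X, Y)   # mean rank accuracy across samples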
class RepresentationalSimilarity(MatrixMetric):
def __init__(
self, distance: str = "correlation", comparison: VectorMetric = PearsonR()
) -> None:
self._distance = distance
self._comparison = comparison
super().__init__()
def _score(self, X: np.ndarray, Y: np.ndarray) -> np.float:
X_rdm = pairwise_distances(X, metric=self._distance)
Y_rdm = pairwise_distances(Y, metric=self._distance)
if any(m.shape[1] == 1 for m in (X, Y)): # can't calc 1D corr dists
X_rdm[np.isnan(X_rdm)] = 0
Y_rdm[np.isnan(Y_rdm)] = 0
indices = np.triu_indices(X_rdm.shape[0], k=1)
score = self._comparison(X_rdm[indices], Y_rdm[indices])
return score
# inspired by https://github.com/yuanli2333/CKA-Centered-Kernel-Alignment/blob/master/CKA.py
class LinearCKA(MatrixMetric):
def __init__(self) -> None:
super().__init__()
@staticmethod
def _center(K: np.ndarray) -> np.ndarray:
N = K.shape[0]
U = np.ones([N, N])
I = np.eye(N)
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ctypes
import itertools
import math
import numpy
from ctypes import POINTER, c_bool, c_int32, c_int64, c_void_p
from numpy.ctypeslib import ndpointer
from .common import SubSolver
from ..model import Factor, Model, make_labeling, walk_shape, walk_sub_shape
try:
import cplex
except ImportError:
cplex = None
try:
import gurobipy
except ImportError:
gurobipy = None
def cplex_is_optimal(cpl):
optimal_statuses = (
cpl.solution.status.MIP_optimal,
cpl.solution.status.MIP_optimal,
cpl.solution.status.optimal_tolerance,
)
return cpl.solution.get_status() in optimal_statuses
def range_length(start, length):
return range(start, start + length)
class ILPSolver(SubSolver):
def add_full_model(self):
for variable in range(self.model.number_of_variables):
self.add_variable(variable)
for factor in self.model.factors:
self.add_factor(factor)
def add_variable(self, variable):
raise NotImplementedError
def add_factor(self, factor):
raise NotImplementedError
def lower_bound(self):
return self.upper_bound()
def prepare(self):
pass
class Cplex(ILPSolver):
DEFAULT_PARAMETERS = {
'threads': None,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
if not cplex:
raise RuntimeError('Required module cplex is not available.')
self._cplex = cplex.Cplex()
self._variables = [None] * model.number_of_variables # FIXME: consider numpy array
self.constant = 0.0
def index_of_variable(self, variable, label=0):
assert(self._variables[variable] is not None)
assert(label >= 0 and label < self.model.shape[variable])
return self._variables[variable] + label
def add_variable(self, variable):
self._variables[variable] = self._cplex.variables.get_num()
num_labs = self.model.shape[variable]
lb = [0.0] * num_labs
ub = [1.0] * num_labs
types = [self._cplex.variables.type.integer] * num_labs
self._cplex.variables.add(lb=lb, ub=ub, types=types)
ind = range_length(self._variables[variable], num_labs)
lin_expr = cplex.SparsePair(ind, val=[1.0]*len(ind))
self._cplex.linear_constraints.add(lin_expr=[lin_expr], senses=['E'], rhs=[1.0])
def add_factor(self, factor):
# copy factor for normalization
factor = Factor(factor.variables, data=numpy.copy(factor.data))
minimum = factor.data.min()
self.constant += minimum
factor.data -= minimum
# handle infinity
factor.data[factor.data > cplex.infinity] = cplex.infinity
factor.data[factor.data < -cplex.infinity] = -cplex.infinity
if factor.number_of_variables == 1:
self._add_factor_unary(factor)
else:
self._add_factor_generic(factor)
def _add_factor_unary(self, factor):
variable, = factor.variables
start = self.index_of_variable(variable)
end = int(start + self.model.shape[variable] - 1)
old = self._cplex.objective.get_linear(start, end)
self._cplex.objective.set_linear(
(i, o+v) for i, o, v in zip(range(start, end+1), old, factor.data))
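    # The generic case linearizes a multi-variable factor: one auxiliary
    # indicator variable is created per entry of the factor table, its cost is
    # added to the objective, and marginalization constraints tie the
    # auxiliaries back to the per-label indicators created in add_variable.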
def _add_factor_generic(self, factor):
first_index = self._cplex.variables.get_num()
obj = factor.data.ravel()
lb = [0.0] * len(obj)
ub = [1.0] * len(obj)
self._cplex.variables.add(lb=lb, ub=ub, obj=obj)
number_of_constraints = sum(factor.shape)
lin_expr = [cplex.SparsePair() for x in range(number_of_constraints)]
it = iter(lin_expr)
for variable, labels in zip(factor.variables, factor.shape):
for label in range(labels):
current = next(it)
current.ind.append(self.index_of_variable(variable, label))
current.val.append(-1.0)
# FIXME: This here is rather slow
it = numpy.nditer(factor.data, flags=['c_index', 'multi_index'])
while not it.finished:
            constraint_index = 0
            for local_variable, labels in enumerate(factor.shape):
                current = lin_expr[constraint_index + it.multi_index[local_variable]]
                current.ind.append(first_index + it.index)
                current.val.append(1.0)
                constraint_index += labels
it.iternext()
self._cplex.linear_constraints.add(lin_expr=lin_expr,
senses=['E']*len(lin_expr), range_values=[0.0]*len(lin_expr))
def solve(self):
if self.parameters['threads']:
self._cplex.parameters.threads.set(self.parameters['threads'])
self._cplex.solve()
if not cplex_is_optimal(self._cplex):
raise RuntimeError('CPLEX inference was not optimal')
def upper_bound(self):
return self._cplex.solution.get_objective_value() + self.constant
def labeling(self):
result = [None] * self.model.number_of_variables
for variable in range(len(result)):
if self._variables[variable] is None:
continue
for label in range(self.model.number_of_labels(variable)):
if self._cplex.solution.get_values(self.index_of_variable(variable, label)) > .5:
result[variable] = label
break
return result
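# Illustrative call pattern shared by the ILP back-ends below (a sketch only;
# `model` is assumed to be a Model instance built elsewhere in this package):
#
#     solver = Cplex(model)          # or Gurobi(model)
#     solver.add_full_model()
#     solver.solve()
#     energy, labeling = solver.upper_bound(), solver.labeling()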
class Gurobi(ILPSolver):
DEFAULT_PARAMETERS = {
'threads': 0,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
if not gurobipy:
raise RuntimeError('Required module gurobipy is not available.')
self._env = gurobipy.Env()
self._model = gurobipy.Model(env=self._env)
self._obj = gurobipy.LinExpr()
self._variables = [None] * model.number_of_variables
self._constant = 0.0
def add_variable(self, variable):
num_labs = int(self.model.shape[variable])
result = self._model.addVars(num_labs, lb=0.0, ub=1.0, vtype=gurobipy.GRB.BINARY)
self._model.addConstr(result.sum() == 1.0)
self._variables[variable] = result
def add_factor(self, factor):
# copy factor for normalization
factor = Factor(factor.variables, data=numpy.copy(factor.data))
minimum = factor.data.min()
self._constant += minimum
factor.data -= minimum
# handle infinity
factor.data[factor.data > gurobipy.GRB.INFINITY] = gurobipy.GRB.INFINITY
factor.data[factor.data < -gurobipy.GRB.INFINITY] = -gurobipy.GRB.INFINITY
if factor.number_of_variables == 1:
self._add_factor_unary(factor)
else:
self._add_factor_generic(factor)
def _add_factor_unary(self, factor):
variable, = factor.variables
self._obj.addTerms(factor.data, self._variables[variable].select())
def _add_factor_generic(self, factor):
result = self._model.addVars(*factor.shape, lb=0.0, ub=1.0)
# FIXME: Check correct index ordering
self._obj.addTerms(factor.data.ravel(), result.select())
# FIXME: This is slow as hell.
for variable, num_labels in enumerate(factor.shape):
for label in range(num_labels):
unary_var = self._variables[factor.variables[variable]][label]
                selector = ['*'] * len(factor.shape)  # pattern length must match the key tuple length
selector[variable] = label
lin_expr = sum(result.select(*selector)) - unary_var
self._model.addConstr(lin_expr == 0.0)
def solve(self):
self._model.setParam('Threads', self.parameters['threads'])
self._model.setObjective(self._obj, gurobipy.GRB.MINIMIZE)
self._model.optimize()
def upper_bound(self):
return self._model.getObjective().getValue() + self._constant
def labeling(self):
result = [None] * self.model.number_of_variables
for variable in range(len(result)):
if self._variables[variable] is None:
continue
assert(len([k for k, v in self._variables[variable].items() if v.x >.5]) == 1)
for label, var in self._variables[variable].items():
if var.x > .5:
result[variable] = label
break
return result
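# NonIterativeSolver buffers the added variables and factors and, on every
# prepare() call, rebuilds a remapped sub-model (optionally seeded with a
# warmstart labeling) that is handed to the concrete _SOLVER class.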
class NonIterativeSolver(ILPSolver):
DEFAULT_PARAMETERS = {
'warmstart': None,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
self.solver = None
self.variable_map = None
self.variables = []
self.factors = []
self.warmstart = self.parameters['warmstart']
del self.parameters['warmstart']
def add_variable(self, variable):
self.variables.append(variable)
def add_factor(self, factor):
self.factors.append(factor)
def prepare(self):
print('Rebuilding full model...')
self.variable_map = dict((x, i) for i, x in enumerate(self.variables))
model = Model([self.model.number_of_labels(x) for x in self.variables])
for factor in self.factors:
wrapped_factor = Factor([self.variable_map[x] for x in factor.variables],
data=factor.data)
model.add_factor(wrapped_factor)
params = self.parameters
if self.warmstart is not None:
print('Adjusting warmstart parameter...')
if self.solver:
prev_labeling = self.labeling()
else:
prev_labeling = [None] * self.model.number_of_variables
current_warmstart = make_labeling(model.number_of_variables)
for variable in range(model.number_of_variables):
original_variable = self.variables[variable]
if prev_labeling[original_variable] is not None:
current_warmstart[variable] = prev_labeling[original_variable]
else:
current_warmstart[variable] = self.warmstart[original_variable]
params = self.parameters.copy()
params['warmstart'] = current_warmstart
print('Reconstructing new solver...')
self.solver = self._SOLVER(model, params)
self.solver.add_full_model()
def solve(self):
return self.solver.solve()
def upper_bound(self):
return self.solver.upper_bound()
def labeling(self):
result = [None] * self.model.number_of_variables
for variable, label in enumerate(self.solver.labeling()):
result[self.variables[variable]] = label
return result
class CplexNonIterative(NonIterativeSolver):
_SOLVER = Cplex
class GurobiNonIterative(NonIterativeSolver):
_SOLVER = Gurobi
class ToulBar2(NonIterativeSolver):
class _SOLVER(ILPSolver):
DEFAULT_PARAMETERS = {
'scaling_factor': 1e6,
'warmstart': None,
'vac': True,
}
def __init__(self, model, parameters=None):
super().__init__(model, parameters)
self.constant = 0
self._init_library()
min_cost, max_cost = c_int64(), c_int64()
self._initialize(self.parameters['vac'], min_cost, max_cost)
self.min_cost, self.max_cost = min_cost.value, max_cost.value
def __del__(self):
if self._solver:
self._destroy(self._solver)
self._solver = None
def _init_library(self):
self._lib = ctypes.cdll.LoadLibrary('libcombilp_toulbar2_stub.so')
self._initialize = self._lib.combilp_toulbar2_stub_initialize
self._initialize.argtypes = [c_bool, POINTER(c_int64), POINTER(c_int64)]
self._create = self._lib.combilp_toulbar2_stub_create
self._create.argtypes = [c_int32, ndpointer(dtype=c_int32)]
self._create.restype = c_void_p
self._destroy = self._lib.combilp_toulbar2_stub_destroy
self._destroy.argtypes = [c_void_p]
self._add_factor = self._lib.combilp_toulbar2_stub_add_factor
self._add_factor.argtypes = [c_void_p, c_int32, ndpointer(dtype=c_int32), ndpointer(dtype=c_int32), ndpointer(dtype=c_int64)]
self._solve = self._lib.combilp_toulbar2_stub_solve
self._solve.argtypes = [c_void_p, c_int64]
self._solve.restype = c_bool
self._get_labeling = self._lib.combilp_toulbar2_stub_get_labeling
self._get_labeling.argtypes = [c_void_p, ndpointer(dtype=c_int32)]
def add_full_model(self):
self._solver = self._create(self.model.number_of_variables, self.model.shape)
for factor in self.model.factors:
shape = [self.model.number_of_labels(x) for x in factor.variables]
shape = numpy.asarray(shape, dtype=c_int32)
self._add_factor(self._solver, factor.number_of_variables,
factor.variables, shape, self.convert_costs(factor.data))
self.constant += factor.data.min()
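        # solve() derives an initial upper bound from the optional warmstart
        # labeling, converted into toulbar2's scaled integer cost units, before
        # calling into the C stub.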
def solve(self):
ub = self.max_cost
if self.parameters['warmstart'] is not None:
possible_ub = (self.model.evaluate(self.parameters['warmstart']) - self.constant) * self.parameters['scaling_factor']
if not math.isinf(possible_ub):
ub = int(possible_ub) + 1
result = self._solve(self._solver, ub)
if not result:
raise RuntimeError('Inference was not optimal.')
def upper_bound(self):
labeling = self.labeling()
return self.model.evaluate(labeling)
def labeling(self):
result = make_labeling(self.model.number_of_variables)
self._get_labeling(self._solver, result)
return result
def convert_costs(self, values):
minimal = values.min()
result = numpy.asarray((values - minimal) * self.parameters['scaling_factor'],
dtype=c_int64)
result[
|
numpy.isposinf(values)
|
numpy.isposinf
|
import numpy as np
from functions import identify_nearest_centroid_for_multiple_tables
from algorithm_2 import algorithm_2
from double_Q import DQPID
import time
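# algorithm_4 (summary of the logic below): given the memory tuple Mt
# (state index, action index, Q-table index and continuous action), it locates
# the centroid of next_state in the current Q table and either (a) spawns a
# child table one depth level deeper around that centroid and applies the
# double-Q update against it, (b) delegates to algorithm_2 when the maximum
# depth is reached, or (c) searches coarser tables when the next state falls
# outside the current table's range.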
def algorithm_4(Q_arrange,Mt,next_state,reward,maximum_depth,number_of_actions,e_greed_counter,set_point, flag_ab, K_step):
start = time.time()
    #print('algorithm 4')
state_index = Mt[0].astype(int)
action_index = Mt[1].astype(int)
Q_index = Mt[2].astype(int)
action = Mt[3:,]
#print('Q_index', Q_index)
next_Q_index = Q_index
next_state_index, min_distance_to_centroid = Q_arrange[Q_index].identify_nearest_centroid(next_state)
delta = Q_arrange[Q_index].max_state
l_depth = Q_arrange[Q_index].depth
#if(state_index != next_state_index):
#print('*********************************')
#print('****houston we have a problem****')
#print('*********************************')
# Create new table with higher depth, because next state is inside
if (np.abs(min_distance_to_centroid) <= np.abs(delta)) and (l_depth<maximum_depth):
print('new object in algorithm 4')
#
#
amount_of_objects = len(Q_arrange)
min_state = Q_arrange[Q_index].centroids[state_index] + Q_arrange[Q_index].min_state
max_state = Q_arrange[Q_index].centroids[state_index] + Q_arrange[Q_index].max_state
Q_cheat = (Q_arrange[Q_index].Q_A[state_index][action_index] + Q_arrange[Q_index].Q_B[state_index][action_index])/2.
current_depth_v = Q_arrange[Q_index].depth
new_centroid = np.array([Q_arrange[Q_index].centroids[state_index]]) # this is to give it an array format
Q_arrange.append(DQPID(new_centroid,Q_index, current_depth_v + 1, action, number_of_actions, maximum_depth, action_index, Q_cheat, K_step ))
#Q_arrange[amount_of_objects] = DQPID(new_centroid,Q_index, current_depth_v + 1, action, number_of_actions, maximum_depth, action_index, Q_cheat )
#temp_Q_index = len(Q_arrange)
temp_Q_index = amount_of_objects
new_state_index, new_min_distance_to_centroid = Q_arrange[temp_Q_index].identify_nearest_centroid(next_state)
delta_new = Q_arrange[temp_Q_index].max_state
if (np.abs(new_min_distance_to_centroid) >= delta_new):
#print('new centroid when creating object in algortihm 4')
new_state_index = Q_arrange[temp_Q_index].get_new_centroid(next_state)
#TODO this is controversial, but when Creating a new object I like to reset the greed counter
e_greed_counter = 0.
#I increase the descendence of the parent
Q_arrange[Q_index].descendence = Q_arrange[Q_index].descendence + 1
# I store the index of the son in the parent
Q_arrange[Q_index].descendence_index = np.append(Q_arrange[Q_index].descendence_index , amount_of_objects)
# I set the next table
next_Q_index = temp_Q_index
#next_state_index, min_distance_to_centroid = Q_arrange[Q_index].identify_nearest_centroid(next_state)
print('state_index',state_index, action_index)
if flag_ab=='A':
Q_B_max_next_value = np.max(Q_arrange[next_Q_index].Q_B[new_state_index])
Q_arrange[Q_index].update_Q(state_index,action_index,reward,Q_B_max_next_value,flag_ab)
else:
Q_A_max_next_value = np.max(Q_arrange[next_Q_index].Q_A[new_state_index])
Q_arrange[Q_index].update_Q(state_index,action_index,reward,Q_A_max_next_value,flag_ab)
# maximum depth
elif(l_depth==maximum_depth and np.abs(min_distance_to_centroid)<=np.abs(delta) ):
#print('algorithm 2')
Q_arrange[Q_index] = algorithm_2(Q_arrange[Q_index], state_index, next_state, reward, action_index, flag_ab)
next_Q_index = Q_index
    # goes back one discretization level because next state is outside
elif(np.abs(min_distance_to_centroid)>= np.abs(delta)):
stop_loop = False
print('it went in the else algorithm_4')
print('**********************************')
print('****are you sure this is correct**')
print('**********************************')
print('*********you should check this****')
print('**********************************')
while stop_loop == False:
l_depth = l_depth - 1
# I search in lower depths to see if there is a centroid for the state
if l_depth > 0:
temp_min_distance_to_centroid, temp_state_index, temp_Q_index = identify_nearest_centroid_for_multiple_tables(Q_arrange,l_depth,next_state)
else:
                l_depth = 1 # bug catcher, depth can't be smaller than 1
temp_min_distance_to_centroid, temp_state_index, temp_Q_index = identify_nearest_centroid_for_multiple_tables(Q_arrange,l_depth,next_state)
            # if the distance to the centroid is less than the minimum I create a new centroid at a higher depth
if(
|
np.abs(temp_min_distance_to_centroid)
|
numpy.abs
|
import mxnet as mx
import numpy as np
from itertools import cycle
from mxnet.image import ForceResizeAug
from mxnet.io import DataDesc
from models.loss import pairwise_distance
def get_datasets(train_image_files, test_image_files, boxes, data_shape, use_crops, scale_image_data, use_aug=True,
with_proxy=False, **kwargs):
"""Return training and testing datasets"""
return (ImageDataset(train_image_files, data_shape, boxes if use_crops else None, use_aug=use_aug,
with_proxy=with_proxy, scale_image_data=scale_image_data, **kwargs),
ImageDataset(test_image_files, data_shape, boxes if use_crops else None, use_aug=False,
with_proxy=with_proxy, scale_image_data=scale_image_data, **kwargs))
def get_iterators(train_image_files, test_image_files, boxes, batch_k, batch_size, data_shape, use_crops,
scale_image_data, **kwargs):
"""Return training and testing iterators."""
return (
ImageDatasetIterator(train_image_files, test_image_files, boxes, batch_k, batch_size, data_shape, use_crops,
is_train=True, scale_image_data=scale_image_data, **kwargs),
ImageDatasetIterator(train_image_files, test_image_files, boxes, batch_k, batch_size, data_shape, use_crops,
is_train=False, scale_image_data=scale_image_data, **kwargs))
def get_npairs_iterators(train_image_files, test_image_files, boxes, batch_size, data_shape, test_batch_size, use_crops,
scale_image_data, same_image_sampling):
"""Return training and testing npairs iterators."""
return (NPairsIterator(train_image_files, test_image_files, boxes, batch_size, data_shape, use_crops=use_crops,
is_train=True, scale_image_data=scale_image_data,
same_image_sampling=same_image_sampling),
NPairsIterator(train_image_files, test_image_files, boxes, batch_size, data_shape, use_crops=use_crops,
test_batch_size=test_batch_size, is_train=False, scale_image_data=scale_image_data,
same_image_sampling=0))
def get_prototype_iterators(train_image_files, test_image_files, boxes, Nc, Ns, Nq, data_shape, test_batch_size,
use_crops, scale_image_data):
"""Return training and testing prototype iterators."""
return (PrototypeIterator(train_image_files, test_image_files, boxes, Nc, Ns, Nq, data_shape, use_crops=use_crops,
is_train=True, scale_image_data=scale_image_data),
PrototypeIterator(train_image_files, test_image_files, boxes, Nc, Ns, Nq, data_shape, use_crops=use_crops,
test_batch_size=test_batch_size, is_train=False, scale_image_data=scale_image_data))
class CircularIterator:
"""
Circular iterator over a dataset
"""
def __init__(self, dataset):
self.pool = cycle(dataset)
def next_batch(self, b):
        return [next(self.pool) for _ in range(b)]
    def next(self):
        return next(self.pool)
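# Per-channel RGB mean and std (the standard ImageNet statistics) applied by
# transform() below when normalizing images.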
color_mean = [123.68, 116.779, 103.939]
color_std = [58.393, 57.12, 57.375]
def transform(data, resize_to, output_shape, use_aug, box, scale):
"""Crop and normalize an image nd array."""
if box is not None:
x, y, w, h = box
data = data[y:min(y + h, data.shape[0]), x:min(x + w, data.shape[1])]
data = data.astype('float32')
if use_aug:
augmeters = mx.image.CreateAugmenter(data_shape=(3, output_shape, output_shape),
#resize=resize_to,
rand_resize=True,
rand_crop=True,
rand_mirror=True,
mean=np.array(color_mean),
std=np.array(color_std) if scale else None,
inter_method=10)
if resize_to is not None:
augmeters = [ForceResizeAug((resize_to, resize_to))] + augmeters
for aug in augmeters:
data = aug(data)
else:
augmeters = mx.image.CreateAugmenter(data_shape=(3, output_shape, output_shape),
resize=256 if resize_to is None else resize_to,
mean=np.array(color_mean),
std=np.array(color_std) if scale else None)
if resize_to is not None:
augmeters[0] = ForceResizeAug((resize_to, resize_to))
for aug in augmeters:
data = aug(data)
data = mx.nd.transpose(data, (2, 0, 1))
# If image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data
class ImageDataset(mx.gluon.data.Dataset):
def __init__(self, class_data, data_shape, boxes, use_aug, scale_image_data, max_images_per_class=-1,
with_proxy=False, resize_img=None):
super(ImageDataset, self).__init__()
self._num_classes = len(class_data)
self._data_shape = data_shape
self._use_aug = use_aug
self._boxes = boxes
self._scale_image_data = scale_image_data
self._with_proxy = with_proxy
self._class_mapping = None
self._num_remapped_classes = None
self._resize_img = resize_img
self._data = []
for i, image_list in enumerate(class_data):
images = [(path, i) for path in image_list]
self._data += images if max_images_per_class < 0 else images[:max_images_per_class]
def set_class_mapping(self, mapping, num_classes):
self._class_mapping = mapping
self._num_remapped_classes = num_classes
def __getitem__(self, idx):
path, c = self._data[idx]
if self._class_mapping is not None:
c = self._class_mapping[c]
img_arr = mx.image.imread(path, flag=1)
if self._boxes:
img_arr = transform(img_arr, self._resize_img, self._data_shape, self._use_aug, self._boxes[path],
self._scale_image_data)
else:
img_arr = transform(img_arr, self._resize_img, self._data_shape, self._use_aug, None, self._scale_image_data)
if self._with_proxy:
if self._num_remapped_classes is not None:
ar = np.arange(0, self._num_remapped_classes)
else:
ar = np.arange(0, self._num_classes)
negatives = ar[ar != c]
return img_arr, c, negatives
return img_arr, c
def __len__(self):
return len(self._data)
def num_classes(self):
return self._num_classes
class ImageDatasetIterator(mx.io.DataIter):
"""Iterator for an image dataset. Supports custom batch samples.
"""
def __init__(self, train_images, test_images, boxes, batch_k, batch_size, data_shape, use_crops, is_train,
scale_image_data, resize_image=256, batchify=True):
super(ImageDatasetIterator, self).__init__(batch_size)
self._data_shape = data_shape
self._batch_size = batch_size
self._batch_k = batch_k
self.is_train = is_train
self.num_train_classes = len(train_images)
self.num_test_classes = len(test_images)
self._resize_image = resize_image
self._batchify = batchify
self._train_images = train_images
self._boxes = boxes
self._test_count = 0
self._use_crops = use_crops
self._scale_image_data = scale_image_data
self._test_data = [(f, l) for l, files in enumerate(test_images) for f in files]
self.n_test = len(self._test_data)
self._train_images_it = [CircularIterator(x) for x in self._train_images]
if batch_k is None:
self._flattened_training_data = [(item, label) for label, class_images in enumerate(self._train_images) for
item in class_images]
def num_training_images(self):
return sum([len(x) for x in self._train_images])
def num_classes(self):
return self.num_train_classes if self.is_train else self.num_test_classes
def get_image(self, img, is_train):
"""Load and transform an image."""
img_arr = mx.image.imread(img)
img_arr = transform(img_arr, self._resize_image, self._data_shape, is_train,
self._boxes[img] if self._use_crops else None, self._scale_image_data)
return img_arr.expand_dims(0)
def sample_train_batch(self):
"""Sample a training batch (data and label)."""
batch = []
labels = []
num_classes = self._batch_size // self._batch_k # initial number of classes to be selected
expected_batch_size = num_classes * self._batch_k
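        # Example: batch_size=70 and batch_k=5 give num_classes=14 and
        # expected_batch_size=70; if some sampled classes hold fewer than
        # batch_k images, extra classes are drawn by the while-loop below
        # until the batch can be filled.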
# we choose the first set of classes
sampled_classes = np.random.choice(self.num_train_classes, num_classes, replace=False)
        # verify we have enough samples to fill up the batch
num_images_per_samples_class = [min(len(self._train_images[x]), self._batch_k) for x in sampled_classes]
# add more classes until batch is full
while sum(num_images_per_samples_class) < expected_batch_size:
# sample a new class and add it to the existing list
new_sample_class = np.random.choice(np.delete(np.arange(self.num_train_classes), sampled_classes), 1,
replace=False)
sampled_classes = np.concatenate((sampled_classes, new_sample_class))
# recompute number of images
num_images_per_samples_class = [min(len(self._train_images[x]), self._batch_k) for x in sampled_classes]
# collect images
for c in sampled_classes:
img_fnames = np.random.choice(self._train_images[c],
min(self._batch_k, len(self._train_images[c])), replace=False)
batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
labels += [c for _ in range(self._batch_k)]
# remove overflow
batch = batch[:expected_batch_size]
labels = labels[:expected_batch_size]
return mx.nd.concatenate(batch, axis=0), labels
def sample_proxy_train_batch(self, sampled_classes, chose_classes_randomly=False, distances=None):
"""Sample a training batch for proxy training (data, label and negative labels) from the given classes."""
batch = []
labels = []
negative_labels = []
if sampled_classes is not None:
if isinstance(sampled_classes, list):
num_groups = len(sampled_classes)
expected_batch_size = num_groups * self._batch_k
else:
num_groups = sampled_classes
expected_batch_size = num_groups * self._batch_k
if distances is None:
# Sample classes randomly
sampled_classes = np.random.choice(self.num_train_classes, num_groups, replace=False)
else:
# Sample classes based on class distances
distances = distances.asnumpy()
mask = np.tril(distances, 0)
distances[mask == 0] = 1e10 # CxC
num_classes = distances.shape[0]
distances_flat = np.reshape(distances, -1)
asorted_dist = np.argsort(distances_flat)
first_null = (distances_flat.size - num_classes) // 2
asorted_dist = asorted_dist[:first_null] # (C*(C -1)) // 2
probs = 1 / distances_flat[asorted_dist]
probs = (probs / np.sum(probs))
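                # sampling weight is the inverse of the pair distance, so
                # closer (harder) class pairs are drawn more often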
pairs = np.random.choice(np.arange(0, probs.size), probs.size, p=probs, replace=False)
pairs_indices_flat = asorted_dist[pairs]
pairs_indices = np.unravel_index(pairs_indices_flat, distances.shape)
sampled_classes = set()
counter = 0
while len(sampled_classes) < num_groups:
pair_idx = pairs_indices[0][counter], pairs_indices[1][counter]
sampled_classes.add(pair_idx[0])
if len(sampled_classes) < num_groups:
sampled_classes.add(pair_idx[1])
counter += 1
# make sure we have enough data
num_images_per_samples_class = [min(len(self._train_images[x]), self._batch_k) for x in sampled_classes]
# add more classes until batch is full
while sum(num_images_per_samples_class) < expected_batch_size:
# sample a new class and add it to the existing list
new_sample_class = np.random.choice(np.delete(np.arange(self.num_train_classes), sampled_classes),
1, replace=False)
sampled_classes = np.concatenate((sampled_classes, new_sample_class))
# recompute number of images
num_images_per_samples_class = [min(len(self._train_images[x]), self._batch_k) for x in
sampled_classes]
for c in sampled_classes:
if chose_classes_randomly:
img_fnames = np.random.choice(self._train_images[c],
min(self._batch_k, len(self._train_images[c])), replace=False)
else:
img_fnames = self._train_images_it[c].next_batch(self._batch_k)
batch += [self.get_image(img_fname, is_train=True) for img_fname in img_fnames]
labels += [c for _ in range(self._batch_k)]
ar = np.arange(0, self.num_train_classes)
negatives = ar[ar != c]
negative_labels += [mx.nd.array(negatives).expand_dims(0) for _ in range(self._batch_k)]
# remove overflow
batch = batch[:expected_batch_size]
labels = labels[:expected_batch_size]
negative_labels = negative_labels[:expected_batch_size]
else:
chosen_data_idx = np.random.choice(range(len(self._flattened_training_data)), self.batch_size,
replace=False)
batch += [self.get_image(self._flattened_training_data[idx][0], is_train=True) for idx in chosen_data_idx]
labels += [self._flattened_training_data[idx][1] for idx in chosen_data_idx]
for l in labels:
ar = np.arange(0, self.num_train_classes)
negatives = ar[ar != l]
negative_labels.append(mx.nd.array(negatives).expand_dims(0))
return mx.nd.concatenate(batch, axis=0), labels, mx.nd.concatenate(negative_labels, axis=0)
def get_test_batch(self, batch_size=None):
"""Sample a testing batch (data and label)."""
if batch_size is None:
batch_size = self._batch_size
data, labels = zip(
*[self._test_data[(self._test_count * batch_size + i) % len(self._test_data)] for i in range(batch_size)])
data = [self.get_image(x, is_train=False) for x in data]
return mx.nd.concatenate(data, axis=0), labels
def reset(self):
"""Reset an iterator."""
self._test_count = 0
def next(self):
"""Return a batch."""
if self.is_train:
data, labels = self.sample_train_batch()
else:
if self._test_count * self._batch_size < len(self._test_data):
data, labels = self.get_test_batch()
self._test_count += 1
else:
self._test_count = 0
raise StopIteration
if self._batchify:
return mx.io.DataBatch(data=[data], label=[labels])
return data, labels
def next_proxy_sample(self, sampled_classes, chose_classes_randomly=False, proxies=None):
if self.is_train:
if proxies is not None:
distances = pairwise_distance(mx.nd, proxies) # CxC
else:
distances = None
data, labels, negative_labels = self.sample_proxy_train_batch(sampled_classes, chose_classes_randomly, distances)
if self._batchify:
return mx.io.DataBatch(data=[data, labels, negative_labels], label=[])
else:
return data, labels, negative_labels
else:
return self.next()
@property
def provide_data(self):
"""The name and shape of data provided by this iterator."""
real_batch_size = (self._batch_size // self._batch_k) * self._batch_k
return [
DataDesc('data', (real_batch_size, 3, self._data_shape, self._data_shape), np.float32)
]
@property
def provide_label(self):
real_batch_size = (self._batch_size // self._batch_k) * self._batch_k
return [
DataDesc('label', (real_batch_size,), np.int64)
]
class NPairsIterator(ImageDatasetIterator):
"""NPairs data iterator for the CUB200-2011 dataset.
"""
def __init__(self, train_images, test_images, boxes, batch_size, data_shape, use_crops, is_train, scale_image_data,
test_batch_size=128, resize_image=256, same_image_sampling=0):
if is_train:
self.N = batch_size // 2
batch_size = self.N * 2
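            # each training batch holds N anchor images plus N positives, one
            # pair per sampled class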
else:
batch_size = test_batch_size
super(NPairsIterator, self).__init__(train_images, test_images, boxes, 0, batch_size, data_shape, use_crops,
is_train, scale_image_data, resize_image=resize_image)
self._test_batch_size = test_batch_size
self._same_image_sampling = same_image_sampling
def sample_train_batch(self):
"""Sample a training batch (data and label)."""
anchors = []
positives = []
labels = []
sampled_classes = np.random.choice(self.num_train_classes, self.N, replace=False)
for i in range(self.N):
label = sampled_classes[i]
if np.random.random_sample() < self._same_image_sampling:
img_fnames = np.random.choice(self._train_images[label], 1, replace=False)
img_fnames = np.repeat(img_fnames, 2)
else:
img_fnames = np.random.choice(self._train_images[label], 2, replace=False)
anchors.append(self.get_image(img_fnames[0], is_train=True))
positives.append(self.get_image(img_fnames[1], is_train=True))
labels.append(label)
return mx.nd.concatenate(anchors, axis=0), mx.nd.concatenate(positives, axis=0), labels
def next(self):
"""Return a batch."""
if self.is_train:
anchors, positives, labels = self.sample_train_batch()
return anchors, positives, labels
else:
if self._test_count * self._test_batch_size < len(self._test_data):
data, labels = self.get_test_batch(self._test_batch_size)
self._test_count += 1
else:
self._test_count = 0
raise StopIteration
return mx.io.DataBatch(data=[data], label=[labels])
class PrototypeIterator(ImageDatasetIterator):
"""Prototype networks data iterator.
"""
def __init__(self, train_images, test_images, boxes, nc, ns, nq, data_shape, use_crops, is_train, scale_image_data,
test_batch_size=128, resize_image=256):
batch_size = nc * (ns + nq)
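        # one episode = nc classes, each contributing ns support and nq query
        # images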
super(PrototypeIterator, self).__init__(train_images, test_images, boxes, 0, batch_size, data_shape, use_crops,
is_train, scale_image_data, resize_image=resize_image)
self.nc = nc
self.ns = ns
self.nq = nq
self._test_batch_size = test_batch_size
def next(self):
"""Return a batch."""
if self.is_train:
supports, queries, labels = self.sample_train_batch()
return supports, queries, labels
else:
if (self._test_count * self._test_batch_size) < len(self._test_data):
data, labels = self.get_test_batch(self._test_batch_size)
self._test_count += 1
else:
self._test_count = 0
raise StopIteration
return mx.io.DataBatch(data=[data], label=[labels])
def sample_train_batch(self):
"""Sample a training batch (data and label)."""
sampled_classes = np.random.choice(self.num_train_classes, self.nc, replace=False)
sampled_classes.sort()
supports = [] # <Nc x Ns x I>
queries = [] # <Nc x Nq x I>
labels = [] # <Nc x 1>
for i in range(sampled_classes.shape[0]):
label = sampled_classes[i]
img_fnames =
|
np.random.choice(self._train_images[label], self.nq + self.ns, replace=False)
|
numpy.random.choice
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tools to build and fit an effective PSF (ePSF) based on Anderson and
King (2000; PASP 112, 1360).
"""
import copy
import time
import warnings
import numpy as np
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.nddata.utils import (overlap_slices, PartialOverlapError,
NoOverlapError)
from astropy.utils.exceptions import (AstropyUserWarning,
AstropyDeprecationWarning)
from .epsf_stars import EPSFStar, LinkedEPSFStar, EPSFStars
from .models import EPSFModel
from ..centroids import centroid_com
from ..extern import SigmaClip
try:
import bottleneck # pylint: disable=W0611
HAS_BOTTLENECK = True
except ImportError:
HAS_BOTTLENECK = False
__all__ = ['EPSFFitter', 'EPSFBuilder']
class EPSFFitter:
"""
Class to fit an ePSF model to one or more stars.
Parameters
----------
fitter : `astropy.modeling.fitting.Fitter`, optional
A `~astropy.modeling.fitting.Fitter` object. The default is
`~astropy.modeling.fitting.LevMarLSQFitter`.
fit_boxsize : int, tuple of int, or `None`, optional
The size (in pixels) of the box centered on the star to be used
for ePSF fitting. This allows using only a small number of
central pixels of the star (i.e. where the star is brightest)
for fitting. If ``fit_boxsize`` is a scalar then a square box
of size ``fit_boxsize`` will be used. If ``fit_boxsize`` has
two elements, they should be in ``(ny, nx)`` order. The size
must be greater than or equal to 3 pixels for both axes. If
`None`, the fitter will use the entire star image. The default
is 5.
fitter_kwargs : dict-like, optional
Any additional keyword arguments (except ``x``, ``y``, ``z``, or
``weights``) to be passed directly to the ``__call__()`` method
of the input ``fitter``.
"""
def __init__(self, fitter=LevMarLSQFitter(), fit_boxsize=5,
**fitter_kwargs):
self.fitter = fitter
self.fitter_has_fit_info = hasattr(self.fitter, 'fit_info')
if fit_boxsize is not None:
fit_boxsize = np.atleast_1d(fit_boxsize).astype(int)
if len(fit_boxsize) == 1:
fit_boxsize = np.repeat(fit_boxsize, 2)
min_size = 3
if any([size < min_size for size in fit_boxsize]):
raise ValueError('size must be >= {} for x and y'
.format(min_size))
self.fit_boxsize = fit_boxsize
# remove any fitter keyword arguments that we need to set
remove_kwargs = ['x', 'y', 'z', 'weights']
fitter_kwargs = copy.deepcopy(fitter_kwargs)
for kwarg in remove_kwargs:
if kwarg in fitter_kwargs:
del fitter_kwargs[kwarg]
self.fitter_kwargs = fitter_kwargs
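    # Typical usage (illustrative sketch; `epsf` and `stars` are assumed to be
    # an EPSFModel and an EPSFStars instance built elsewhere):
    #     fitter = EPSFFitter(fit_boxsize=5)
    #     fitted_stars = fitter(epsf, stars)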
def __call__(self, epsf, stars):
"""
Fit an ePSF model to stars.
Parameters
----------
epsf : `EPSFModel`
An ePSF model to be fitted to the stars.
stars : `EPSFStars` object
The stars to be fit. The center coordinates for each star
should be as close as possible to actual centers. For stars
            that contain weights, a weighted fit of the ePSF to the star
will be performed.
Returns
-------
fitted_stars : `EPSFStars` object
The fitted stars. The ePSF-fitted center position and flux
are stored in the ``center`` (and ``cutout_center``) and
``flux`` attributes.
"""
if len(stars) == 0:
return stars
if not isinstance(epsf, EPSFModel):
raise TypeError('The input epsf must be an EPSFModel.')
# make a copy of the input ePSF
epsf = epsf.copy()
# perform the fit
fitted_stars = []
for star in stars:
if isinstance(star, EPSFStar):
fitted_star = self._fit_star(epsf, star, self.fitter,
self.fitter_kwargs,
self.fitter_has_fit_info,
self.fit_boxsize)
elif isinstance(star, LinkedEPSFStar):
fitted_star = []
for linked_star in star:
fitted_star.append(
self._fit_star(epsf, linked_star, self.fitter,
self.fitter_kwargs,
self.fitter_has_fit_info,
self.fit_boxsize))
fitted_star = LinkedEPSFStar(fitted_star)
fitted_star.constrain_centers()
else:
raise TypeError('stars must contain only EPSFStar and/or '
'LinkedEPSFStar objects.')
fitted_stars.append(fitted_star)
return EPSFStars(fitted_stars)
def _fit_star(self, epsf, star, fitter, fitter_kwargs,
fitter_has_fit_info, fit_boxsize):
"""
Fit an ePSF model to a single star.
The input ``epsf`` will usually be modified by the fitting
routine in this function. Make a copy before calling this
function if the original is needed.
"""
if fit_boxsize is not None:
try:
xcenter, ycenter = star.cutout_center
large_slc, small_slc = overlap_slices(star.shape,
fit_boxsize,
(ycenter, xcenter),
mode='strict')
except (PartialOverlapError, NoOverlapError):
warnings.warn('The star at ({0}, {1}) cannot be fit because '
'its fitting region extends beyond the star '
'cutout image.'.format(star.center[0],
star.center[1]),
AstropyUserWarning)
star = copy.deepcopy(star)
star._fit_error_status = 1
return star
data = star.data[large_slc]
weights = star.weights[large_slc]
# define the origin of the fitting region
x0 = large_slc[1].start
y0 = large_slc[0].start
else:
# use the entire cutout image
data = star.data
weights = star.weights
# define the origin of the fitting region
x0 = 0
y0 = 0
x_oversamp = star.pixel_scale[0] / epsf.pixel_scale[0]
y_oversamp = star.pixel_scale[1] / epsf.pixel_scale[1]
scaled_data = data / (x_oversamp * y_oversamp)
# define positions in the ePSF oversampled grid
yy, xx =
|
np.indices(data.shape, dtype=np.float)
|
numpy.indices
|
# Encoding: utf-8
""" Module for registration of a SpineImaging session
The main steps are:
1) Cluster the data to get an initial reference volume
2) Each plane is clustered and xy registered to the initial reference volume from step 1
3) For each time point, each plane is replaced by the mean xy aligned group it belongs to from step 2
4) Cluster the data set created in step 3 and estimate the Z position of the resulting groups
5) Combine the groups from step 4 using the estimated Z position into 4-5 groups
6) Assign a Z position to each time point in the original data based on the clustering in step 4
7) xy align the original data to the appropriate reference volume from step 5 based on assignments from step 6
Registration of groups in stage 2 and time points in stage 7:
1) Compare plane to plane, not volume to volume
2) Use a reference volume that has been expanded to be bigger than the original data
3) Use knowledge of the possible velocities at which the brain moves to estimate the likelihood of a given shift
4) Have priors based on cross-correlation of volumes
5) Have optimization procedures that try to minimize a metric based on the fano factor of the result
"""
from __future__ import print_function, division
import copy
import logging
import os
import sys
import time
from random import sample
import matplotlib.pyplot as plt
import numpy as np
import plotly.graph_objs as go
import plotly.offline as py
import plotly.tools as tls
import thunder as td
from pySparkUtils.utils import change, balanced_repartition, fallback
from pySparkUtils.SVD import getSVD
from scipy import signal
from scipy.interpolate import PchipInterpolator
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.morphology import white_tophat
from scipy.optimize import differential_evolution
from scipy.spatial.distance import cdist
from scipy.stats import norm
from scipy.stats.mstats import zscore
from skimage.feature import match_template
from skimage.restoration.inpaint import inpaint_biharmonic
from sklearn.cluster import KMeans
from tsp_solver.greedy_numpy import solve_tsp
from prep.Embedding import getExampleVol, initEmbedDict, prepareCoordinates, getFieldTFormPar
from prep.IO import writeTiff, reloadClean
from prep.Log import add_logging_to_file
from prep.Steps import cropDictStep
from prep.Utils import getTarget, registerByPlane, nanMeanByIndex, getCrop, getBestCrop, log_progress
# Setup logging
logger = logging.getLogger(__name__)
logger.handlers = []
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(logging.Formatter("%(name)s @ %(asctime)s - [%(levelname)s] %(module)s::%(funcName)s: %(message)s"))
ch.setLevel(logging.INFO)
logger.addHandler(ch)
try:
from itertools import izip
except ImportError:
# python 3
izip = zip
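# High-level driver sequence (illustrative sketch based on the functions
# defined below; arguments shown are placeholders, not prescribed values):
#
#     regDict = initRegDict(session, data)
#     getGlobalSignal(sc, regDict)
#     getInitClustersSVD(regDict)
#     TForm = runInitTarget(sc, regDict, session=session)
#     sc = getAllGroupsSVD(sc, session, regDict)
#     sc = getExpandedTarget(sc, regDict, nIter=3)
#     runGroupAlignment(sc, regDict, TForm)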
def initRegDict(session, data, folderName='intermediates', auPerPhoton=90.0, structureSize=6):
""" Initialize a dictionary with default values for registration
:param session: current SpineSession object
:param data: clean data Images object
:param folderName: sub-folder name
:param auPerPhoton: arbitrary value estimating a single photon
    :param structureSize: structure size in pixels in x, y for topHat filtering
:return: Registration dictionary with default values, creates a directory for intermediate files, starts logging to
file
"""
regDict = dict()
other_vars = locals()
del other_vars['session']
regDict.update(other_vars)
# from session or computed
regDict['dims'] = data.first().shape
regDict['path'] = session.path
regDict['sampleRate'] = float(session.volRate)
regDict['fullPath'] = os.path.join(session.path + folderName, '')
regDict['sigmaTime'] = 1000.0 / session.volRate / 2
regDict['velPerPixXY'] = session.pixSizeXY / (1000 / regDict['sampleRate'])
regDict['velPerPixZ'] = session.pixSizeZ / (1000 / regDict['sampleRate'])
regDict['flyLines'] = session.flyLines
regDict['pixSizeXY'] = session.pixSizeXY
regDict['pixSizeZ'] = session.pixSizeZ
regDict['fieldMaskBool'] = session.fieldMaskBool
regDict['fieldDur'] = float(session.ySize + session.optFlyLines) / session.lineRate * 1000
if not os.path.isdir(regDict['fullPath']):
os.mkdir(os.path.dirname(regDict['fullPath']))
else:
logger.info('Folder: ' + regDict['fullPath'] + ' exists, files will be overwritten')
add_logging_to_file('prep.Registration', os.path.join(regDict['fullPath'], 'registration.log'))
return regDict
@fallback
def change_sc(sc):
""" prevents spark context from dying when transmitting messages and failing quickly for getFinalGroupsSVD
:param sc: spark context
:return: spark context
"""
return change(sc, spark_rpc_message_maxSize='250', spark_executor_heartbeatInterval='360s',
spark_network_timeout='720s', spark_task_maxFailures='1')
def getGlobalSignal(sc, regDict, cropDict=None, minZscore=2.0, minDFF=1.0, regWindow=3000):
"""Estimates global events based on a crop given by cropDict, if None will take all the data.
:param sc: Spark context
:param regDict: registration dict
:param cropDict: cropping dictionary from getCrop function
:param minZscore: minimal Z score above which to be considered AP (float)
    :param minDFF: minimal dF/F above which to be considered AP (float)
:param regWindow: window in time points to estimate baseline from (int)
:return: Adds to regDict:
1) globalTC: time course of planes that represent global events
2) putAP: bool array with True for every event above threshold - putative action potential
3) noAP: Logical not on putAP (all time points without APs)
"""
data = regDict['data']
if cropDict is not None:
data = cropDictStep(data=data, cropDict=cropDict)
else:
cropDict = getCrop(data.first(), display=None)
regDict['cellCrop'] = cropDict
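    # getTC reduces each volume to the mean of its brightest 5% of voxels,
    # yielding a global fluorescence time course.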
def getTC(x):
threshold = np.percentile(x, 95)
x = x.astype(np.float32)
x[x < threshold] = np.nan
return np.nanmean(x)
globalTC = data.map(getTC).toarray()
globalTC_BC = sc.broadcast(globalTC)
regDict['minZscore'] = minZscore
regDict['minDFF'] = minDFF
regDict['regWindow'] = regWindow
def findAP(key):
""" determine if current time point (key) is an AP
:param key: current time point
:return: 1 if AP, 0 otherwise
"""
from scipy.stats import gaussian_kde
key = key[0]
start = int(max((0, key - regWindow / 2)))
stop = int(min((len(globalTC_BC.value), key + regWindow / 2)))
y1 = globalTC_BC.value[start:stop]
y2 = y1[np.logical_not(np.isnan(y1))]
if np.any(y2):
if len(y2) > 100:
kernel = gaussian_kde(y2)
low = int(np.round(np.percentile(y2, 25)))
high = int(np.round(np.percentile(y2, 75)))
step = (high - low) / 100.
testRange = low + np.array(range(1, 101)) * step
estMode = testRange[np.argmax(kernel(testRange))]
else:
estMode = np.median(y2)
y3 = y2[(y2 - estMode) < 0] - estMode
std = np.std(np.hstack((y3, -y3)))
zScore = (globalTC_BC.value[key] - estMode) / std
localPutAP = np.logical_and(zScore > minZscore, (globalTC_BC.value[key] / estMode - 1.) > minDFF)
else:
localPutAP = 0
return localPutAP
index = np.array(range(len(globalTC))).reshape(-1, 1, 1)
putAP = td.images.fromarray(index, engine=sc).map(findAP).toarray()
globalTC_BC.unpersist()
regDict['globalTC'] = globalTC
regDict['putAP'] = putAP
noAP = np.ones(data.shape[0], dtype=bool)
noAP[putAP] = False
regDict['noAP'] = noAP
APNum = np.where(regDict['putAP'])[0].shape[0]
length = regDict['data'].shape[0]
meanFR = APNum / (length / regDict['sampleRate'])
regDict['meanFR'] = meanFR
# plot
data = list()
data.append(dict(y=globalTC + 1000, type='scatter', name='TC'))
data.append(dict(y=putAP * 1000, type='scatter', name='AP'))
layout = dict(title='Mean firing rate: %.2fHz' % meanFR)
py.iplot(dict(data=data, layout=layout))
def getInitClustersSVD(regDict, initClusterNum=30, initIterKMeans=35, initNInit=50, initTol=1e-8, initK=40):
""" Clusters the data set into initClusterNum clusters by first extracting the first initK features of the data
using SVD and then clustering using K-Means in feature space
:param regDict: registration dict
:param initClusterNum: number of clusters for initial reference selection (int)
:param initIterKMeans: number of KMeans iterations (int)
:param initNInit: number of initializations for the K-Means
:param initTol: Tolerance for the K-Means
:param initK: number of features to get from the SVD
:return: adds to regDict:
1) initClusters: cluster assignment per time point
2) initClusterNums: number of time points per cluster
"""
local = locals()
del local['regDict']
regDict.update(local)
# SVD on the data
t = time.time()
data = regDict['data']
indexNoAP = np.where(regDict['noAP'])[0]
U, _, _ = getSVD(data, initK, getComponents=False, getS=False, normalization='mean')
current = time.time() - t
m, s = divmod(current, 60)
logger.info('SVD:: %02d:%02d' % (m, s))
# K-Means cluster
model = KMeans(n_clusters=initClusterNum, init='k-means++', n_init=initNInit,
max_iter=initIterKMeans, tol=initTol, precompute_distances='auto',
verbose=0, random_state=1, copy_x=False, n_jobs=-1,
algorithm='auto').fit(U[indexNoAP, :])
current = time.time() - t
m, s = divmod(current, 60)
logger.info('Cluster:: %02d:%02d' % (m, s))
clusters = model.predict(U)
centers = model.cluster_centers_
# get unique cluster
uIndex, loc = np.unique(clusters, return_inverse=True)
clusters2 = np.zeros(clusters.shape)
for i in range(len(uIndex)):
clusters2[loc == i] = i
clusters = clusters2
centers = centers[uIndex]
dist = cdist(centers, centers)
# plot
ax1 = plt.subplot(1, 1, 1)
y, x, f = ax1.hist(clusters, bins=len(uIndex), color='b')
ax1.set_ylabel('Time points', color='b')
ax1.set_xlabel('Cluster#')
for tl in ax1.get_yticklabels():
tl.set_color('b')
binCenters = 0.5 * (x[1:] + x[:-1])
ax2 = ax1.twinx()
ax2.plot(binCenters, np.sum(dist, axis=0), '*r')
ax2.set_ylabel('Sum distances', color='r')
plt.grid(False)
for tl in ax2.get_yticklabels():
tl.set_color('r')
# return data
regDict['initClusters'] = clusters
regDict['initClusterNums'] = y
def runInitTarget(sc, regDict, rankList=(1, 2, 3), session=None, TForm=None):
""" A helper function to take the (rankList) clusters and find a target using cross-correlation to the mean
:param sc: Spark Context
:param regDict: registration dict
:param rankList: iterable of ranks for the clusters to select
:param session: SpineSession object
:param TForm: transformation dict to embed targets for manual selection
:return: TForm
"""
if TForm is None:
if session is None:
raise ValueError('If TForm not provided please provide a session object')
embedDict = initEmbedDict(session)
prepareCoordinates(embedDict)
xStart = session.pipelines['Clean'].steps['crop2'].args['xStart']
xStop = session.pipelines['Clean'].steps['crop2'].args['xStop']
logger.info('Creating a TForm with xStart: %d, and xStop %d' % (xStart, xStop))
TForm = getFieldTFormPar(sc, embedDict, xDim=(xStart, xStop))
embedDict['TForm1'] = TForm
session.embedDict = embedDict
for rank in rankList:
getInitTarget(sc=sc, regDict=regDict, rank=rank, initCC=90)
regTargetEmbedded = getExampleVol(sc, data=regDict['regTarget'], TFormDict=TForm, project=True)
writeTiff(regDict['fullPath'], regTargetEmbedded, 'regTarget' + str(rank))
return TForm
def getInitTarget(sc, regDict, rank=1, initCC=90):
""" gets the initial reference volume using the 'rank' biggest cluster followed by taking the initCC most
correlated points and registering them to the mean
:param sc: Spark context
:param regDict: registration dict
:param rank: which cluster to take 1 - most number of timepoints (int)
:param initCC: CC cutoff for initial target creation (int)
:return: adds to regDict:
1) initTargetIndex: which time points are in the rank highest cluster
2) initTarget: initial target before registration
3) regTarget: initial target after registration
4) initPoints: point used to create initTarget
5) targetShifts: shifts used to create regTarget
"""
# get params
data = regDict['data']
regDict['initCC'] = initCC
length = regDict['data'].shape[0]
y = regDict['initClusterNums']
clusters = regDict['initClusters']
maxCluster = np.argpartition(y, -rank)[-rank:][0]
maxIndex = np.where(clusters == maxCluster)[0]
# crop out cell body (the crop used to get global signal)
noCellIndex = np.where(np.logical_not(regDict['cellCrop']['zIndex']))[0]
if noCellIndex.shape[0] > 0:
data2 = data[:, :, :, noCellIndex]
else:
data2 = data
# plot where the time points of the selected group came from
plt.figure()
plt.plot(maxIndex, np.ones(maxIndex.shape), 'r*')
plt.xlim(0, length)
plt.title('Time points taken for initTarget')
# plot the CC
initPoints = getTarget(data2[maxIndex, :, :, :], initCC, 1, mode='index')
initTarget = data[initPoints, :, :, :].mean().toarray()
# get the reference volume
targetShifts, regTarget = registerByPlane(sc, data=data[initPoints, :, :, :], target=initTarget, upSample2=10,
doShift=True)
regTarget = regTarget.mean().toarray()
# save to regDict
regDict['initTargetIndex'] = maxIndex
regDict['initTarget'] = initTarget
regDict['initPoints'] = initPoints
regDict['regTarget'] = regTarget
regDict['targetShifts'] = targetShifts
@fallback
def getAllGroupsSVD(sc, session, regDict, planeClusterNum=800, planeIterKMeans=20, planeNInit=10, planeTol=1e-5,
planeK=40, planeIterSVD=50, full_path=None, change_sc_cluster=True):
""" use SVD to get planeK components and cluster each plane to planeClusterNum that should represent similar
timepoints in x, y z shifts or activity
:param sc: Spark context
:param session: Session object
:param regDict: registration dictionary
:param planeClusterNum: number of clusters per plane
:param planeIterKMeans: number of iteration for the KMeans clustering
:param planeNInit: number of random initializations for the KMeans clustering
:param planeTol: the tolerance for the KMeans clustering
:param planeK: the number of SVD components
    :param planeIterSVD: The number of SVD iterations
:return: sc, adds to regDict:
1) groups: cluster assignment per time point per plane
2) allMeanGrpVol: list per plane of a 4d numpy array (groupID, x, y, z) mean volumes
"""
# todo: make planeClusterNum dependent on length / variability in the data
# init params
t = time.time()
data = regDict['data']
regDict['planeClusterNum'] = planeClusterNum
regDict['planeIterKMeans'] = planeIterKMeans
regDict['planeNInit'] = planeNInit
regDict['planeTol'] = planeTol
regDict['planeK'] = planeK
regDict['planeIterSVD'] = planeIterSVD
planeNum = data.shape[3]
# get all SVDs
if change_sc_cluster:
sc = change(sc, spark_executor_cores='5', spark_task_cpus='5', spark_executor_memory='30g')
data = reloadClean(sc, session, full_path=full_path)
def merge_arrays(kv):
plane, arrays = kv
list_arrays = list(arrays)
sorted_list = sorted(list_arrays, key=lambda x: (x[0][0], x[0][1]))
sorted_array = np.array(list(map(lambda x: x[1], sorted_list)), dtype=np.int16).T
return plane, sorted_array
def bring_key_out(kv):
key, value = kv
return key[2], (key[:2], value)
clean_plane = data.toseries().tordd().map(bring_key_out).groupByKey().map(merge_arrays)
def get_clusters(kv):
plane, data = kv
import os
os.environ['MKL_NUM_THREADS'] = '5'
os.environ['OMP_NUM_THREADS'] = '5'
from sklearn.decomposition import TruncatedSVD
tsvd = TruncatedSVD(planeK, algorithm="randomized", n_iter=10)
data_reduced = tsvd.fit_transform(data)
clusters = KMeans(n_clusters=planeClusterNum, init='k-means++', n_init=planeNInit,
max_iter=planeIterKMeans, tol=planeTol, precompute_distances='auto',
verbose=0, random_state=1, copy_x=False, n_jobs=-1,
algorithm='auto').fit_predict(data_reduced)
return plane, clusters
clusters = clean_plane.map(get_clusters).collect()
sorted_list = sorted(clusters, key=lambda x: x[0])
groups = np.array(list(map(lambda x: x[1], sorted_list)), dtype=np.int16).T
if change_sc_cluster:
sc = change_sc(sc)
# reload clean
data = reloadClean(sc, session, name=None, returnRegDict=False, returnTForm=False, full_path=full_path)
regDict['data'] = data
allMeanGrpVol = [None] * planeNum
for i in log_progress(range(planeNum)):
allMeanGrpVol[i] = nanMeanByIndex(data, groups[:, i], largeGroupMean=False)
regDict['groups'] = groups
regDict['allMeanGrpVol'] = allMeanGrpVol
current = time.time() - t
m, s = divmod(current, 60)
logger.info('Done: %02d:%02d' % (m, s))
return sc
def getExpandedTarget(sc, regDict, nIter, flyLinesFlag=True, expansionFactor=2.0, nanThreshold=200, cutoffCC=50,
structureSize=3, inPaint=1, zscoreFlag=False, tophatFlag=True):
""" Iteratively register the groups from getAllGroups to regTarget in an expanded space in X, Y then crop optimally
using nanThreshold as maximum number of NaNs in the volume
:param sc: Spark context
:param regDict: registration dict
    :param nIter: number of iterations to align and crop the reference volume
:param flyLinesFlag: crop fly lines after shifting (True)
:param expansionFactor: factor to expand in x and y (2.0)
:param nanThreshold: number of continuous NaNs permitted in the expanded volume
:param cutoffCC: cutoff for CC while creating target
:param structureSize: size for top hat filter
:param inPaint: 0 = leave NaNs, 1 = use inPaint, 2 = use nan_to_num
:param zscoreFlag: flag to do zscore
:param tophatFlag: flag to do top hat filtering
:return: sc &
Adds to regDict:
1) expandedTarget: the expanded reference volume
2) grpVolShifts: shifts per group used to create the volume
3) grpVolShiftsTime: shifts per time point to create the volume
4) intermediate results in *Dict keys
"""
# init params
# todo: nanThreshold should be % pixels per plane
start = time.time()
regDict['structureSizeExtendedTarget'] = structureSize
allMeanGrpVol = regDict['allMeanGrpVol']
zNum = len(allMeanGrpVol)
regDict['expansionFactor'] = expansionFactor
regTarget = copy.deepcopy(regDict['regTarget'])
dims = regDict['dims']
regDict['nanThresholdExpandedTarget'] = nanThreshold
regDict['ExpandedTargetCC'] = cutoffCC
flyLines = regDict['flyLines']
refTarget = regTarget
xMeanLast = 0
yMeanLast = 0
reference3DDict = dict()
expendedDict = dict()
shiftsDict = dict()
returnedDict = dict()
if not isinstance(cutoffCC, list):
cutoffCC = [cutoffCC]
while len(cutoffCC) <= nIter:
cutoffCC.append(cutoffCC[-1])
# paralleling the volumes per plane per group 5d array
if 'allMeanGrpVolPar' not in regDict.keys() or 'allMeanGrpVolPar2' not in regDict.keys():
logger.info('Parallelizing allMeanGrpVolPar')
allMeanGrpVol2 = np.stack(allMeanGrpVol)
allMeanGrpVolPar = td.images.fromarray(allMeanGrpVol2.transpose((1, 0, 2, 3, 4)),
npartitions=sc.defaultParallelism * 4, engine=sc)
allMeanGrpVolPar.cache()
allMeanGrpVolPar.count()
regDict['allMeanGrpVolPar'] = allMeanGrpVolPar
allMeanGrpVolPar2 = td.images.fromarray(allMeanGrpVol2, npartitions=sc.defaultParallelism * 4, engine=sc)
allMeanGrpVolPar2.cache()
allMeanGrpVolPar2.count()
regDict['allMeanGrpVolPar2'] = allMeanGrpVolPar2
else:
logger.info('Using existing allMeanGrpVolPar')
allMeanGrpVolPar = regDict['allMeanGrpVolPar']
allMeanGrpVolPar2 = regDict['allMeanGrpVolPar2']
# get shift function
def shift3D(kv):
key, array = kv
shift = np.zeros((zNum, 4))
ref = copy.deepcopy(refTargetBC.value)
source2 = copy.deepcopy(array).astype(np.float64)
for z2 in range(zNum):
source = copy.deepcopy(source2[z2, :, :, :])
for j in range(source.shape[2]):
source[:flyLines[j], :, j] = np.nan
# source[:, :, j] = inpaint_biharmonic(source[:, :, j], np.isnan(source[:, :, j]))
source[:, :, j] = np.nan_to_num(source[:, :, j])
if tophatFlag:
source[:, :, j] = white_tophat(source[:, :, j], structureSize)
if zscoreFlag:
source = zscore(source, axis=2)
c = match_template(ref, source, pad_input=True)
if (maxShift[0] * 2) < c.shape[0]:
crop = np.ceil(float(c.shape[0]) / 2 - maxShift[0]).astype(int)
c = c[crop:-crop, :, :]
if (maxShift[1] * 2) < c.shape[1]:
crop = np.ceil(float(c.shape[1]) / 2 - maxShift[1]).astype(int)
c = c[:, crop:-crop, :]
zCenter = int(np.floor(zNum / 2))
if maxShift[2] == 0:
c = c[:, :, zCenter]
else:
c = c[:, :, (zCenter - maxShift[2]):(zCenter + maxShift[2] + 1)]
bestShift = np.unravel_index(np.argmax(c), c.shape)
bestShift -= np.floor(np.asarray(c.shape) / 2)
if maxShift[2] == 0:
bestShift = np.hstack((bestShift, 0.))
shift[z2, :3] = bestShift
shift[z2, 3] = np.max(c)
return shift
# apply shift function
def itkRealign3D(kv):
from SimpleITK import ResampleImageFilter, GetImageFromArray, TranslationTransform, GetArrayFromImage
key, array = kv
resampler = ResampleImageFilter()
CC = shiftsOutBC.value[:, key[0], 3] * -1
cutoff = np.percentile(CC, cutoffCC[k])
indexes = np.where(CC > cutoff)[0]
realigned = np.zeros((indexes.shape[0], int(dims[0] * expansionFactor), int(dims[1] * expansionFactor)))
for i, groupID in enumerate(indexes):
current = array[groupID, :, :, key[0]].astype('float32')
if flyLinesFlag:
                current[:flyLines[key[0]], :] = np.nan
moving = GetImageFromArray(current)
transform = TranslationTransform(2, shiftsOutBC.value[groupID, key[0], 1:3])
resampler.SetTransform(transform)
resampler.SetSize((int(dims[1] * expansionFactor), int(dims[0] * expansionFactor)))
resampler.SetDefaultPixelValue(np.NAN)
resampler.SetOutputOrigin(outOrigin)
realigned[i, :, :] = GetArrayFromImage(resampler.Execute(moving))
return np.nanmean(realigned, axis=0)
for k in range(nIter):
# get the shifts
for z in range(refTarget.shape[2]):
refTarget[:flyLines[z], :, z] = np.nan
refTarget[:, :, z] = inpaint_biharmonic(refTarget[:, :, z], np.isnan(refTarget[:, :, z]))
if tophatFlag:
refTarget[:, :, z] = white_tophat(refTarget[:, :, z], structureSize)
if zscoreFlag:
refTarget = zscore(refTarget, axis=2)
maxShift = np.array([refTarget.shape[0] / 2, refTarget.shape[1] / 2, 0])
refTargetBC = sc.broadcast(refTarget)
shifts3D = allMeanGrpVolPar.map(shift3D, with_keys=True).toarray()
refTargetBC.unpersist()
shiftsDict[k] = copy.deepcopy(shifts3D)
# center the shifts
if k == nIter - 1:
break
xMean = (np.percentile(shifts3D[:, :, 0], 98) + np.percentile(shifts3D[:, :, 0], 2)) / 2. - xMeanLast
yMean = (np.percentile(shifts3D[:, :, 1], 98) + np.percentile(shifts3D[:, :, 1], 2)) / 2. - yMeanLast
shifts3D[:, :, 0] = shifts3D[:, :, 0] - xMean
shifts3D[:, :, 1] = shifts3D[:, :, 1] - yMean
xMeanLast = copy.deepcopy(xMean)
yMeanLast = copy.deepcopy(yMean)
# get the new expanded target
shifts3D[:, :, 2] = 0
shiftsOut = copy.deepcopy(shifts3D)
shiftsOut = -shiftsOut[:, :, [2, 1, 0, 3]]
shiftsOutBC = sc.broadcast(shiftsOut)
outOrigin = (-dims[1] / 2., -dims[0] / 2.)
logger.info('Iteration %d, target shape: %s' % (k + 1, refTarget.shape))
reference3D = allMeanGrpVolPar2.map(itkRealign3D, with_keys=True).toarray().transpose(1, 2, 0)
shiftsOutBC.unpersist()
reference3DDict[k] = reference3D
expandedTarget = copy.deepcopy(reference3D)
# iteratively find best target size
maxShifts = np.abs(shifts3D[:, :, :2]).max(axis=(0, 1))
returned, expandedTarget = getBestCrop(sc, target=expandedTarget, maxShifts=maxShifts,
nanThreshold=nanThreshold, inPaint=inPaint)
returnedDict[k] = copy.deepcopy(returned)
expendedDict[k] = copy.deepcopy(expandedTarget)
refTarget = copy.deepcopy(expandedTarget)
logger.info('expanded shape: %s' % (refTarget.shape,))
m, s = divmod(time.time() - start, 60)
logger.info('Time: %02d:%02d' % (m, s))
regDict['expandedTarget'] = expandedTarget
regDict['grpVolShifts'] = shifts3D[:, :, :3]
regDict['reference3DDict'] = reference3DDict
regDict['expandedDict'] = expendedDict
regDict['shiftsDict'] = shiftsDict
regDict['returnedDict'] = returnedDict
# assign the shifts to time points
groups = (regDict['groups']).astype(int)
grpVolShiftsTime = np.zeros((groups.shape[0], groups.shape[1], 2), dtype=float)
for timePoint, allPlaneGroups in enumerate(groups):
for plane, group in enumerate(allPlaneGroups):
grpVolShiftsTime[timePoint, plane, :] = -shifts3D[group, plane, :2]
regDict['grpVolShiftsTime'] = grpVolShiftsTime
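# --- Illustrative sketch (not part of the original pipeline) ---
# The loop above looks up, for every (timePoint, plane) pair, the negated XY shift of the group
# that time point was assigned to. The toy helper below (a hypothetical name, with made-up array
# sizes) shows the same gather both as an explicit loop and as a single fancy-indexing expression.
def _example_group_shift_lookup():
    import numpy as np
    rng = np.random.default_rng(0)
    nTime, nPlane, nGroup = 5, 3, 4
    groups = rng.integers(0, nGroup, size=(nTime, nPlane))   # group id per (time point, plane)
    shifts3D = rng.normal(size=(nGroup, nPlane, 4))          # per-group shifts (x, y, z, corr)
    looped = np.zeros((nTime, nPlane, 2))
    for timePoint, allPlaneGroups in enumerate(groups):
        for plane, group in enumerate(allPlaneGroups):
            looped[timePoint, plane, :] = -shifts3D[group, plane, :2]
    vectorized = -shifts3D[groups, np.arange(nPlane)[None, :], :2]
    assert np.allclose(looped, vectorized)
    return vectorized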
def runGroupAlignment(sc, regDict, TForm1, percentile=30):
""" A helper function to run and output to disk the results of registering the groups per plane using the volume
shifts or after optimization of per plane registration.
:param sc: Spark Context
:param regDict: registration dict
:param TForm1: TForm used to embed the result into the 3d volume for manual inspection
:param percentile: for optimization see function XYAlignedGroupsOptimization
:return: writes to disk the mean volumes after registration
"""
getXYAlignedGroups(sc, regDict, optimization=False, useVolumeShifts=True)
allGroups = np.array(regDict['planeGrpAlign'])
sz = allGroups.shape
dims = regDict['dims']
allGroups = allGroups.reshape(sz[0], sz[1], dims[0], dims[1]).transpose(0, 1, 3, 2)
allGroupsEmbedded = getExampleVol(sc, allGroups.transpose(1, 3, 2, 0), TForm1, project=True)
writeTiff(regDict['fullPath'], allGroupsEmbedded.transpose(1, 2, 0), 'allGroupsVolume')
writeTiff(regDict['fullPath'], np.expand_dims(np.nanmean(allGroupsEmbedded, axis=0), axis=2),
'allGroupsVolumeMean')
# get the % of NaNs and the fano factor of the data registered with the prior; the optimization
# below scores candidate parameters as the % change relative to these baselines, not as absolute values
regDict['nans_group_volume'] = np.sum(np.isnan(allGroups)).astype(float) / np.prod(sz) * 100
meanAllGroups = np.nanmean(allGroups, axis=1)
t = np.nanpercentile(meanAllGroups, 50)
mask = allGroups < t
allGroups[mask] = np.nan
mean = np.nanmean(allGroups, axis=1)
var = np.nanvar(allGroups, axis=1)
regDict['fano_group_volume'] = np.nanpercentile(var / mean, percentile)
logger.info('Group volume fano: %.2f, Nans: %.2f' % (regDict['fano_group_volume'], regDict['nans_group_volume']))
# optimization
XYAlignedGroupsOptimization(sc, regDict, percentile=percentile)
allGroups = np.array(regDict['planeGrpAlign'])
sz = allGroups.shape
dims = regDict['dims']
allGroups = allGroups.reshape(sz[0], sz[1], dims[0], dims[1]).transpose(0, 1, 3, 2)
allGroupsEmbedded = getExampleVol(sc, allGroups.transpose(1, 3, 2, 0), TForm1, project=True)
writeTiff(regDict['fullPath'], allGroupsEmbedded.transpose(1, 2, 0), 'allGroupsOpto')
writeTiff(regDict['fullPath'], np.expand_dims(np.nanmean(allGroupsEmbedded, axis=0), axis=2),
'allGroupsOptoMean')
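# --- Illustrative sketch (not part of the original pipeline) ---
# runGroupAlignment summarizes registration quality with two numbers: the percentage of NaN pixels
# and a "fano factor" (variance / mean across groups) computed only on pixels above the median and
# summarized by a low percentile. A stand-alone toy version of that metric, assuming a
# positive-valued (groups x pixels) stack, could look like this (the helper name is made up).
def _example_fano_metric(stack, percentile=30):
    import numpy as np
    stack = np.asarray(stack, dtype=float)
    nans = np.sum(np.isnan(stack)).astype(float) / stack.size * 100
    meanOverGroups = np.nanmean(stack, axis=0)
    t = np.nanpercentile(meanOverGroups, 50)
    masked = np.where(stack < t, np.nan, stack)          # keep only the brighter half
    fano = np.nanpercentile(np.nanvar(masked, axis=0) / np.nanmean(masked, axis=0), percentile)
    return fano, nans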
def XYAlignedGroupsOptimization(sc, regDict, percentile=30):
""" optimization of group alignment in x y by minimizing fano factor and penalizing the number of NaNs
:param sc: Spark context
:param regDict: registration dict
:param percentile: percentile of the var/mean distribution used as the fano-factor summary
:return: adds optimizationResultGroups to regDict and re-runs getXYAlignedGroups with the
optimized lambdaMotion, sigmaTime, sigmaPix and structureSize
"""
# init params
start = time.time()
fieldMaskBool = regDict['fieldMaskBool']
fieldDur = regDict['fieldDur']
fieldTimes = np.asarray(range(0, len(fieldMaskBool))) * fieldDur
fieldTimes = fieldTimes[fieldMaskBool]
regDict['fieldTimes'] = fieldTimes
# creating indexing
allMeanGrpVol = regDict['allMeanGrpVol']
si = regDict['expandedTarget'].shape
totalGroups = sum([x.shape[0] for x in allMeanGrpVol])
regDict['totalGroups'] = totalGroups
counter = 0
parIdx = [None] * totalGroups
for i in range(0, si[2]):
for j in range(0, allMeanGrpVol[i].shape[0]):
parIdx[counter] = (i, j)
counter += 1
regDict['parIdx'] = parIdx
if 'allMeanGrpVolRDD' not in regDict or regDict['allMeanGrpVolRDD'].context._jvm is None:
logger.info('XYAlignedGroupsOptimization making allMeanGrpVolAll')
allMeanGrpVolAll = np.concatenate(regDict['allMeanGrpVol'])
allMeanGrpVolRDD = sc.parallelize(izip(iter(parIdx), iter(allMeanGrpVolAll)),
min(len(parIdx), sc.defaultParallelism * 10))
allMeanGrpVolRDD.cache()
allMeanGrpVolRDD.count()
regDict['allMeanGrpVolRDD'] = allMeanGrpVolRDD
result = differential_evolution(optimizeFanoGroups, bounds=[(0.05, 50), (5, 25), (10, 800), (5, 6)],
args=(sc, regDict, percentile, start),
maxiter=5, tol=1e-2, popsize=4, mutation=(0.5, 1), disp=True, polish=False)
regDict['optimizationResultGroups'] = result
lambdaMotion = result.x[0] / 100.
sigmaTime = result.x[1]
sigmaPix = result.x[2] / 100.
structureSize = int(result.x[3])
logger.info('Running on all data')
getXYAlignedGroups(sc, regDict, lambdaMotion=lambdaMotion, sigmaPix=sigmaPix, sigmaTime=sigmaTime,
structureSize=structureSize)
def optimizeFanoGroups(params, sc, regDict, percentile, start):
""" test params with getWeightedShifts by calculating the % change in # of NaNs and fano factor
:param params: tuple of (lambda_motion, sigma_time, sigma_pix)
:param sc: Spark Context
:param regDict: registration dict
:param percentile: to use for fano factor
:param start: start time of the optimization function
:return:
"""
lambda_motion = params[0] / 100.
sigma_time = params[1]
sigma_pix = params[2] / 100.
structureSizeTemp = int(params[3])
getXYAlignedGroups(sc, regDict, optimization=True, lambdaMotion=lambda_motion, sigmaTime=sigma_time,
sigmaPix=sigma_pix, structureSize=structureSizeTemp)
allGroups = np.array(regDict['planeGrpAlign'])
sz = allGroups.shape
dims = regDict['dims']
allGroups = allGroups.reshape(sz[0], sz[1], dims[0], dims[1])
meanAllGroups = np.nanmean(allGroups, axis=1)
t = np.nanpercentile(meanAllGroups, 50)
mask = allGroups < t
sz2 = allGroups.shape
Nans = np.sum(np.isnan(allGroups)).astype(float) / np.prod(sz2) * 100
allGroups[mask] = np.nan
mean = np.nanmean(allGroups, axis=1)
var = np.nanvar(allGroups, axis=1)
fano = np.nanpercentile(var / mean, percentile)
assert fano > 0
fano_rel = fano / regDict['fano_group_volume'] * 100 - 100
Nans_rel = Nans - regDict['nans_group_volume']
resultTemp = fano_rel + Nans_rel
m, s = divmod(time.time() - start, 60)
logger.info(
'L:% 6.3f,T:% 4.1f,P:% 4.2f,S:%d,N:%.2f,f:%.2f,r:%.3f, %02d:%02d' %
(lambda_motion, sigma_time, sigma_pix, structureSizeTemp, Nans_rel, fano_rel, resultTemp, m, s))
return resultTemp
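# --- Illustrative sketch (not part of the original pipeline) ---
# The optimizers in this file hand scaled parameters to scipy's differential_evolution and rescale
# them inside the objective (e.g. lambda is searched over [0.05, 50] but used as x / 100).
# A minimal, self-contained example of that pattern on a toy quadratic objective (all names and
# numbers are placeholders):
def _example_differential_evolution():
    from scipy.optimize import differential_evolution

    def toy_objective(params, target):
        lam = params[0] / 100.          # searched on a x100 scale, used as a fraction
        sigma = params[1]
        return (lam - target[0]) ** 2 + (sigma - target[1]) ** 2

    result = differential_evolution(toy_objective, bounds=[(0.05, 50), (5, 25)],
                                    args=((0.15, 10.0),), maxiter=5, popsize=4,
                                    mutation=(0.5, 1), tol=1e-2, polish=False, seed=0)
    return result.x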
def getXYAlignedGroups(sc, regDict, optimization=False, useVolumeShifts=False, lambdaMotion=0.15, microMotionGain=1.0,
sigmaPix=1.0, sigmaTime=36, structureSize=3):
""" register in X and Z the groups
:param sc: SparkContext
:param regDict: registration dict
:param optimization: flag to indicate if called from the optimization function
:param useVolumeShifts: flag to just shift the data using the priors
:param lambdaMotion:
:param microMotionGain:
:param sigmaPix:
:param sigmaTime:
:param structureSize:
:return: adds to regDict:
1) planeGrpAlign
2) shiftsGroups
3) shiftsGroupsTime
"""
start = time.time()
if not optimization:
fieldMaskBool = regDict['fieldMaskBool']
fieldDur = regDict['fieldDur']
fieldTimes = np.asarray(range(0, len(fieldMaskBool))) * fieldDur
fieldTimes = fieldTimes[fieldMaskBool]
regDict['fieldTimes'] = fieldTimes
# creating indexing
allMeanGrpVol = regDict['allMeanGrpVol']
si = regDict['expandedTarget'].shape
totalGroups = sum([x.shape[0] for x in allMeanGrpVol])
regDict['totalGroups'] = totalGroups
counter = 0
parIdx = [None] * totalGroups
for i in range(0, si[2]):
for j in range(0, allMeanGrpVol[i].shape[0]):
parIdx[counter] = (i, j)
counter += 1
regDict['parIdx'] = parIdx
if 'allMeanGrpVolRDD' not in regDict or regDict['allMeanGrpVolRDD'] is None:
logger.info('getXYAlignedGroups making allMeanGrpVolAll')
allMeanGrpVolAll = np.concatenate(regDict['allMeanGrpVol'])
allMeanGrpVolRDD = sc.parallelize(izip(iter(parIdx), iter(allMeanGrpVolAll)),
min(len(parIdx), sc.defaultParallelism * 10))
allMeanGrpVolRDD.cache()
allMeanGrpVolRDD.count()
regDict['allMeanGrpVolRDD'] = allMeanGrpVolRDD
# get all attributes from dict and broadcast
regDict['lambdaMotionGroups'] = lambdaMotion
regDict['microMotionGainGroups'] = microMotionGain
regDict['sigmaPixGroups'] = sigmaPix
regDict['sigmaTimeGroups'] = sigmaTime
regDict['structureSizeGroup'] = structureSize
parIdx = regDict['parIdx']
expandedTarget = regDict['expandedTarget']
groupVolShiftsBC = sc.broadcast(regDict['grpVolShifts'])
allMeanGrpVolRDD = regDict['allMeanGrpVolRDD']
velPerPixXY = regDict['velPerPixXY']
flyLines = regDict['flyLines']
auPerPhoton = regDict['auPerPhoton']
pixSizeXY = regDict['pixSizeXY']
fieldTimes = regDict['fieldTimes']
timeWeight = dict()
for k in range(0, expandedTarget.shape[2]):
timeWeight[k] = norm.pdf(np.absolute(fieldTimes[k] - fieldTimes) / (sigmaTime / microMotionGain)).reshape(-1, 1)
timeWeightBC = sc.broadcast(timeWeight)
bestTarget = copy.deepcopy(expandedTarget).astype(np.float64)
for i in range(bestTarget.shape[2]):
bestTarget[:, :, i] = inpaint_biharmonic(bestTarget[:, :, i], np.isnan(bestTarget[:, :, i]))
# bestTarget[:, :, i] = white_tophat(bestTarget[:, :, i], structureSize)
bestTargetBC = sc.broadcast(bestTarget)
def getGroupWeightedPlanarDisplacement(kv):
from numpy import unravel_index, argmax
from skimage.feature import match_template, peak_local_max
from skimage import measure
from scipy.stats import norm
from scipy.ndimage.interpolation import shift
key, array1 = kv
array1 = array1.astype(np.float64)
bestTarget = bestTargetBC.value.astype(np.float64)
initShift = -copy.deepcopy(groupVolShiftsBC.value[key[1], key[0], :-1])
si2 = np.asarray(bestTarget.shape, dtype='float32')
bestShift = np.zeros((int(si2[2]), 2))
conWeight = np.zeros((int(si2[2]), 1))
# initial plane by plane displacement
for z in range(int(si2[2])):
planeShift = copy.deepcopy(initShift)
planeShift[0] = planeShift[0] - np.floor(flyLines[z] / 2).astype(int)
grid0, grid1 = np.meshgrid(np.asarray(range(int(si2[1]))), np.asarray(range(int(si2[0]))))
grid0 = (grid0 - np.floor(si2[1] / 2) + planeShift[1]).flatten()
grid1 = (grid1 - np.floor(si2[0] / 2) + planeShift[0]).flatten()
# one pixel shift is considered noise and ignored
grid0sign = np.sign(grid0)
grid1sign = np.sign(grid1)
grid0 = (np.absolute(grid0) - 1.)
grid1 = (np.absolute(grid1) - 1.)
grid0[np.nonzero(grid0 < 0)[0]] = 0
grid1[np.nonzero(grid1 < 0)[0]] = 0
grid0 = grid0 * grid0sign
grid1 = grid1 * grid1sign
grid0 = grid0 * velPerPixXY
grid1 = grid1 * velPerPixXY
distGrid = (grid0 ** 2 + grid1 ** 2) ** 0.5
distGrid = distGrid.reshape(np.round(si2[0:2]).astype(int))
# convert to log-linear probability
distGrid = np.exp(distGrid * (-np.asarray(1.0 / (lambdaMotion * microMotionGain), dtype='float64')))
arrayPlane = copy.deepcopy(array1[:, :, z])
arrayPlane = arrayPlane[flyLines[z]:, :]
# arrayPlane = white_tophat(arrayPlane[flyLines[z]:, :], structureSize)
targetPlane = bestTarget[:, :, z]
# calculate shift probabilities
nPhotonM = np.sum(arrayPlane.flatten()) / auPerPhoton
c = match_template(targetPlane, arrayPlane, pad_input=True)
# find max correlation
s2 = c.shape
maxC = np.nanmax(c.flatten())
# for each shift in the xcorr map, convert to the probability that it exceeds the actual peak (Fisher z-transform)
maxC2 = 0.5 * np.log((1.0 + maxC) / (1.0 - maxC))
c2 = 0.5 * np.log((1.0 + c) / (1.0 - c))
# z-score, p-value
zScore = (maxC2 - c2) / ((1.0 / (nPhotonM - 3) + 1.0 / (nPhotonM - 3)) ** 0.5)
p = (1 - norm.cdf(zScore)) * 2
# prevent weighted micro shifts by retaining only local maxima > 2um apart
p2 = p * peak_local_max(p, min_distance=2000.0 / pixSizeXY, indices=False, threshold_rel=0,
exclude_border=False).astype('float64')
# weight against peaks that are improbably fast movements (distGrid)
bestShiftPlane = unravel_index(argmax(p2 * distGrid), s2)
bestShift[z, :] = -(bestShiftPlane - np.floor(np.asarray(p.shape) / 2))
# Estimate the "gyration distance" around the selected peak using second moments of image
# Moment Functions in Image Analysis: Theory and Applications
# Mukundan and Ramakrishnan
cm = measure.moments_central(p, bestShiftPlane[0], bestShiftPlane[1], order=2)
xGyr = (cm[0, 2] / cm[0, 0]) ** 0.5
yGyr = (cm[2, 0] / cm[0, 0]) ** 0.5
eqGyr = (xGyr * yGyr / np.pi) ** 0.5
conWeight[z] = (1 - norm.cdf(eqGyr / sigmaPix)) * 2
# neighborhood displacement depending on registration certainty
bestShift2 = copy.deepcopy(bestShift)
for k2 in range(int(si2[2])):
netWeight = timeWeightBC.value[k2] * conWeight
bestShift2[k2, :] = bestShift[argmax(netWeight), :]
bestShift2[k2, 0] = bestShift2[k2, 0] + np.floor(flyLines[k2] / 2).astype(int)
# shift and NaN fly lines
for z in range(int(array1.shape[2])):
# if optimization:
# array1[:, :, z] = white_tophat(array1[:, :, z], structureSize)
array1[array1 < 0] = 0
array1[0:flyLines[z], :, z] = 0
if useVolumeShifts:
s = -initShift
array1[:, :, z] = shift(array1[:, :, z], s, cval=float('nan'))
bestShift2[z, :] = initShift
else:
s = -bestShift2[z, :]
array1[:, :, z] = shift(array1[:, :, z], s, cval=float('nan'))
nFlyLines = int(flyLines[z] + s[0])
if nFlyLines > 0:
array1[0:nFlyLines, :, z] = np.NAN
# array1[array1 < 0] = np.NAN
return key, (bestShift2, array1)
# registration of groups to initial target
regGrpVol = allMeanGrpVolRDD.map(getGroupWeightedPlanarDisplacement).collectAsMap()
# pulling out the registered plains
nPlane = regGrpVol[parIdx[0]][1].shape[2]
planeGrpAlign = [None] * nPlane
shifts = [None] * nPlane
for j in range(int(nPlane)):
nextIdx = np.asarray([i[0] == j for i in parIdx]).nonzero()[0]
planeGrpAlign[j] = np.asarray([regGrpVol[parIdx[i]][1][:, :, j] for i in nextIdx])
shifts[j] = np.asarray([regGrpVol[parIdx[i]][0][j, :] for i in nextIdx])
planeGrpAlign[j] = planeGrpAlign[j].reshape(planeGrpAlign[j].shape[0], -1)
regDict['planeGrpAlign'] = planeGrpAlign
regDict['shiftsGroups'] = shifts
if not optimization:
# reorder shifts per time point
groups = regDict['groups'].astype(int)
shifts2 = np.stack(shifts).transpose(1, 0, 2)
shiftsGroupsTime = np.zeros((groups.shape[0], groups.shape[1], 2), dtype=int)
for timePoint, allPlaneGroups in enumerate(groups):
for plane, group in enumerate(allPlaneGroups):
shiftsGroupsTime[timePoint, plane, :] = shifts2[group, plane, :]
regDict['shiftsGroupsTime'] = shiftsGroupsTime
data = list()
data.append(dict(y=np.max(regDict['shiftsGroupsTime'], axis=0)[:, 0], type='scatter', name='max x'))
data.append(dict(y=np.max(regDict['shiftsGroupsTime'], axis=0)[:, 1], type='scatter', name='max y'))
data.append(dict(y=np.mean(regDict['shiftsGroupsTime'], axis=0)[:, 0], type='scatter', name='mean x'))
data.append(dict(y=np.mean(regDict['shiftsGroupsTime'], axis=0)[:, 1], type='scatter', name='mean y'))
data.append(dict(y=np.std(regDict['shiftsGroupsTime'], axis=0)[:, 0], type='scatter', name='std x'))
data.append(dict(y=np.std(regDict['shiftsGroupsTime'], axis=0)[:, 1], type='scatter', name='std y'))
py.iplot(dict(data=data))
m, s = divmod(time.time() - start, 60)
logger.info('Time: %02d:%02d' % (m, s))
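# --- Illustrative sketch (not part of the original pipeline) ---
# getGroupWeightedPlanarDisplacement converts a normalized cross-correlation map into per-shift
# probabilities: every correlation value is Fisher z-transformed, compared to the peak with a
# z-score whose variance shrinks with the photon count, and converted to a two-sided p-value.
# The same step in isolation (toy inputs; assumes nPhoton > 3; the helper name is made up):
def _example_correlation_to_pmap(c, nPhoton):
    import numpy as np
    from scipy.stats import norm
    c = np.clip(np.asarray(c, dtype=float), -0.999999, 0.999999)   # keep the log argument finite
    zPeak = 0.5 * np.log((1.0 + np.nanmax(c)) / (1.0 - np.nanmax(c)))
    zMap = 0.5 * np.log((1.0 + c) / (1.0 - c))
    zScore = (zPeak - zMap) / ((2.0 / (nPhoton - 3)) ** 0.5)        # 1/(n-3) + 1/(n-3) = 2/(n-3)
    return (1 - norm.cdf(zScore)) * 2                               # two-sided p-value per shift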
@fallback
def getFinalGroupsSVD(sc, session, regDict, flyLinesFlag=False, finalClusterNum=45, finalIterKMeans=600,
finalNInit=25, finalTol=1e-8, finalKSVD=80, finalIterSVD=30, largeGroupMean=False, crop=None):
"""
:param sc: Spark Context
:param session: SpineSession object
:param regDict: registration dict
:param flyLinesFlag: whether to exclude fly lines before shifting the data in XY
:param finalClusterNum: Number of clusters to form
:param finalIterKMeans: Number of iterations in the KMeans
:param finalNInit: Number of initializations in the KMeans
:param finalTol: Tolerance of the KMeans
:param finalKSVD: Number of SVD components to calculate
:param finalIterSVD: Number of iterations in the SVD
:param largeGroupMean: passed through to nanMeanByIndex
:param crop: crop planes / lines with crossing cells. If None will not crop.
:return: adds to regDict:
1) finalGroups: cluster assignments
2) finalGrpImg: the groups themselves
"""
# get data from regDict
start = time.time()
regDict['finalClusterNum'] = finalClusterNum
regDict['finalIterKMeans'] = finalIterKMeans
regDict['finalNInit'] = finalNInit
regDict['finalTol'] = finalTol
regDict['finalKSVD'] = finalKSVD
regDict['finalIterSVD'] = finalIterSVD
regDict['finalCrop'] = crop
indexNoAP = np.where(regDict['noAP'])[0]
flyLines = regDict['flyLines']
data = regDict['data']
# broadcast shifts
shiftsBC = sc.broadcast(regDict['shiftsGroupsTime'])
def applyShifts_inner(kv):
from scipy.ndimage.interpolation import shift as sp_shift
from scipy.stats.mstats import zscore
k, v = kv
k = np.array(k[0]).astype(int)
v = v.astype(np.float32)
if extendedFlag:
sz = v.shape
out = np.zeros((sz[0] * 2, sz[1] * 2, sz[2])).astype('float32')
out[:] = cVal
if flyLinesFlag:
for p in range(v.shape[2]):
v[:flyLines[p], :, p] = cVal
out[sz[0] // 2:(sz[0] // 2 + sz[0]), sz[1] // 2:(sz[1] // 2 + sz[1]), :] = v
v = out
for p in range(v.shape[2]):
shift = shiftsBC.value[k, p]
plane = v[:, :, p]
if flyLinesFlag and not extendedFlag:
plane[0:flyLines[p], :] = -0.001
plane = sp_shift(plane, -shift, cval=cVal, prefilter=False)
if flyLinesFlag and not extendedFlag:
plane[plane < 0] = cVal
v[:, :, p] = plane
if not np.isnan(cVal):
v = zscore(v, axis=None)
return v
extendedFlag = False
cVal = 0
shiftedData = data.map(applyShifts_inner, with_keys=True)
if crop is not None:
shiftedData = shiftedData.map(lambda x: x[crop['ySlice'], crop['xSlice'], crop['zIndex']])
shiftedData.cache()
shiftedData.count()
# train without time point with APs
m, s = divmod(time.time() - start, 60)
logger.info('Pre: %02d:%02d' % (m, s))
sys.stdout.flush()
U, _, _ = getSVD(shiftedData, finalKSVD, getComponents=False, getS=False, normalization='mean')
m, s = divmod(time.time() - start, 60)
logger.info('SVD: %02d:%02d' % (m, s))
sys.stdout.flush()
model = KMeans(n_clusters=finalClusterNum, init='k-means++', n_init=finalNInit, max_iter=finalIterKMeans,
tol=finalTol, precompute_distances='auto', verbose=0, random_state=None, copy_x=False, n_jobs=-1,
algorithm='auto').fit(U[indexNoAP, ...])
clusters = model.predict(U)
m, s = divmod(time.time() - start, 60)
logger.info('Cluster: %02d:%02d' % (m, s))
# remove empty groups
uIndex, loc = np.unique(clusters, return_inverse=True)
clusters2 = np.zeros(clusters.shape)
for i in range(len(uIndex)):
clusters2[loc == i] = i
clusters = clusters2
# return data in expanded space
extendedFlag = True
cVal = float('NaN')
returnData = data.map(applyShifts_inner, with_keys=True)
sys.stdout.flush()
try:
# will fail due to Spark's handling of data bigger than 2^32, see https://issues.apache.org/jira/browse/SPARK-6235
finalGroup = nanMeanByIndex(returnData, clusters, largeGroupMean=largeGroupMean)
except Exception:
# doing it one cluster at a time
logger.info('failed with nanMeanByIndex')
returnData.cache()
returnData.count()
sz2 = returnData.shape
finalGroup = np.zeros((len(uIndex), sz2[1], sz2[2], sz2[3]))
for i, index in log_progress(enumerate(uIndex), every=1, size=len(uIndex), name='Groups'):
pos = np.sort(np.where(clusters == index)[0])
if len(pos) == 1:
finalGroup[i, :, :, :] = returnData[pos, :, :, :].toarray()
else:
# logger.info('%d: %d, ' % (i, len(pos)))
current = returnData[pos, :, :, :]
current.cache()
current.count()
mean = np.nan_to_num(np.nanmean(current.toarray(), axis=0))
MeanSz = mean.shape
MeanVec = mean.reshape(1, MeanSz[0] * MeanSz[1] * MeanSz[2])
CC = current.map(
lambda vol: np.corrcoef(np.nan_to_num(vol.reshape(1, MeanSz[0] * MeanSz[1] * MeanSz[2])),
MeanVec)[0, 1]).toarray()
CC = np.array(CC)
length = current.shape[0]
if length < 30:
points = np.array(length)
elif length < 100:
points = np.round(length / 2)
elif length < 200:
points = np.round(length / 3)
elif length < 300:
points = np.round(length / 4)
else:
points = np.round(length / 5)
points = points.astype(int)
ind = np.argpartition(CC, -points)[-points:]
finalGroup[i, :, :, :] = np.nanmean(current[ind, :, :, :].toarray(), axis=0)
current.uncache()
returnData.uncache()
regDict['finalGroups'] = clusters + 1
regDict['finalGrpImg'] = finalGroup
returnData.uncache()
m, s = divmod(time.time() - start, 60)
logger.info('Done: %02d:%02d' % (m, s))
sys.stdout.flush()
# plot results
ax1 = plt.subplot(1, 1, 1)
index2 = np.unique(clusters)
_, bins, _ = ax1.hist(clusters + 1, bins=len(index2))
ax1.set_ylabel('Timepoints', color='b')
ax1.set_xlabel('Cluster#')
for tl in ax1.get_yticklabels():
tl.set_color('b')
binCenters = 0.5 * (bins[1:] + bins[:-1])
ax2 = ax1.twinx()
Mean = np.nanmean(finalGroup, axis=(1, 2, 3))
Std = np.nanstd(finalGroup, axis=(1, 2, 3))
ax2.errorbar(binCenters, Mean, Std, ecolor='r', fmt='ro')
plt.grid(False)
ax2.set_ylabel(u'Intensity (±SD)', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.show()
return sc
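# --- Illustrative sketch (not part of the original pipeline) ---
# The fallback branch above averages only the cluster members that correlate best with the
# cluster mean, using np.argpartition to pick the top `points` entries without a full sort.
# A small in-memory version of that selection (toy arrays, no Spark/thunder objects; the helper
# name is made up and `points` must not exceed the number of members):
def _example_top_correlated_mean(volumes, points):
    import numpy as np
    volumes = np.asarray(volumes, dtype=float)                       # (members, ...) stack
    flat = np.nan_to_num(volumes.reshape(volumes.shape[0], -1))
    meanVec = np.nan_to_num(np.nanmean(volumes, axis=0)).reshape(1, -1)
    CC = np.array([np.corrcoef(v.reshape(1, -1), meanVec)[0, 1] for v in flat])
    ind = np.argpartition(CC, -points)[-points:]                     # indices of the best-correlated members
    return np.nanmean(volumes[ind], axis=0)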
def estZShiftsGrouping(sc, regDict, nZShifts=5, zFWHM=2.5, nanThreshold=200, crop=None, preFlag=True,
redoBestCrop=False, do_plot=True):
""" estimate Z shifts and make nZshifts new groups from postMeanGrpVol
:param sc: Spark Context
:param regDict: needs: postMeanGrpVol, Index, nCutLines, pixSizeXY, finalGroups, newShifts
:param nZShifts: final number of Z shifts to coalesce data into
:param zFWHM: FWHM of the z-axis PSF
:param nanThreshold
:param crop: crop dict from getCrop, if None will crop 4 lines in X and Y
:param preFlag: flag for doing tophat and z score on the groups
:param redoBestCrop: redo the best crop step, if false will take it from regDict
:return: adds: groupZ, grpZIdx, grpZPos, finalShifts
"""
start = time.time()
regDict['nZShifts'] = nZShifts
regDict['nanThresholdZShiftsGrouping'] = nanThreshold
regDict['zFWHM'] = zFWHM
groups = copy.deepcopy(regDict['finalGrpImg'])
finalGroups = copy.deepcopy(regDict['finalGroups'])
ngrp = np.array([np.sum(finalGroups == grp) for grp in np.unique(finalGroups)]).astype('float32')
weightedGroups = np.zeros(groups.shape)
for i in range(groups.shape[0]):
weightedGroups[i, :, :, :] = groups[i, :, :, :] * ngrp[i]
# get the best crop based on number of NaNs
origSz = groups.shape
logger.info('Original size: %s' % (origSz,))
groups = groups.transpose(1, 2, 3, 0).reshape(origSz[1], origSz[2], origSz[0] * origSz[3])
maxShifts = regDict['shiftsGroupsTime'].max(axis=(0, 1))
if maxShifts[0] > origSz[1] / 4:
maxShifts[0] = origSz[1] / 4
if maxShifts[1] > origSz[2] / 4:
maxShifts[1] = origSz[2] / 4
if maxShifts[0] < 4:
maxShifts[0] = 4
if maxShifts[1] < 4:
maxShifts[1] = 4
# check if crop is already made and used cached version
if 'groupsInit' not in regDict.keys() or redoBestCrop:
logger.info('Generating best crop')
returned, groups = getBestCrop(sc, groups, maxShifts=maxShifts, nanThreshold=nanThreshold * origSz[0],
inPaint=True, checkSize=False)
regDict['returnedInit'] = copy.deepcopy(returned)
regDict['groupsInit'] = copy.deepcopy(groups)
else:
logger.info('Using cached best crop')
returned = copy.deepcopy(regDict['returnedInit'])
groups = copy.deepcopy(regDict['groupsInit'])
expSz = groups.shape
groups = groups.reshape(expSz[0], expSz[1], origSz[3], origSz[0]).transpose(3, 0, 1, 2)
# crop cell body and some lines
if crop is None:
lines = 4
logger.info('No crop given, using %d lines' % lines)
groups = groups[:, lines:-lines, lines:-lines, :]
regDict['estZCropLines'] = lines
regDict['estZCropDict'] = None
else:
groups = groups[:, crop['ySlice'], crop['xSlice'], crop['zIndex']]
regDict['estZCropLines'] = None
regDict['estZCropDict'] = crop
m, s = divmod(time.time() - start, 60)
logger.info('group shape after crop: %s, time: %02d:%02d' % (groups.shape, m, s))
# calculate the correlation dissimilarity between all groups
from scipy.stats.mstats import zscore
groups2 = copy.deepcopy(groups)
if preFlag:
for i in range(groups.shape[0]):
for j in range(groups.shape[3]):
groups2[i, :, :, j] = white_tophat(groups2[i, :, :, j], 6)
groups2[i, :, :, :] = zscore(groups2[i, :, :, :], axis=None)
flatGroups = groups2.reshape(groups.shape[0], -1)
R = np.corrcoef(np.nan_to_num(flatGroups))
R = 1 - R
# order the groups based on similarity using a traveling salesman problem solver
bestPath = np.asarray(solve_tsp(R))
regDict['bestPath'] = copy.deepcopy(bestPath)
# Coalesce groups into nZShifts final zShift groups to minimize activity-based dissimilarity
grpsPerShift = np.floor(bestPath.shape[0] / nZShifts).astype(int)
si = weightedGroups.shape
logger.info('weightedGroup shape: %s' % (si,))
groupZ = np.zeros((nZShifts, si[1], si[2], si[3]))
grpZIdx = [None] * nZShifts
for i in range(nZShifts - 1):
grpZIdx[i] = bestPath[(i * grpsPerShift):((i * grpsPerShift) + grpsPerShift)]
groupZ[i, :, :, :] = np.nanmean(weightedGroups[grpZIdx[i], :, :, :], axis=0) * len(grpZIdx[i]) / np.sum(
ngrp[grpZIdx[i]])
grpZIdx[nZShifts - 1] = bestPath[((nZShifts - 1) * grpsPerShift):]
groupZ[nZShifts - 1, :, :, :] = np.nanmean(weightedGroups[grpZIdx[nZShifts - 1], :, :, :],
axis=0) * len(grpZIdx[nZShifts - 1]) / np.sum(ngrp[grpZIdx[nZShifts - 1]])
regDict['groupZfull'] = copy.deepcopy(groupZ)
# crop groupZ as groups
shiftIndex = np.argmax(np.array([s[1] for s in returned]))
borderFinal = returned[shiftIndex][0]
groupZ = groupZ[:, borderFinal[0]:-borderFinal[1], borderFinal[2]:-borderFinal[3], :]
groupZCC = copy.deepcopy(groupZ)
if crop is None:
lines = 12
groupZCC = groupZCC[:, lines:-lines, lines:-lines, :]
else:
groupZCC = groupZCC[:, crop['ySlice'], crop['xSlice'], crop['zIndex']]
origVols = groupZCC
# based on XY-shift correlations estimate Zshift between furthest groups
for i in range(groupZCC.shape[0]):
for k in range(groupZCC.shape[3]):
origVols[i, :, :, k] = inpaint_biharmonic(groupZCC[i, :, :, k], np.isnan(groupZCC[i, :, :, k]))
# calculate dissimilarity
flatOrigVols = origVols.reshape(origVols.shape[0], -1)
dissim = 1 - np.corrcoef(np.nan_to_num(flatOrigVols))
# filter XY to the larger Z-PSF
pixelSizeXY = regDict['pixSizeXY'] / float(1000)
filterFWHM = zFWHM / pixelSizeXY
filterSigma = filterFWHM / 2.355
filtVols = np.zeros(origVols.shape)
for i in range(0, origVols.shape[0]):
for j in range(0, origVols.shape[3]):
filtVols[i, :, :, j] = gaussian_filter(origVols[i, :, :, j], filterSigma, mode='wrap')
# calculate autocorrelation
ccXY = np.zeros(filtVols.shape[0:3])
midPlane = np.floor(filtVols.shape[3] / 2).astype(int)
for i in range(0, filtVols.shape[0]):
ccXY[i, :, :] = 1 - match_template(filtVols[i, :, :, :], filtVols[i, :, :, :],
pad_input=True, mode='wrap')[:, :, midPlane]
# estimate shift to corr curve
si = ccXY[0, :, :].shape
grid0, grid1 = np.meshgrid(np.asarray(range(0, si[1])), np.asarray(range(0, si[0])))
grid0 = (grid0 - np.floor(si[1] / 2)).flatten() * pixelSizeXY
grid1 = (grid1 - np.floor(si[0] / 2)).flatten() * pixelSizeXY
distGrid = (grid0 ** 2 + grid1 ** 2) ** 0.5
distGrid = (distGrid.reshape(-1, 1) * np.ones((1, ccXY.shape[0]))).T.flatten()
ccXY = ccXY.flatten()
uniqueDist = np.unique(distGrid.flatten())
ccDist = np.asarray([np.median(ccXY[distGrid == dist]) for dist in uniqueDist])
idx = np.argsort(ccDist)
f = PchipInterpolator(ccDist[idx], uniqueDist[idx])
maxZShift = f(dissim[0, -1]) * 2
# assign Z positions to each group assuming even spacing across groupZ
zSpacing = maxZShift / (nZShifts - 1)
grpZPos = (np.asarray(range(0, nZShifts)) - ((nZShifts - 1) / float(2))) * zSpacing
# newShifts = regDict['grpVolShifts'][:,:,:-1]
zshifts = np.ones(len(finalGroups))
zshiftsPos = np.ones(len(finalGroups))
for i in range(0, nZShifts):
for j in grpZIdx[i]:
zshiftsPos[finalGroups == (j + 1)] = grpZPos[i]
zshifts[finalGroups == (j + 1)] = i
# inpaint expandedZTargets
origSz = groupZ.shape
logger.info('origSz: %s' % (origSz,))
groupZ = groupZ.transpose(1, 2, 3, 0).reshape(origSz[1], origSz[2], origSz[0] * origSz[3])
def inPaintPar(volume):
mask = np.isnan(volume)
if np.sum(mask):
try:
return inpaint_biharmonic(volume, mask)
except ValueError:
return np.nan_to_num(volume)
else:
return volume
groupZ = td.images.fromarray(groupZ, engine=sc).map(inPaintPar).toarray()
expSz = groupZ.shape
logger.info('expSz shape: %s' % (expSz,))
groupZ = groupZ.reshape(expSz[0], expSz[1], origSz[3], origSz[0]).transpose(3, 0, 1, 2)
regDict['grpZIdxGroup'] = grpZIdx
regDict['grpZPosGroup'] = grpZPos
regDict['zshiftsGroup'] = zshifts.astype(int)
regDict['zshiftsPosGroup'] = zshiftsPos
regDict['expandedZTargets'] = groupZ
regDict['returnedEstZ'] = returned
if do_plot:
z, c = np.unique(regDict['zshiftsGroup'], return_counts=True)
fig = tls.make_subplots(rows=1, cols=3, specs=[[{'colspan': 2}, None, {}]], print_grid=False)
fig.append_trace(go.Scatter({'y': regDict['zshiftsGroup']}, ), 1, 1)
x = list(map(str, regDict['grpZPosGroup']))
fig.append_trace(go.Bar({'x': x, 'y': c, }, ), 1, 3)
fig['layout']['xaxis1'].update(title='Time points')
fig['layout']['yaxis1'].update(title='Z position (um)')
fig['layout']['xaxis2'].update(title='Z Position')
fig['layout']['yaxis2'].update(title='# Time points')
fig['layout'].update(showlegend=False)
py.iplot(fig)
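# --- Illustrative sketch (not part of the original pipeline) ---
# estZShiftsGrouping converts correlation dissimilarity into an estimated physical shift by
# building a monotone "dissimilarity vs. distance" curve from the filtered autocorrelation and
# inverting it with a PCHIP interpolator. The essential interpolation step on a toy monotone
# curve (all numbers are made up):
def _example_dissimilarity_to_distance():
    import numpy as np
    from scipy.interpolate import PchipInterpolator
    distance = np.linspace(0.0, 10.0, 21)                 # um
    dissimilarity = 1.0 - np.exp(-distance / 3.0)         # toy monotone dissimilarity curve
    idx = np.argsort(dissimilarity)
    f = PchipInterpolator(dissimilarity[idx], distance[idx])
    return float(f(0.5))                                  # estimated distance at dissimilarity 0.5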
def getShiftOffset(regDict):
"""
:param regDict:
:return:
"""
shiftIndex = np.argmax(np.array([s[1] for s in regDict['returnedEstZ']]))
borderFinal = np.array(regDict['returnedEstZ'][shiftIndex][0]).astype(float)
dims = regDict['dims'][:2]
mid = (np.array(dims) / 2).astype(float)
left = (mid[0] - borderFinal[0])
right = mid[0] - borderFinal[1]
up = mid[1] - borderFinal[2]
down = mid[1] - borderFinal[3]
offset = np.array([int((left - right) / 2), int((up - down) / 2)])
regDict['shiftsGroupsTime2'] = copy.deepcopy(regDict['shiftsGroupsTime']) + offset
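# --- Illustrative sketch (not part of the original pipeline) ---
# getShiftOffset turns the asymmetric border of the best crop into a single XY offset so that
# shifts computed against the cropped target can be applied in the original field of view.
# With toy numbers (dims and border values are made up; border is (top, bottom, left, right)):
def _example_shift_offset(dims=(128, 96), border=(10, 6, 8, 4)):
    import numpy as np
    border = np.asarray(border, dtype=float)
    mid = np.asarray(dims, dtype=float) / 2
    left, right = mid[0] - border[0], mid[0] - border[1]
    up, down = mid[1] - border[2], mid[1] - border[3]
    # a symmetric crop gives (0, 0); any asymmetry moves the centre accordingly
    return np.array([int((left - right) / 2), int((up - down) / 2)])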
def runVolReg(sc, regDict, TForm1, numTimepoints=10000, percentile=33):
"""
:param sc: Spark Context
:param TForm1: Transformation dict for embedding to 3d space
:param regDict: registration dict
:param numTimepoints: number of time points to use in optimization
:param percentile: cutoff for the distribution of the optimization metric
:return:
"""
WeightedShiftsOptimization(sc, regDict, numTimepoints=numTimepoints, usePerPlanePrior=True, percentile=percentile)
shiftList = ['newShifts', 'grpVolShiftsTime', 'shiftsGroupsTime']
for shift in shiftList:
logger.info(shift)
shifts = regDict[shift]
regTemp = applyShifts(sc, regDict, optimization=False, shifts=shifts)
meanTemp = regTemp.map(lambda x: np.nan_to_num(x)).mean().toarray()
embedTemp = getExampleVol(sc, meanTemp, TFormDict=TForm1, project=True)
writeTiff(regDict['fullPath'], embedTemp, shift + '_regData')
getAlignedGroups(regDict)
def WeightedShiftsOptimization(sc, regDict, numTimepoints=10000, usePerPlanePrior=False, percentile=33):
""" optimization of time points alignment in x y by minimizing fano factor and penalizing non-linearly the number
of NaNs, uses numTimepoints random subset of time points
:param sc: Spark context
:param regDict: registration dict
:param numTimepoints: the number of time points to use
:param usePerPlanePrior: if True will use the per-plane priors (default: False)
:param percentile: threshold for optimization
:return:
"""
start = time.time()
getShiftOffset(regDict)
data = regDict['data']
sz = data.shape[0]
index = np.array(sample(range(sz), numTimepoints))
regDict['dataMidIndex'] = index
dataMid = data[index, :, :, :]
dataMid = balanced_repartition(dataMid.tordd(), sc.defaultParallelism * 3)
dataMid = td.images.fromrdd(dataMid.sortByKey())
dataMid.cache()
regDict['dataMid'] = dataMid
m, s = divmod(time.time() - start, 60)
logger.info('Got data for optimization with %d time points, %02d:%02d' % (numTimepoints, m, s))
# get volume shift values
array = applyShifts(sc, regDict, shifts=regDict['shiftsGroupsTime'][index, ...], optimization=True).toarray()
Mean = np.nanmean(array, axis=0)
t = np.nanpercentile(Mean, 40)
mask = array < t
sz2 = array.shape
regDict['Nans_volume'] = np.sum(np.isnan(array)).astype(float) / np.prod(sz2) * 100
array[mask] = np.nan
mean = np.nanmean(array, axis=0)
var = np.nanvar(array, axis=0)
regDict['fano_volume'] = np.nanpercentile(var / mean, percentile)
m, s = divmod(time.time() - start, 60)
logger.info('Volume shifts fano: %.2f, Nans: %.2f, %02d:%02d' %
(regDict['fano_volume'], regDict['Nans_volume'], m, s))
if 'limitedShiftsLPBC' not in regDict.keys():
groupedTargetBC = sc.broadcast(regDict['expandedZTargets'])
regDict['groupedTargetBC'] = groupedTargetBC
zshiftsBC = sc.broadcast(regDict['zshiftsGroup'])
regDict['zshiftsBC'] = zshiftsBC
regDict['shiftsGroupsTimeBC'] = sc.broadcast(regDict['shiftsGroupsTime2'])
optimizationResult = differential_evolution(optimizeFano, bounds=[(0.5, 100), (3, 40), (10, 1000)],
args=(sc, regDict, percentile, usePerPlanePrior, start),
maxiter=5, popsize=3, mutation=(0.5, 1),
disp=True,
polish=False)
regDict['optimizationResultTimepoints'] = optimizationResult
regDict['optimizationResultTimepointsPercentile'] = percentile
lambdaMotion = optimizationResult.x[0] / 100.0
sigmaTime = optimizationResult.x[1]
sigmaPix = optimizationResult.x[2] / 100.0
getWeightedShifts(sc=sc, regDict=regDict, usePerPlanePrior=usePerPlanePrior, lambdaMotion=lambdaMotion,
sigmaPix=sigmaPix, sigmaTime=sigmaTime)
regData = applyShifts(sc=sc, regDict=regDict)
regDict['regData'] = regData
regDict['regData'].cache()
regDict['regData'].count()
def optimizeFano(params, sc, regDict, percentile, usePerPlanePrior, start):
""" test params with getWeightedShifts by calculating the % change in # of NaNs and fano factor
:param params: tuple of (lambda_motion, sigma_time, sigma_pix)
:param sc: Spark Context
:param regDict: registration dict
:param percentile: to use for fano factor
:param usePerPlanePrior: use priors in the registration function
:param start: start time of the optimization function
:return:
"""
lambda_motion = float(params[0]) / 100.0
sigma_time = float(params[1])
sigma_pix = float(params[2]) / 100.0
getWeightedShifts(sc, regDict, optimization=True, usePerPlanePrior=usePerPlanePrior,
lambdaMotion=lambda_motion, sigmaPix=sigma_pix, sigmaTime=sigma_time)
after_shifts = applyShifts(sc, regDict, optimization=True)
after_shifts.cache()
after_shifts.count()
Mean = after_shifts.map_as_series(lambda x: np.array(np.nanmean(x), ndmin=1)).toarray()
sz2 = after_shifts.shape
Nans = np.sum(after_shifts.map(lambda x: np.sum(np.isnan(x))).sum().toarray()).astype(float) / np.prod(sz2) * 100
t = np.nanpercentile(Mean, 40)
def apply_mask(x):
mask = x < t
x[mask] = np.nan
return x
mean = after_shifts.map(apply_mask).map_as_series(lambda x: np.array(np.nanmean(x), ndmin=1)).toarray()
var = after_shifts.map(apply_mask).map_as_series(lambda x: np.array(np.nanvar(x), ndmin=1)).toarray()
fano = np.nanpercentile(var / mean, percentile)
assert fano > 0.0
fano_rel = fano / regDict['fano_volume'] * 100 - 100
Nans_rel = Nans - regDict['Nans_volume']
result = fano_rel + Nans_rel
after_shifts.uncache()
m, s = divmod(time.time() - start, 60)
logger.info(
'L:% 6.3f,T:% 4.1f,P:% 4.2f,N:%.2f,f:%.2f,r:%.3f, %02d:%02d' %
(lambda_motion, sigma_time, sigma_pix, Nans_rel, fano_rel, result, m, s))
return result
def getWeightedShifts(sc, regDict, optimization=False, usePerPlanePrior=False, lambdaMotion=2.0, microMotionGain=1.0,
sigmaPix=3.0, sigmaTime=36.0):
""" X Y align time point while considering certainty, prior and neighboring planes
:param sc: Spark Context
:param regDict: registration dict
:param optimization: if True called from optimization function WeightedShiftsOptimization
:param usePerPlanePrior: if True will use priors instead of calculated shifts
:param lambdaMotion: How big of a motion do we want to consider for current vs. prior
:param microMotionGain: currently not used (set to 1.0)
:param sigmaPix: How big of a motion do we want to consider for current vs. previous plane
:param sigmaTime: how much do we weight the previous shifts in time
:return: newShifts
"""
if not optimization:
# limitedShiftsLPBC = sc.broadcast(regDict['limitedShiftsLP'])
# regDict['limitedShiftsLPBC'] = limitedShiftsLPBC
groupedTargetBC = sc.broadcast(regDict['expandedZTargets'])
regDict['groupedTargetBC'] = groupedTargetBC
zshiftsBC = sc.broadcast(regDict['zshiftsGroup'])
regDict['zshiftsBC'] = zshiftsBC
shiftsGroupsTimeBC = sc.broadcast(regDict['shiftsGroupsTime2'])
regDict['shiftsGroupsTimeBC'] = shiftsGroupsTimeBC
else:
# limitedShiftsLPBC = regDict['limitedShiftsLPBC']
groupedTargetBC = regDict['groupedTargetBC']
zshiftsBC = regDict['zshiftsBC']
shiftsGroupsTimeBC = regDict['shiftsGroupsTimeBC']
dims = regDict['expandedZTargets'][0, :, :, :].shape
velPerPixXY = regDict['velPerPixXY']
regDict['lambdaMotionTimepoints'] = lambdaMotion
regDict['microMotionGainTimepoints'] = microMotionGain
regDict['sigmaPixTimepoints'] = sigmaPix
regDict['sigmaTimeTimepoints'] = sigmaTime
medianFly = np.median(regDict['flyLines'])
auPerPhoton = regDict['auPerPhoton']
pixSizeXY = regDict['pixSizeXY']
flyLines = copy.deepcopy(regDict['flyLines'])
fieldMaskBool = regDict['fieldMaskBool']
fieldDur = regDict['fieldDur']
optimizationIndex = regDict['dataMidIndex']
# pre-calculate the field times and per-plane time weights
fieldTimes = np.asarray(range(len(fieldMaskBool))) * fieldDur
fieldTimes = fieldTimes[fieldMaskBool]
timeWeight = dict()
for k in range(regDict['dims'][2]):
timeWeight[k] = norm.pdf(np.absolute(fieldTimes[k] - fieldTimes) / (sigmaTime / microMotionGain)).reshape(-1, 1)
# offset per plane
offset = (medianFly / 2).astype(int)
medianFly = int(medianFly)
nanLines = flyLines - medianFly
si = np.asarray(dims, dtype='float32')
def getWeightedPlanarDisplacement(kv):
from numpy import unravel_index, argmax
from skimage.feature import match_template, peak_local_max
from skimage import measure
from scipy.stats import norm
key, array1 = kv
key = np.array(key[0]).astype(int)
if optimization:
key = (optimizationIndex[key],)
array1 = array1.astype(dtype='float64')
array1 = array1[medianFly:, :, :]
for i in range(array1.shape[2]):
if nanLines[i] > 0:
array1[:nanLines[i], :, i] = 0.0
# array1[:, :, i] = inpaint_biharmonic(array1[:, :, i], np.isnan(array1[:, :, i]))
bestShift = np.zeros((si[2].astype(int), 2))
conWeight = np.zeros((si[2].astype(int), 1))
# get z Target
bestTarget = groupedTargetBC.value[zshiftsBC.value[key[0]], :, :, :].squeeze()
# initial plane by plane displacement
for i in range(int(si[2])):
if usePerPlanePrior:
initShift = shiftsGroupsTimeBC.value[key, i]
# else:
# initShift = limitedShiftsLPBC.value[key[0]]
initShift[0] = initShift[0] - offset
a = range(int(si[1]))
b = range(int(si[0]))
grid0, grid1 = np.meshgrid(np.asarray(a), np.asarray(b))
grid0 = (grid0 - np.floor(si[1] / 2) + initShift[1]).flatten()
grid1 = (grid1 - np.floor(si[0] / 2) + initShift[0]).flatten()
# one pixel shift is considered noise and ignored
grid0sign = np.sign(grid0)
grid1sign = np.sign(grid1)
grid0 = (np.absolute(grid0) - 1.)
grid1 = (np.absolute(grid1) - 1.)
grid0[np.nonzero(grid0 < 0)[0]] = 0
grid1[np.nonzero(grid1 < 0)[0]] = 0
grid0 = grid0 * grid0sign
grid1 = grid1 * grid1sign
grid0 = grid0 * velPerPixXY
grid1 = grid1 * velPerPixXY
distGrid = (grid0 ** 2 + grid1 ** 2) ** 0.5
distGrid = distGrid.reshape(np.array(si[0:2]).astype(int))
# convert to log-linear probability
distGrid = np.exp(distGrid * (-np.array(1.0 / (lambdaMotion * microMotionGain), dtype='float64')))
arrayPlane = array1[:, :, i]
# arrayPlane[0:flyLines[i], :] = 0
targetPlane = bestTarget[:, :, i]
# calculate shift probabilities
nPhotonM = np.sum(arrayPlane.flatten()) / auPerPhoton
c = match_template(targetPlane, arrayPlane, pad_input=True)
# find max correlation
s = c.shape
maxCorr = np.nanmax(c.flatten())
# for each shift in xcorr, convert to probability it is greater than actual peak
# Fisher correction
maxCorr2 = 0.5 * np.log((1.0 + maxCorr) / (1.0 - maxCorr))
c2 = 0.5 * np.log((1.0 + c) / (1.0 - c))
# z-score
zScore = (maxCorr2 - c2) / ((1.0 / (nPhotonM - 3) + 1.0 / (nPhotonM - 3)) ** 0.5)
# p-value
p = (1 - norm.cdf(zScore)) * 2
# prevent weighted microshifts by retaining only local maxima > 2um apart
p2 = p * peak_local_max(p, min_distance=float(2000) / pixSizeXY, indices=False, threshold_rel=0,
exclude_border=False).astype('float64')
# weight against peaks that are improbably fast movements (distGrid)
bestShiftPlane = unravel_index(argmax(p2 * distGrid), s)
bestShift[i, :] = -(bestShiftPlane - np.floor(np.asarray(p.shape) / 2))
# Estimate the "gyration distance" around the selected peak using second moments of image
# Moment Functions in Image Analysis: Theory and Applications
# Mukundan and Ramakrishnan
cm = measure.moments_central(p, bestShiftPlane[0], bestShiftPlane[1], order=2)
xGyr = (cm[0, 2] / cm[0, 0]) ** 0.5
yGyr = (cm[2, 0] / cm[0, 0]) ** 0.5
eqGyr = (xGyr * yGyr / np.pi) ** 0.5
conWeight[i] = (1 - norm.cdf(eqGyr / sigmaPix)) * 2
# neighborhood displacement depending on registration certainty
bestShift2 = copy.deepcopy(bestShift)
for i in range(int(si[2])):
netWeight = timeWeight[i] * conWeight
bestShift2[i, :] = bestShift[argmax(netWeight), :]
bestShift2[:, 0] = bestShift2[:, 0] + offset
return bestShift2
if optimization:
newShifts = regDict['dataMid'].map(getWeightedPlanarDisplacement, with_keys=True, dtype=np.float64,
value_shape=(regDict['dims'][2], 2)).toarray()
else:
newShifts = regDict['data'].map(getWeightedPlanarDisplacement, with_keys=True, dtype=np.float64,
value_shape=(regDict['dims'][2], 2)).toarray()
regDict['newShifts'] = newShifts
if not optimization:
plt.figure(figsize=(10, 5))
meanX = np.mean(newShifts[:, :, 0], axis=1)
meanY = np.mean(newShifts[:, :, 1], axis=1)
stdX = np.std(newShifts[:, :, 0], axis=1)
stdY = np.std(newShifts[:, :, 1], axis=1)
plt.subplot(2, 2, 1)
plt.plot(meanX, 'b')
plt.ylabel('mean x')
plt.subplot(2, 2, 2)
plt.plot(meanY, 'b')
plt.ylabel('mean y')
plt.subplot(2, 2, 3)
plt.plot(stdX, 'r')
plt.ylabel('std x')
plt.subplot(2, 2, 4)
plt.plot(stdY, 'r')
plt.ylabel('std y')
# regDict['limitedShiftsLP'][:,:2]
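# --- Illustrative sketch (not part of the original pipeline) ---
# getWeightedShifts biases the correlation peak search with a motion prior: every candidate shift
# is mapped to a velocity-like distance from the prior shift and weighted by exp(-distance / lambda),
# so improbably fast movements are penalized. The weighting grid in isolation (all values are toy
# numbers, and the helper name is made up):
def _example_motion_prior_grid(shape=(32, 32), prior=(2.0, -1.0), velPerPix=0.5, lambdaMotion=2.0):
    import numpy as np
    grid0, grid1 = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))
    grid0 = (grid0 - np.floor(shape[1] / 2) + prior[1]).astype(float)
    grid1 = (grid1 - np.floor(shape[0] / 2) + prior[0]).astype(float)
    # one-pixel shifts are treated as noise, as in the pipeline
    grid0 = np.sign(grid0) * np.maximum(np.abs(grid0) - 1.0, 0.0) * velPerPix
    grid1 = np.sign(grid1) * np.maximum(np.abs(grid1) - 1.0, 0.0) * velPerPix
    distGrid = np.sqrt(grid0 ** 2 + grid1 ** 2)
    return np.exp(-distGrid / lambdaMotion)               # multiply this with the p-value map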
def applyShifts(sc, regDict, optimization=False, shifts=None, useGroupShifts=False):
""" shift the images object according to the new shifts
:param sc: SparkContext
:param regDict: needs newShifts, flyLines
:param optimization: if True was called from WeightedShiftsOptimization: will take the dataMid and will also top hat
the result
:param shifts: which shifts to use
:param useGroupShifts: use limitedShiftsLP - the prior
:return: registered Images object
"""
if shifts is None:
shifts = regDict['newShifts']
flyLines = regDict['flyLines']
AllShiftsArrayBC = sc.broadcast(shifts)
def XYVolumePlanarAlign(kv):
key, im = kv
key = np.array(key[0]).astype(int)
from scipy.ndimage.interpolation import shift
im = im.astype('float32')
for z in range(0, im.shape[2]):
# if optimization:
# im[:, :, z] = white_tophat(im[:, :, z], 3)
# im[im < 0] = 0
im[0:flyLines[z], :, z] = 0
if useGroupShifts:
s = -AllShiftsArrayBC.value[key, :]
im[:, :, z] = shift(im[:, :, z], s, cval=float('nan'))
else:
s = -AllShiftsArrayBC.value[key, z, :]
im[:, :, z] = shift(im[:, :, z], s, cval=float('nan'))
nFlyLines = int(flyLines[z] + s[0])
if nFlyLines > 0:
im[0:nFlyLines, :, z] = np.NAN
return im.astype(dtype='float32')
if optimization:
return regDict['dataMid'].map(XYVolumePlanarAlign, dtype=np.float32, value_shape=regDict['dims'],
with_keys=True)
else:
return regDict['data'].map(XYVolumePlanarAlign, dtype=np.float32, value_shape=regDict['dims'], with_keys=True)
def getAlignedGroups(regDict):
""" Get groups after x y alignment with (postMeanGrpVolNo) or without APs (postMeanGrpVolNoAP)
:param regDict: registration dict
:return: adds: postMeanGrpVol, postMeanGrpVolNoAP
"""
start = time.time()
finalGroups = copy.deepcopy(regDict['finalGroups'])
# Same group averaging, but replace time-points during putative APs with last preceding non-AP time-point
expPutAP = signal.convolve(regDict['putAP'], [1, 1, 1], mode='valid') > 0
notAP = np.logical_not(expPutAP).nonzero()[0]
reIdx = np.array(range(len(finalGroups)))
for i in range(len(finalGroups)):
newIdx = (notAP <= i).nonzero()[0]
if np.any(newIdx):
reIdx[i] = notAP[newIdx[-1]]
postMeanGrpVol = nanMeanByIndex(regDict['regData'], finalGroups)
postMeanGrpVolNoAP = nanMeanByIndex(regDict['regData'], finalGroups[reIdx])
regDict['postMeanGrpVol'] = postMeanGrpVol
regDict['postMeanGrpVolNoAP'] = postMeanGrpVolNoAP
m, s = divmod(time.time() - start, 60)
logger.info('getAlignedGroups: %02d:%02d' % (m, s))
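# --- Illustrative sketch (not part of the original pipeline) ---
# getAlignedGroups replaces every time point that falls inside a dilated action-potential window
# with the last preceding AP-free time point before averaging. The re-indexing logic in isolation,
# on a toy boolean putAP trace (mode='same' is used here only to keep the toy trace the same
# length as the input):
def _example_ap_reindex(putAP):
    import numpy as np
    from scipy import signal
    putAP = np.asarray(putAP, dtype=float)
    expPutAP = signal.convolve(putAP, [1, 1, 1], mode='same') > 0     # dilate the AP windows
    notAP = np.logical_not(expPutAP).nonzero()[0]
    reIdx = np.arange(len(putAP))
    for i in range(len(putAP)):
        prev = (notAP <= i).nonzero()[0]
        if np.any(prev):
            reIdx[i] = notAP[prev[-1]]
    return reIdx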
def estZShifts(regDict, nZShifts, zFWHM, crop, noAP=True, shifts='newShifts'):
""" estimate Z shifts and make nZshifts new groups from postMeanGrpVol
:param regDict: needs: postMeanGrpVol, Index, nCutLines, pixSizeXY, finalGroups, newShifts
:param nZShifts: final number of Z shifts to coalesce data into
:param zFWHM: FWHM of the z-axis PSF
:param crop: a cropping to apply before estimating the z positions
:param noAP: exclude time points with APs
:param shifts: name of shifts to take: newShifts - after optimization, shiftsGroupsTime - before
:return: adds: groupZ, grpZIdx, grpZPos, finalShifts
"""
regDict['nZShifts'] = nZShifts
regDict['zFWHM'] = zFWHM
if noAP:
groups = copy.deepcopy(regDict['postMeanGrpVolNoAP'])
else:
groups = copy.deepcopy(regDict['postMeanGrpVol'])
finalGroups = copy.deepcopy(regDict['finalGroups'])
ngrp = np.array([
|
np.sum(finalGroups == grp)
|
numpy.sum
|
import numpy as np
class ActivationFunction:
"""All functions are defined in this way:
Args:
x (np.ndarray): input vector
derivative (boolean): True -> apply the derivative, False -> apply the function
Returns:
np.ndarray: vector after applying activation function
"""
@staticmethod
def identity(x, derivative = False):
if derivative:
return 1.
else:
return x
@staticmethod
def sigmoid(x, derivative = False):
if derivative:
_f_x = ActivationFunction.sigmoid(x)
return _f_x * (1 - _f_x)
else:
return 1. / (1. + np.exp(-x))
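    # Example (illustrative, not part of the original class):
    #   ActivationFunction.sigmoid(np.array([0.]))                   -> array([0.5])
    #   ActivationFunction.sigmoid(np.array([0.]), derivative=True)  -> array([0.25])
    # i.e. the derivative branch evaluates the activation first and returns s * (1 - s).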
@staticmethod
def tanh(x, derivative = False):
if derivative:
_f_x = ActivationFunction.tanh(x)
return 1 - (_f_x * _f_x)
else:
return np.tanh(x)
@staticmethod
def relu(x, derivative = False):
if derivative:
return np.where(x > 0, 1, 0)
else:
return np.maximum(x, 0)
@staticmethod
def leaky_relu(x, alpha = 0.01, derivative = False):
if derivative:
return np.where(x >= 0, 1, alpha)
else:
return
|
np.maximum(x, alpha * x)
|
numpy.maximum
|
import numpy as np
import pathlib
from mmdet.core.bbox3d.geometry import (center_to_corner_box2d,\
center_to_corner_box3d,\
box2d_to_corner_jit,\
points_in_convex_polygon_3d_jit,\
corner_to_surfaces_3d_jit,\
rotation_box2d_jit,\
rotation_points_single_angle,\
box_collision_test)
import copy
import pickle
import numba
def select_transform(transform, indices):
result = np.zeros(
(transform.shape[0], *transform.shape[2:]), dtype=transform.dtype)
for i in range(transform.shape[0]):
if indices[i] != -1:
result[i] = transform[i, indices[i]]
return result
@numba.njit
def rotation_matrix_3d_(rot_mat_T, angle, axis):
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[:] = np.eye(3)
if axis == 1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 2] = -rot_sin
rot_mat_T[2, 0] = rot_sin
rot_mat_T[2, 2] = rot_cos
elif axis == 2 or axis == -1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
elif axis == 0:
rot_mat_T[1, 1] = rot_cos
rot_mat_T[1, 2] = -rot_sin
rot_mat_T[2, 1] = rot_sin
rot_mat_T[2, 2] = rot_cos
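# --- Illustrative sketch (not part of the original module) ---
# rotation_matrix_3d_ fills rot_mat_T in place with a single-axis rotation matrix that is applied
# to row-vector points as `point @ rot_mat_T` (see points_transform_ below). A quick numeric check
# of the axis=2 branch with angle = pi/2 (the helper name is made up):
def _example_rotation_about_z():
    rot_mat_T = np.zeros((3, 3))
    rotation_matrix_3d_(rot_mat_T, np.pi / 2, 2)
    point = np.array([[1.0, 0.0, 0.0]])
    return point @ rot_mat_T          # approximately [[0., -1., 0.]] with this convention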
@numba.njit
def points_transform_(points, centers, point_masks, loc_transform,
rot_transform, valid_mask):
num_box = centers.shape[0]
num_points = points.shape[0]
rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
for i in range(num_box):
rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2)
for i in range(num_points):
for j in range(num_box):
if valid_mask[j]:
if point_masks[i, j] == 1:
points[i, :3] -= centers[j, :3]
points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j]
points[i, :3] += centers[j, :3]
points[i, :3] += loc_transform[j]
break # only apply first box's transform
@numba.njit
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
num_box = boxes.shape[0]
for i in range(num_box):
if valid_mask[i]:
boxes[i, :3] += loc_transform[i]
boxes[i, 6] += rot_transform[i]
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
# boxes: [N, 5]
# valid_mask: [N]
# loc_noises: [N, M, 3]
# rot_noises: [N, M]
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -
|
np.ones((num_boxes,), dtype=np.int64)
|
numpy.ones
|
# Experiment that generates several sets of networks of varying CH-divergence types
# then trains an msbm of a single type in a "consensus" type of way. Then we report the
# average rand_index and average entropy of the z variables, which are indicators of how well
# the algorithm is learning the true model.
import os, sys
import pickle
import pdb
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
def main():
stats_url = os.path.join('stats', 'stats_' + 'detectability.pickle')
print("generating plots from: {}".format(stats_url))
statistics = pickle.load(open(stats_url, 'rb'), encoding='latin1')
#box plot CH-div vs Rand Index
#We create a list with the 8 boxes
data1 = np.array(statistics['ari_Z'])
fil = np.array([(chd>0.55)&(chd<0.65) for chd in statistics['CH_div']])
fil2 = np.array([n == 250 for n in statistics['N']])
data1 = data1[fil&fil2].flatten()
data2 = np.array(statistics['ari_Z'])
fil = np.array([(chd>0.65)&(chd<0.75) for chd in statistics['CH_div']])
fil2 =
|
np.array([n == 250 for n in statistics['N']])
|
numpy.array
|
from __future__ import print_function
from agimus_vision import py_agimus_vision as av
import numpy as np, pinocchio, sys, os, pymannumopt as mno
def toURDFTag(M):
x,y,z = M.translation
ori = '<origin xyz="{} {} {}" '.format(x,y,z)
r,p,y = pinocchio.rpy.matrixToRpy(M.rotation)
ori += 'rpy="{} {} {}"/>'.format(r,p,y)
return ori
def vpToSE3(M):
return pinocchio.SE3(np.array(M))
def se3toVP(M):
return av.HomogeneousMatrix(M.homogeneous)
def cam_proj(x):
return x[:2] / x[2]
def Jcam_proj(x):
d = 1/x[2]
return np.array([ [ d, 0, -x[0]*d**2],
[ 0, d, -x[1]*d**2] ])
def J_M_times_P(M,P):
return np.hstack((M.rotation, -np.dot(M.rotation, pinocchio.skew(P))))
def image_point_residual(pij, cMi, oPij):
return pij - cam_proj(cMi * oPij)
def Jimage_point_residual(pij, cMi, oPij):
return - np.dot(Jcam_proj(cMi * oPij), J_M_times_P(cMi, oPij))
def image_point_residuals(pijss, cMw, wMis, oPijss):
return [ pij - cam_proj(cMw * wMi * oPij)
for pijs, wMi, oPijs in zip(pijss, wMis, oPijss)
for pij, oPij in zip(pijs, oPijs) ]
def Jimage_point_residuals(pijss, cMw, wMis, oPijss):
nrows = 0
for pijs, wMi, oPijs in zip(pijss, wMis, oPijss):
nrows += 2*len(pijs)
JcMw = np.zeros((nrows, 6))
JwMis = []
JcMi = np.zeros((2,6))
r = 0
for pijs, wMi, oPijs in zip(pijss, wMis, oPijss):
XiMw = wMi.toActionMatrixInverse()
JwMi = np.zeros((nrows, 6))
for pij, oPij in zip(pijs, oPijs):
JcMi = Jimage_point_residual(pij, cMw * wMi, oPij)
JcMw[r:r+2] = np.dot(JcMi, XiMw)
JwMi[r:r+2] = JcMi
r+=2
JwMis.append(JwMi)
return JcMw, JwMis
def plane_residual(pi, wMis, oPijss):
#TODO should be this but we add the 4 mm that we measured on the real model
return np.array([ (np.dot(pi[:3], wMi * oPij) + pi[3] ) for wMi, oPijs in zip(wMis, oPijss) for oPij in oPijs ])
def Jplane_residual(pi, wMis, oPijss):
# derivative wrt to pi
Jpi = [ (wMi*oPij).tolist() + [1,] for wMi, oPijs in zip(wMis, oPijss) for oPij in oPijs ]
# derivative wrt to wMis
JwMis = []
row = 0
for wMi, oPijs in zip(wMis, oPijss) :
JwMi = np.zeros((len(Jpi), 6))
for oPij in oPijs:
JwMi[row,:] = np.dot(pi[:3], J_M_times_P(wMi, oPij))
row += 1
JwMis.append(JwMi)
return Jpi, JwMis
class Image:
def __init__(self, tags, guess_cMis, pijss, oPijss, coplanar_tags_s):
self.tags = tags
self.guess_cMis = guess_cMis
self.pijss = pijss
self.oPijss = oPijss
# list of tuple (plane id, list of visible tags)
self.coplanar_tags_s = []
tags_set = set(tags)
for ip, coplanar_tags in enumerate(coplanar_tags_s):
coplanar_tags_set = tags_set.intersection(coplanar_tags)
if len(coplanar_tags_set) > 0:
tag_indices = [ tags.index(tag) for tag in coplanar_tags_set ]
self.coplanar_tags_s.append( (ip, tag_indices) )
def makeImage(Img, tag_defs, coplanar_tags_s):
tags = []
pijss = []
oPijss = []
cMis = []
first = True
for tag, (id, size) in enumerate(tag_defs):
aprilTag = av.makeAprilTag()
if first:
aprilTag.detector().imageReady = False
first = False
aprilTag.cameraParameters(cam)
aprilTag.addTag(id, size, av.HomogeneousMatrix())
if aprilTag.detect(Img):
aprilTag.drawDebug(Img)
oMt = np.array(aprilTag.getPose())
# TODO initial guess
cMis.append(vpToSE3(oMt))
tags.append(tag)
pijss.append([ np.array(v) for v in aprilTag.getPoints(cam, id) ])
oPijss.append([ np.array(v) for v in av.aprilTagPoints(size) ])
del aprilTag
return Image(tags, cMis, pijss, oPijss, coplanar_tags_s)
class Variables:
def __init__(self, ncameras, ntags, nplanes):
self.ncameras = ncameras
self.ntags = ntags
self.nplanes = nplanes
from pinocchio import liegroups
spaces = [ liegroups.Rn(4), ] * nplanes \
+ [ liegroups.SE3() ] * ncameras \
+ [ liegroups.SE3() ] * ntags
self.space = spaces[0]
for space in spaces[1:]:
self.space *= space
def plane(self, X, i):
assert i < self.nplanes
return X[4*i:4*(i+1)]
def camera(self, X, i, asSE3=True):
assert i < self.ncameras
s = self.nplanes*4
if asSE3:
return pinocchio.XYZQUATToSE3(X[s+7*i:s+7*(i+1)])
else:
return X[s+7*i:s+7*(i+1)]
def tag(self, X, i, asSE3=True):
assert i < self.ntags
s = self.nplanes*4+self.ncameras*7
if asSE3:
return pinocchio.XYZQUATToSE3(X[s+7*i:s+7*(i+1)])
else:
return X[s+7*i:s+7*(i+1)]
def Jplane(self, Jx, i):
assert i < self.nplanes
return Jx[:,4*i:4*(i+1)]
def Jcamera(self, Jx, i):
assert i < self.ncameras
s = self.nplanes*4
return Jx[:,s+6*i:s+6*(i+1)]
def Jtag(self, Jx, i):
assert i < self.ntags
s = self.nplanes*4+self.ncameras*6
return Jx[:,s+6*i:s+6*(i+1)]
class ImageResiduals(mno.VectorFunction):
def __init__(self, variables, images):
mno.VectorFunction.__init__(self)
self.images = images
self.variables = variables
def dimension(self):
nb_pts_per_image = [ sum([ len(pijs) for pijs in image.pijss ]) for image in self.images ]
nb_pts = sum(nb_pts_per_image)
return 2 * nb_pts
def f(self, X, f):
r = 0
for ic, image in enumerate(self.images):
cMw = self.variables.camera(X, ic)
# build cMis
wMis = [ self.variables.tag(X, it) for it in image.tags ]
residuals = image_point_residuals(image.pijss, cMw, wMis, image.oPijss)
for residual in residuals:
f[r:r+2] = residual
r+=2
assert r == f.shape[0]
def f_fx(self, X, f, fx):
fx[:] = np.zeros_like(fx)
r = 0
for ic, image in enumerate(self.images):
cMw = self.variables.camera(X, ic)
# build cMis
wMis = [ self.variables.tag(X, it) for it in image.tags ]
residuals = image_point_residuals(image.pijss, cMw, wMis, image.oPijss)
JcMw, JwMis = Jimage_point_residuals(image.pijss, cMw, wMis, image.oPijss)
nr = JcMw.shape[0]
self.variables.Jcamera(fx[r:r+nr,:], ic)[:] = JcMw
for it, JwMi in zip(image.tags, JwMis):
self.variables.Jtag(fx[r:r+nr,:], it)[:] = JwMi
for residual in residuals:
f[r:r+2] = residual
r+=2
class PlaneResiduals(mno.VectorFunction):
def __init__(self, variables, images):
mno.VectorFunction.__init__(self)
self.images = images
self.variables = variables
def dimension(self):
nb_pts_per_plane = [
sum([ len(image.oPijss[tagid]) for tagid in tag_indices ])
for image in self.images
for _, tag_indices in image.coplanar_tags_s ]
assert sum(nb_pts_per_plane) == 4 * sum([ len(tag_indices) for image in self.images for _, tag_indices in image.coplanar_tags_s ])
return sum(nb_pts_per_plane)
def f(self, X, f):
r = 0
for ic, image in enumerate(self.images):
for ip, tag_indices in image.coplanar_tags_s:
pi = self.variables.plane(X, ip)
wMis = [ self.variables.tag(X, image.tags[k]) for k in tag_indices ]
oPijss = [ image.oPijss[k] for k in tag_indices ]
n = sum( [ len(oPijs) for oPijs in oPijss ] )
f[r:r+n] = plane_residual(pi, wMis, oPijss)
r += n
assert r == f.shape[0]
def f_fx(self, X, f, fx):
r = 0
fx[:] = np.zeros_like(fx)
for ic, image in enumerate(self.images):
for ip, tag_indices in image.coplanar_tags_s:
pi = self.variables.plane(X, ip)
wMis = [ self.variables.tag(X, image.tags[k]) for k in tag_indices ]
oPijss = [ image.oPijss[k] for k in tag_indices ]
n = sum( [ len(oPijs) for oPijs in oPijss ] )
f[r:r+n] = plane_residual(pi, wMis, oPijss)
Jpi, JwMis = Jplane_residual(pi, wMis, oPijss)
self.variables.Jplane(fx[r:r+n,:], ip)[:] = Jpi
for k, JwMi in zip(tag_indices, JwMis):
self.variables.Jtag(fx[r:r+n,:], image.tags[k])[:] = JwMi
r += n
assert r == f.shape[0]
class PlaneUnitNormal(mno.VectorFunction):
def __init__(self, variables):
mno.VectorFunction.__init__(self)
self.variables = variables
def dimension(self):
return self.variables.nplanes
def f(self, X, f):
for ip in range(self.variables.nplanes):
pi = self.variables.plane(X, ip)
normal = pi[:3]
f[ip] = np.sum(normal**2) - 1
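# Added note (not in the original source): since f_ip = ||n_ip||^2 - 1, with
# n_ip the first three plane parameters, the Jacobian row for plane ip is
# [2*n_ip, 0] in that plane's four columns and zero everywhere else.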
def f_fx(self, X, f, fx):
fx[:] =
|
np.zeros_like(fx)
|
numpy.zeros_like
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Dependencies
# Created by wrborrelli
# %%
import sys
import itertools
import random
#import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
from scipy.optimize import Bounds
from scipy.optimize import linprog
from scipy.optimize import nnls
from scipy.optimize import minimize
from scipy.optimize import lsq_linear
from scipy.spatial import HalfspaceIntersection
from scipy.spatial.qhull import _Qhull
import numpy as np
import pint
from pint import UnitRegistry
units = UnitRegistry()
Q_ = units.Quantity
# %% [markdown]
# # Helper Functions
# %% [markdown]
def convex_hull_intersection(points1: np.ndarray, points2: np.ndarray, vis2d=False):
""" Returns the points corresponding to the intersecting region of two convex hulls (up to however many dimensions scipy ConvexHull takes (9D I think).
Args:
points1: np.array() of points corresponding to the first convex hull.
points2: np.array() of points corresponding to the second convex hull.
vis2d: True/False if you want to visualize the resulting region (2D hulls only)
Returns:
np.array() of points corresponding to the intersection region.
"""
assert points1.shape[1] == points2.shape[1]
hull1 = ConvexHull(points1)
hull2 = ConvexHull(points2)
A = np.vstack((hull1.equations[:, :-1], hull2.equations[:, :-1]))
b = np.hstack((hull1.equations[:, -1], hull2.equations[:, -1]))
res = linprog(c=np.zeros(A.shape[1]), A_ub=A, b_ub=-b, method="interior-point")
feasible_point = res.x
hint = HalfspaceIntersection(np.vstack((hull1.equations, hull2.equations)), feasible_point)
if vis2d:
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, aspect='equal')
xlim, ylim = (0, 1), (0, 1)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
for simplex in hull1.simplices:
ax.plot(points1[simplex, 0], points1[simplex, 1], 'r-')
for simplex in hull2.simplices:
ax.plot(points2[simplex, 0], points2[simplex, 1], 'b-')
x, y = zip(*hint.intersections)
ax.plot(x, y, 'k^', markersize=8)
plt.savefig("{}".format(__file__).replace(".py", ".png"))
return hint.intersections
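# %%
# A minimal usage sketch (added for illustration; the squares below are
# hypothetical test data, not part of the original notebook): two unit squares
# offset by (0.5, 0.5) should intersect in [0.5, 1] x [0.5, 1], with area ~0.25.
_square1 = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
_square2 = _square1 + 0.5
_overlap = convex_hull_intersection(_square1, _square2)
# ConvexHull(_overlap).volume gives the intersection area (~0.25 for these 2D hulls).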
# %%
def get_hull_centroid(hull: ConvexHull):
""" Returns the centroid of the supplied scipy convex hull object.
Args:
hull: a scipy convex hull object
Returns:
np.array() of centroids for each axis.
>>> get_hull_centroid(ConvexHull(np.array([[1.8, 0., 0.],[0. , 0., 0.], [0., 0., 26.5], [0., 2., 1.]])))
array([0.45, 0.5, 6.875])
"""
return np.array([np.mean(hull.points[hull.vertices, i]) for i in range(hull.points.shape[1])])
# %%
## functions from randomSampling_py that are needed
# %%
def bb_to_cds(bb):
""" Converts bounding box notation to vertex coordinates.
Args:
bb: bounding box given as a list of [min, max] pairs, one per axis.
Returns:
np.array() of points corresponding to the vertices of the bounding box.
"""
outp = list(itertools.product(*bb))
out = []
for i in outp:
temp = []
for j in i:
if isinstance(j, list):
for k in j:
temp.append(k)
else:
temp.append(j)
out.append(temp)
return np.array(out)
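# %%
# Worked example (added; values are hypothetical): per-axis [min, max] pairs
# expand to the 2**d corner vertices of the box.
# bb_to_cds([[0, 1], [0, 2]])
# -> array([[0, 0], [0, 2], [1, 0], [1, 2]])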
# %%
def dropZeroColumns(reagents, uniqueChemicalNames=None):
""" Version of dropZeroColumns that includes the uniqueChemicalNames corresponding to only nonzero columns in the reagent dictionary.
Args:
reagents: Dictionary defining the reagents.
uniqueChemicalNames: (optional) Provide a list of unique chemical names.
Returns:
Dictionary of reagents with only the nonzero columns, as well as a list of the unique chemical names that correspond to the nonzero columns.
"""
if uniqueChemicalNames is None:
d_keys = list(reagents.keys())
array = np.array(list(reagents.values()))
new_vals = array[:, array.any(0)]
return dict(zip(d_keys, new_vals))
else:
d_keys = list(reagents.keys())
array = np.array(list(reagents.values()))
new_vals = array[:, array.any(0)]
noz_cols = list(np.nonzero(array.any(0))[0])
noz_chems = list(np.take(np.array(uniqueChemicalNames), noz_cols))
return [dict(zip(d_keys, new_vals)), noz_chems]
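# %%
# Worked example (added; reagent values are hypothetical): the all-zero middle
# column is dropped and the matching chemical name 'B' is removed.
# dropZeroColumns({'r1': [1, 0, 2], 'r2': [3, 0, 4]}, ['A', 'B', 'C'])
# -> [{'r1': array([1, 2]), 'r2': array([3, 4])}, ['A', 'C']]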
# %%
def allowedExperiments(reagents, maxConcentration, minConcentration):
""" Find the allowed ConvexHull given the reagent definitions and max/min concentration.
Note that, relative to the Mathematica code, the positions of max and min concentration in the function arguments are swapped. This allows for the case where you only want to give a max concentration.
Because this generates a scipy ConvexHull, it is susceptible to dimensional constraints.
Args:
reagents: dictionary of reagent definitions.
maxConcentration: the maximum concentration imposed.
minConcentration: the minimum concentration imposed.
Returns:
scipy ConvexHull defining the allowed experimental sampling region.
"""
compositionBoundary = np.array(list(reagents.values())) # array of convex hull points
minMax = [] # list to format imposedBoundary points
if ((type(minConcentration) is list) and (type(maxConcentration)) is list): # run this if both min/max Concentrations are given as lists
for i in range(len(minConcentration)): # formats imposedBoundary into coords
minMax.append([minConcentration[i], maxConcentration[i]])
imposedBoundary = np.array(minMax) # array of imposedBoundary points
return ConvexHull(convex_hull_intersection(compositionBoundary, bb_to_cds(imposedBoundary))) # return the intersection convex hull
else:
raise ValueError('minConcentration and maxConcentration must both be given as lists')
def allowedExperiments(reagents, maxConcentration, minConcentration=None):
""" Find the allowed ConvexHull given the reagent definitions and max/min concentration.
Note that, relative to the Mathematica code, the positions of max and min concentration in the function arguments are swapped. This allows for the case where you only want to give a max concentration.
Because this generates a scipy ConvexHull, it is susceptible to dimensional constraints.
Args:
reagents: dictionary of reagent definitions.
maxConcentration: the maximum concentration imposed.
minConcentration: (optional) the minimum concentration imposed (0 otherwise).
Returns:
scipy ConvexHull defining the allowed experimental sampling region.
"""
if ((type(maxConcentration) is float) or (type(maxConcentration) is int)): # run this if only maxConcentration is given and it's a float or int
correctDimensionalityVector = ([1]*len(np.array(list(reagents.values()))[0]))
compositionBoundary = np.array(list(reagents.values()))
minMax = []
minConcVec = ([0]*len(correctDimensionalityVector)) # min [] vec is 0's
maxConcVec = ([maxConcentration]*len(correctDimensionalityVector)) # max [] vec is maxConc repeated to correct dimension
for i in range(len(correctDimensionalityVector)):
minMax.append(([minConcVec[i], maxConcVec[i]]))
imposedBoundary =
|
np.array(minMax)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
@author: jessys
#email: <EMAIL>
"""
import numpy as np
import pandas as pd
def normalized_correlation(a, b):
a = (a - np.mean(a)) / (np.std(a) * len(a))
b = (b - np.mean(b)) / (np.std(b))
return np.correlate(a, b)
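# Worked example (added; inputs are hypothetical): two exactly linearly related
# series give a normalized correlation of 1.0.
# normalized_correlation(np.array([1., 2., 3., 4.]), np.array([2., 4., 6., 8.]))
# -> array([1.])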
def normalized_correlation_full(a, b):
a = (a - np.mean(a)) / (
|
np.std(a)
|
numpy.std
|
import itertools
import numpy as np
from copy import deepcopy
from dcptree.analysis import to_group_data, groups_to_group_data
from dcptree.data import check_data
from dcptree.group_helper import check_groups
from scipy.stats import binom
def exact_mcn_test(y, yhat1, yhat2, two_sided = False):
"""
:param y: true labels
:param yhat1: predictions of classifier 1
:param yhat2: predictions of classifier 2
:param two_sided: if True, run the two-sided version of the test
:return: (p-value, test statistic) of the exact McNemar test
"""
f1_correct = np.equal(y, yhat1)
f2_correct = np.equal(y, yhat2)
table = np.zeros(shape = (2, 2))
for i in range(2):
for j in range(2):
table[i, j] = np.sum((f1_correct == i) & (f2_correct == j))
b = table[0, 1] #f1 wrong and f2 right
c = table[1, 0] #f1 right and f2 wrong
n = b + c
# envy-freeness requires that
# f1 is correct more often than f2 <=> b < c
#
# We test
#
# H0: error(f1) = error(f2)
# H1: error(f1) > error(f2)
#
# Under H0, b ~ Binomial(n = b + c, p = 0.5)
if two_sided:
test_statistic = min(b, c)
p = 2.0 * binom.cdf(k = min(b, c), n = b + c, p = 0.5)
else:
test_statistic = c
p = binom.cdf(k = test_statistic, n = n, p = 0.5)
return p, test_statistic
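# Worked example of the one-sided test (added; counts are hypothetical): with
# b = 9 discordant cases where f1 is wrong and f2 is right, and c = 1 case the
# other way around, p = binom.cdf(1, n=10, p=0.5) = 11/1024 ~ 0.011, i.e.
# evidence against H0 and in favour of error(f1) > error(f2).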
def check_model_assignment(groups_to_models, groups, models):
assert isinstance(groups_to_models, dict)
assert isinstance(models, list)
splits = [[(k, l) for l in v['labels']] for k, v in groups.items()]
splits = set(itertools.product(*splits))
assert set(groups_to_models.keys()).issubset(splits), 'mapper should map every group in the data'
model_indices = list(range(len(models)))
assignment_indices = np.array(list(groups_to_models.values()))
assert np.array_equal(np.unique(assignment_indices), model_indices), 'every model should cover at least one group'
return True
def build_model_assignment_map(p):
groups_to_models = {}
group_labels, group_values = groups_to_group_data(p.groups, stat_field = 'train')
split_values = np.unique(group_values, axis = 0)
for vals in split_values:
s = tuple([(g, z) for g, z in zip(group_labels, vals)])
vals = vals[:, None].transpose()
n_matches = 0
for i, l in enumerate(p.leaves):
if l.contains(group_labels, vals):
groups_to_models[s] = i
n_matches += 1
assert n_matches == 1
assert check_model_assignment(groups_to_models, p.groups, models = p.predictors)
return groups_to_models
class DecoupledClassifierSet(object):
def __init__(self, data, groups, pooled_model, decoupled_models, groups_to_models):
# check inputs
assert check_data(data, ready_for_training = True)
assert check_groups(groups, data)
# initialize data
self._data = {
'X': np.array(data['X']),
'Y': np.array(data['Y']),
'variable_names': list(data['variable_names'])
}
self._groups = deepcopy(groups)
self._pooled_model = pooled_model
self._decoupled_models = decoupled_models
group_names, group_values = groups_to_group_data(groups)
training_values = np.unique(group_values, axis = 0).tolist()
training_splits = [tuple(zip(group_names, v)) for v in training_values]
assert isinstance(groups_to_models, dict)
assert set(training_splits) == set(groups_to_models.keys()), 'mapper should map every group in the training data'
assignment_idx = np.array(list(groups_to_models.values()))
assert np.array_equal(np.unique(assignment_idx), np.arange(len(self))), 'every model should cover at least one group'
models_to_groups = {k:[] for k in range(len(self))}
for group_tuple, model_index in groups_to_models.items():
group_value = [s[1] for s in group_tuple]
assert len(group_value) == len(group_names)
models_to_groups[model_index].append(group_value)
self._splits = training_splits
self.groups_to_models = groups_to_models
self.models_to_groups = models_to_groups
def __len__(self):
return len(self._decoupled_models)
def __repr__(self):
info = [
'DecoupledClassifierSet',
'# group attributes: %d' % len(self._groups),
'# groups: %d' % len(self.groups_to_models),
'# models: %d' % len(self._decoupled_models),
]
info = info + [', '.join(s) for s in self.split_names]
return '\n'.join(info)
@property
def data(self):
return self._data
@property
def groups(self):
return self._groups
@property
def split_names(self):
return [['%s = %s' % (a, b) for (a, b) in s] for s in self._splits]
@property
def pooled_model(self):
return self._pooled_model
@pooled_model.setter
def pooled_model(self, clf):
assert callable(clf)
self._pooled_model = clf
@property
def decoupled_models(self):
return [clf.predict for clf in self._decoupled_models]
@decoupled_models.setter
def decoupled_models(self, clf_set):
assert len(clf_set) >= 2
assert all([callable(clf) for clf in clf_set])
self._decoupled_models = clf_set
def assigned_indices(self, model_index, group_names, group_values):
assignment_idx = np.repeat(False, group_values.shape[0])
for s in self.models_to_groups[model_index]:
assignment_idx = np.logical_or(assignment_idx, np.all(group_values == s, axis = 1))
return assignment_idx
def _parse_data_args(self, **kwargs):
"""
helper function to parse X, y, group_names, and group_values
from keyword inputs to different methods
:param kwargs:
:return:
"""
if len(kwargs) == 0:
return to_group_data(data = self.data, groups = self.groups, stat_field = 'train')
elif set(['X', 'y', 'group_names', 'group_values']).issubset(kwargs):
return kwargs['X'], kwargs['y'], kwargs['group_names'], kwargs['group_values']
elif set(['data', 'groups']).issubset(kwargs):
return to_group_data(data = kwargs['data'], groups = kwargs['groups'], stat_field = kwargs.get('stat_field') or 'train')
else:
raise ValueError('unsupported input arguments')
def _drop_missing_splits(self, splits, group_values):
new = []
for s in splits:
group_labels = [t[1] for t in s]
if np.all(group_values == group_labels, axis = 1).any():
new.append(s)
return new
def predict(self, X, group_names, group_values):
"""
predict using all of the leaves in this partition
:param X:
:param group_names:
:param group_values:
:return:
"""
yhat = np.repeat(np.nan, X.shape[0])
for i, clf in enumerate(self._decoupled_models):
idx = self.assigned_indices(i, group_names, group_values)
yhat[idx] = clf.predict(X[idx, ])
assert np.all(np.isfinite(yhat))
return yhat
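# Added note (not in the original source): each row of X is routed to exactly
# one decoupled model through assigned_indices, so after the loop every entry
# of yhat has been overwritten and the final isfinite assertion holds.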
def splits(self, groups = None):
"""
:param groups: group data structure
:return: list of splits, where each split = [(group_name, group_label)] for all possible subgroups
"""
if groups is None:
groups = self.groups
splits = [[(k, l) for l in v['labels']] for k, v in groups.items()]
return list(itertools.product(*splits))
def group_sample_size_stats(self, metric_type = 'n', drop_missing = False, **kwargs):
_, y, group_names, group_values = self._parse_data_args(**kwargs)
splits = self.splits(kwargs.get('groups'))
if drop_missing:
splits = self._drop_missing_splits(splits, group_values)
S = np.repeat(np.nan, len(splits))
for i, s in enumerate(splits):
group_labels = [t[1] for t in s]
idx = np.all(group_values == group_labels, axis = 1)
if metric_type in ('n', 'p'):
S[i] = np.sum(idx)
elif metric_type in ('n_pos', 'p_pos'):
S[i] = np.sum(y[idx] == 1)
elif metric_type in ('n_neg', 'p_neg'):
S[i] = np.sum(y[idx] == -1)
if metric_type in ('p', 'p_pos', 'p_neg'):
S = S / len(y)
return S
def group_switch_stats(self, metric_type = 'error_gap', drop_missing = False, **kwargs):
X, y, group_names, group_values = self._parse_data_args(**kwargs)
splits = self.splits(kwargs.get('groups'))
if drop_missing:
splits = self._drop_missing_splits(splits, group_values)
n_groups, n_models = len(splits), len(self)
M = np.tile(np.nan, [n_groups, n_models])
model_idx = np.arange(n_models)
for i, s in enumerate(splits):
group_labels = [t[1] for t in s]
idx = np.all(group_values == group_labels, axis = 1)
if any(idx):
ys, Xs = y[idx], X[idx, :]
match_idx = self.groups_to_models[s]
other_idx = model_idx[model_idx != match_idx].tolist()
h = self._decoupled_models[match_idx]
ys_hat = h.predict(Xs)
for k in other_idx:
yk_hat = self._decoupled_models[k].predict(Xs)
if metric_type == 'pvalue':
M[i, k] = exact_mcn_test(ys, ys_hat, yk_hat)[0]
elif metric_type == 'error':
M[i, k] = np.not_equal(ys, yk_hat).mean()
elif metric_type == 'error_gap':
M[i, k] = np.not_equal(ys, yk_hat).mean() - np.not_equal(ys, ys_hat).mean()
elif metric_type == 'error_relgap':
M[i, k] = (np.not_equal(ys, yk_hat).mean() - np.not_equal(ys, ys_hat).mean()) / np.not_equal(ys, ys_hat).mean()
if metric_type == 'error':
M[i, match_idx] = np.not_equal(ys, ys_hat).mean()
elif metric_type == 'error_relgap':
M[i, match_idx] = 0.0
return M
def group_decoupling_stats(self, parent_type = 'root', metric_type = 'error_gap', drop_missing = False, **kwargs):
X, y, group_names, group_values = self._parse_data_args(**kwargs)
splits = self.splits(kwargs.get('groups'))
if drop_missing:
splits = self._drop_missing_splits(splits, group_values)
leaves = self._decoupled_models
S = np.repeat(np.nan, len(splits))
if parent_type == 'self':
if metric_type == 'error':
for i, s in enumerate(splits):
group_labels = [t[1] for t in s]
idx = np.all(group_values == group_labels, axis = 1)
ys, Xs = y[idx], X[idx, :]
yhat = self.predict(Xs, group_names, group_values[idx, :])
S[i] = np.not_equal(ys, yhat).mean()
elif parent_type == 'root':
for i, s in enumerate(splits):
group_labels = [t[1] for t in s]
idx = np.all(group_values == group_labels, axis = 1)
ys, Xs = y[idx], X[idx, :]
yhat = self.predict(Xs, group_names, group_values[idx,:])
yhat_root = self.pooled_model.predict(Xs)
if metric_type == 'pvalue':
S[i], _ = exact_mcn_test(ys, yhat, yhat_root)
elif metric_type in ['error']:
S[i] = np.not_equal(ys, yhat_root).mean()
elif metric_type in ['error_gap', 'error_relgap']:
base_value = np.not_equal(ys, yhat).mean()
comp_value = np.not_equal(ys, yhat_root).mean()
metric_value = comp_value - base_value
if '_relgap' in metric_type:
metric_value = metric_value / base_value
S[i] = metric_value
return S
def partition_stat_matrix(self, metric_type = 'error', **kwargs):
"""
return a matrix computing
:param metric_type: 'error', 'error_gap', 'mistakes', 'mistake_gap', 'pvalue'
:param kwargs: can be either:
- data, groups
- data, groups, stat_field
- X, y, group_names, group_values
:return: n_parts x n_parts matrix M of statistics where:
- n_parts = # of leaves in partition
- M[i, j] = value of metric on data in leaf i using model from leaf j
"""
X, y, group_names, group_values = self._parse_data_args(**kwargs)
n_models = len(self)
M = np.empty(shape = (n_models, n_models))
# fast return for comparison based metrics
is_root = len(self) == 1
if is_root and metric_type not in ['error', 'mistakes']:
return np.array([np.nan])
if metric_type in ['error', 'error_gap']:
for i, leaf in enumerate(self._decoupled_models):
idx = self.assigned_indices(i, group_names, group_values)
ys, Xs = y[idx], X[idx, :]
for j, other in enumerate(self._decoupled_models):
M[i, j] = np.not_equal(ys, other.predict(Xs)).mean()
elif metric_type in ['mistakes', 'mistakes_gap']:
for i, leaf in enumerate(self._decoupled_models):
idx = self.assigned_indices(i, group_names, group_values)
ys, Xs = y[idx], X[idx, :]
for j, other in enumerate(self._decoupled_models):
M[i, j] = np.not_equal(ys, other.predict(Xs)).sum()
elif metric_type == 'pvalue':
for i, leaf in enumerate(self._decoupled_models):
idx = self.assigned_indices(i, group_names, group_values)
ys, Xs = y[idx], X[idx, :]
hs = leaf.predict(X[idx, :])
for j, other in enumerate(self._decoupled_models):
if i != j:
M[i, j], _ = exact_mcn_test(y = ys, yhat1 = hs, yhat2 = other.predict(Xs))
np.fill_diagonal(M, np.nan)
if '_gap' in metric_type:
M = M - np.diag(M)[:, None]
np.fill_diagonal(M, np.nan)
return M
def partition_sample_size_stats(self, metric_type = 'n', **kwargs):
_, y, group_names, group_values = self._parse_data_args(**kwargs)
S = np.zeros(len(self))
for i, l in enumerate(self._decoupled_models):
idx = self.assigned_indices(i, group_names, group_values)
if np.any(idx):
if metric_type in ('n', 'p'):
S[i] = np.sum(idx)
elif metric_type in ('n_pos', 'p_pos'):
S[i] = np.sum(y[idx] == 1)
elif metric_type in ('n_neg', 'p_neg'):
S[i] = np.sum(y[idx] == -1)
assert len(y) == np.sum(S)
if metric_type in ('p', 'p_pos', 'p_neg'):
S = S / len(y)
return S
def partition_decoupling_stats(self, metric_type = 'error_gap', parent_type = 'root', **kwargs):
X, y, group_names, group_values = self._parse_data_args(**kwargs)
S = np.repeat(np.nan, len(self))
for i, l in enumerate(self._decoupled_models):
idx = self.assigned_indices(i, group_names, group_values)
ys, Xs = y[idx], X[idx, :]
yhat = l.predict(Xs)
yhat_pooled = self.pooled_model.predict(Xs)
if metric_type == 'error':
if parent_type == 'self':
metric_value = np.not_equal(ys, yhat).mean()
elif parent_type == 'root':
metric_value = np.not_equal(ys, yhat_pooled).mean()
elif metric_type in ['error_gap', 'error_relgap']:
base_value =
|
np.not_equal(ys, yhat)
|
numpy.not_equal
|
import itertools
import sys
from time import time
import numpy as np
from scipy.signal import bspline
import pdb2sql
from deeprank.config import logger
from deeprank.tools import sparse
from deeprank.operate.pdb import get_atoms, get_residue_contact_atom_pairs
from deeprank.operate import hdf5data
try:
from tqdm import tqdm
except ImportError:
def tqdm(x):
return x
def logif(string, cond): return logger.info(string) if cond else None
class GridTools(object):
def __init__(self, variant_group, variant,
number_of_points=30, resolution=1.,
atomic_densities=None, atomic_densities_mode='ind',
feature=None, feature_mode='ind',
contact_distance=10.0,
cuda=False, gpu_block=None, cuda_func=None, cuda_atomic=None,
prog_bar=False, time=False, try_sparse=True):
"""Map the feature of a complex on the grid.
Args:
variant_group(str): name of the group of the variant in the HDF5 file.
variant (PdbVariantSelection): The variant
number_of_points(int, optional): number of points we want in
each direction of the grid.
resolution(float, optional): distance(in Angs) between two points.
atomic_densities(dict, optional): dictionary of element types with
their vdw radius, see deeprank.config.atom_vdw_radius_noH
atomic_densities_mode(str, optional): Mode for mapping
(deprecated must be 'ind').
feature(None, optional): Name of the features to be mapped.
By default all the features present in
hdf5_file['< variant_group > /features/] will be mapped.
feature_mode(str, optional): Mode for mapping
(deprecated must be 'ind').
contact_distance(float, optional): the maximum distance
between two contact atoms, default 10.0 Å.
cuda(bool, optional): Use CUDA or not.
gpu_block(tuple(int), optional): GPU block size to use.
cuda_func(None, optional): Name of the CUDA function to be
used for the mapping of the features.
Must be present in kernel_cuda.c.
cuda_atomic(None, optional): Name of the CUDA function to be
used for the mapping of the atomic densities.
Must be present in kernel_cuda.c.
prog_bar(bool, optional): print progression bar for
individual grid (default False).
time(bool, optional): print timing statistic for
individual grid (default False).
try_sparse(bool, optional): Try to store the matrix in
sparse format (default True).
"""
# variant and hdf5 file
self.variant_group = variant_group
self.variant_basename = variant_group.name
# variant query
self.variant = variant
# hdf5 file to store data
self.hdf5 = self.variant_group.file
self.try_sparse = try_sparse
# parameter of the grid
if number_of_points is not None:
if not isinstance(number_of_points, list):
number_of_points = [number_of_points] * 3
self.npts = np.array(number_of_points).astype('int')
if resolution is not None:
if not isinstance(resolution, list):
resolution = [resolution] * 3
self.res = np.array(resolution)
# feature requested
self.atomic_densities = atomic_densities
self.feature = feature
# mapping mode
self.feature_mode = feature_mode
self.atomic_densities_mode = atomic_densities_mode
# cuda support
self.cuda = cuda
if self.cuda: # pragma: no cover
self.gpu_block = gpu_block
self.gpu_grid = [int(np.ceil(n / b))
for b, n in zip(self.gpu_block, self.npts)]
# cuda
self.cuda_func = cuda_func
self.cuda_atomic = cuda_atomic
# parameter of the atomic system
self.atom_xyz = None
self.atom_index = None
self.atom_type = None
# grid points
self.x = None
self.y = None
self.z = None
# grids for calculation of atomic densities
self.xgrid = None
self.ygrid = None
self.zgrid = None
# dictionaries of atomic densities
self.atdens = {}
# conversion from boh to angs for VMD visualization
self.bohr2ang = 0.52918
# contact distance to locate the interface
self.contact_distance = contact_distance
# progress bar
self.local_tqdm = lambda x: tqdm(x) if prog_bar else x
self.time = time
# if we already have an output containing the grid
# we update the existing features
_update_ = False
if self.variant_basename + '/grid_points/x' in self.hdf5:
_update_ = True
if _update_:
logif(f'\n=Updating grid data for {self.variant_basename}.',
self.time)
self.update_feature()
else:
logif(f'\n= Creating grid and grid data for {self.variant_basename}.',
self.time)
self.create_new_data()
################################################################
def create_new_data(self):
"""Create new feature for a given variant."""
# get the position/atom type .. of the complex
self.read_pdb()
# get the contact atoms and interface center
self.get_contact_center()
# define the grid
self.define_grid_points()
# save the grid points
self.export_grid_points()
# map the features
self.add_all_features()
# if we want the atomic densities
self.add_all_atomic_densities()
# close the db file
self.sqldb._close()
################################################################
def update_feature(self):
"""Update existing feature in a variant."""
# get the position/atom type .. of the complex
# get self.sqldb
self.read_pdb()
# read the grid from the hdf5
grid = self.hdf5.get(self.variant_basename + '/grid_points/')
self.x, self.y, self.z = grid['x'][()], grid['y'][()], grid['z'][()]
# create the grid
self.ygrid, self.xgrid, self.zgrid = np.meshgrid(
self.y, self.x, self.z)
# set the resolution/dimension
self.npts = np.array([len(self.x), len(self.y), len(self.z)])
self.res = np.array(
[self.x[1] - self.x[0], self.y[1] - self.y[0], self.z[1] - self.z[0]])
# map the features
self.add_all_features()
# if we want the atomic densities
self.add_all_atomic_densities()
# close the db file
self.sqldb._close()
################################################################
def read_pdb(self):
"""Create a sql databse for the pdb."""
self.sqldb = pdb2sql.interface(self.variant_group.attrs['pdb_path'])
# get the contact atoms and interface center
def get_contact_center(self):
"""Get the center of conact atoms."""
contact_atom_pairs = get_residue_contact_atom_pairs(self.sqldb,
self.variant.chain_id,
self.variant.residue_number,
self.variant.insertion_code,
self.contact_distance)
contact_atom_ids = set([])
for atom1, atom2 in contact_atom_pairs:
contact_atom_ids.add(atom1.id)
contact_atom_ids.add(atom2.id)
# get interface center
self.center_contact = np.mean(
np.array(self.sqldb.get('x,y,z', rowID=list(contact_atom_ids))), 0)
################################################################
# shortcut to add all the features
# and atomic densities in just one line
################################################################
# add all the residue features to the data
def add_all_features(self):
"""Add all the features to a given variant."""
# map the features
if self.feature is not None:
# map the residue features
dict_data = self.map_features(self.feature)
# save to hdf5 if specified
t0 = time()
logif('-- Save Features to HDF5', self.time)
self.hdf5_grid_data(dict_data, 'Feature_%s' % (self.feature_mode))
logif(' Total %f ms' % ((time() - t0) * 1000), self.time)
# add all the atomic densities to the data
def add_all_atomic_densities(self):
"""Add all atomic densities."""
# if we want the atomic densities
if self.atomic_densities is not None:
# compute the atomic densities
self.map_atomic_densities()
# save to hdf5
t0 = time()
logif('-- Save Atomic Densities to HDF5', self.time)
self.hdf5_grid_data(self.atdens, 'AtomicDensities_%s' %
(self.atomic_densities_mode))
logif(' Total %f ms' % ((time() - t0) * 1000), self.time)
################################################################
# define the grid points
# there is an issue maybe with the ordering
# In order to visualize the data in VMD the Y and X axis must be inverted ...
# I keep it like that for now as it should not matter for the CNN
# and maybe we don't need atomic densities as features
################################################################
def define_grid_points(self):
"""Define the grid points."""
logif('-- Define %dx%dx%d grid ' %
(self.npts[0], self.npts[1], self.npts[2]), self.time)
logif('-- Resolution of %1.2fx%1.2fx%1.2f Angs' %
(self.res[0], self.res[1], self.res[2]), self.time)
halfdim = 0.5 * (self.npts * self.res)
center = self.center_contact
low_lim = center - halfdim
hgh_lim = low_lim + self.res * (np.array(self.npts) - 1)
self.x = np.linspace(low_lim[0], hgh_lim[0], self.npts[0])
self.y = np.linspace(low_lim[1], hgh_lim[1], self.npts[1])
self.z = np.linspace(low_lim[2], hgh_lim[2], self.npts[2])
# there is something fishy about the 3d meshgrid
# the axes are a bit screwy ...
# I don't quite get why the ordering is like that
self.ygrid, self.xgrid, self.zgrid = np.meshgrid(
self.y, self.x, self.z)
################################################################
# Atomic densities
# as defined in the paper about ligand in protein
################################################################
# compute all the atomic densities data
def map_atomic_densities(self, only_contact=True):
"""Map the atomic densities to the grid.
Args:
only_contact(bool, optional): Map only the contact atoms
Raises:
ImportError: Description
ValueError: if an unsupported mode is used
"""
mode = self.atomic_densities_mode
logif('-- Map atomic densities on %dx%dx%d grid (mode=%s)' %
(self.npts[0], self.npts[1], self.npts[2], mode), self.time)
# prepare the cuda memory
if self.cuda: # pragma: no cover
# try to import pycuda
try:
from pycuda import driver, compiler, gpuarray, tools
import pycuda.autoinit
except BaseException:
raise ImportError("Error when importing pyCuda in GridTools")
# book mem on the gpu
x_gpu = gpuarray.to_gpu(self.x.astype(np.float32))
y_gpu = gpuarray.to_gpu(self.y.astype(np.float32))
z_gpu = gpuarray.to_gpu(self.z.astype(np.float32))
grid_gpu = gpuarray.zeros(self.npts, np.float32)
# get the contact atoms
atoms_by_chain = {}
if only_contact:
contact_atom_pairs = get_residue_contact_atom_pairs(self.sqldb, self.variant.chain_id,
self.variant.residue_number, self.variant.insertion_code,
self.contact_distance)
for atom1, atom2 in contact_atom_pairs:
atoms_by_chain[atom1.chain_id] = atoms_by_chain.get(atom1.chain_id, []) + [atom1]
atoms_by_chain[atom2.chain_id] = atoms_by_chain.get(atom2.chain_id, []) + [atom2]
else:
for atom in get_atoms(self.sqldb):
atoms_by_chain[atom.chain_id] = atoms_by_chain.get(atom.chain_id, []) + [atom]
# Loop over the atom types:
for element_type, vdw_rad in self.local_tqdm(self.atomic_densities.items()):
t0 = time()
# Loop over the atoms:
for chain_id, atoms in atoms_by_chain.items():
if self.cuda: # if we use CUDA
# reset the grid
grid_gpu *= 0
# get the atomic densities
for atom in atoms:
if atom.element == element_type:
x0, y0, z0 = atom.position.astype(np.float32)
vdw = np.float32(vdw_rad)
self.cuda_atomic(vdw, x0, y0, z0,
x_gpu, y_gpu, z_gpu, grid_gpu,
block=tuple(self.gpu_block),
grid=tuple(self.gpu_grid))
atdens = grid_gpu.get()
else: # if we don't use CUDA
# init the grid
atdens = np.zeros(self.npts)
# run on the atoms
for atom in atoms:
if atom.element == element_type:
atdens += self.densgrid(atom.position, vdw_rad)
if mode == 'ind':
key = element_type
self.atdens[key] = atdens
else:
raise ValueError('Unsupported atomic density mode {}'.format(mode))
tgrid = time() - t0
logif(' Grid time %f ms' % (tgrid * 1000), self.time)
# compute the atomic densities on the grid
def densgrid(self, center, vdw_radius):
"""Function to map individual atomic density on the grid.
The formula is equation (1) of the Koes paper,
"Protein-Ligand Scoring with Convolutional Neural Networks", arXiv:1612.02751v1
Args:
center (list(float)): position of the atoms
vdw_radius (float): vdw radius of the atom
Returns:
TYPE: np.array (mapped density)
"""
x0, y0, z0 = center
dd = np.sqrt((self.xgrid - x0)**2
+ (self.ygrid - y0)**2
+ (self.zgrid - z0)**2)
dgrid = np.zeros(self.npts)
index_shortd = dd < vdw_radius
index_longd = (dd >= vdw_radius) & (dd < 1.5 * vdw_radius)
dgrid[index_shortd] = np.exp(-2 * dd[index_shortd]**2 / vdw_radius**2)
dgrid[index_longd] = 4. / np.e**2 / vdw_radius**2 * dd[index_longd]**2 \
- 12. / np.e**2 / vdw_radius * dd[index_longd] + 9. / np.e**2
return dgrid
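# Added sanity note (not in the original source): the two branches above join
# continuously, since at d = vdw_radius both give exp(-2) = 1/e**2, and at
# d = 1.5 * vdw_radius the quadratic branch gives (9 - 18 + 9) / e**2 = 0.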
################################################################
# Residue or Atomic features
# read the file provided in input
# and map it on the grid
################################################################
@staticmethod
def _get_feature_row_position_values(row):
""" Extract metadata from an input xyz feature row.
The row format is: x y z [values]
Returns (triple): position(float list of length 3) and values(float list)
"""
position_dimension = 3
position = row[:position_dimension]
values = row[position_dimension:]
return position, values
@staticmethod
def _get_indicative_feature_key(feature_type_name, value_number=None):
""" Creates a key to be used within the grid feature group.
The format is: [type name]_[value number]
Returns (str): the key
"""
feature_name = feature_type_name
if value_number is not None:
feature_name += "_value%03d" % value_number
return feature_name
# map residue a feature on the grid
def map_features(self, featlist, transform=None):
"""Map individual feature to the grid.
For residue based feature the feature file must be of the format
chainID residue_name(3-letter) residue_number [values]
For atom based feature it must be
chainID residue_name(3-letter) residue_number atom_name [values]
Args:
featlist (list(str)): list of features to be mapped
transform (callable, optional): transformation of the feature (?)
Returns:
np.array: Mapped features
Raises:
ImportError: Description
ValueError: if an unsupported mode is used
"""
# declare the total dictionary
dict_data = {}
# prepare the cuda memory
if self.cuda: # pragma: no cover
# try to import pycuda
try:
from pycuda import driver, compiler, gpuarray, tools
import pycuda.autoinit
except BaseException:
raise ImportError("Error when importing pyCuda in GridTools")
# book mem on the gpu
x_gpu = gpuarray.to_gpu(self.x.astype(np.float32))
y_gpu = gpuarray.to_gpu(self.y.astype(np.float32))
z_gpu = gpuarray.to_gpu(self.z.astype(np.float32))
grid_gpu = gpuarray.zeros(self.npts, np.float32)
# loop over all the features required
for feature_name in featlist:
logger.debug('-- Map %s on %dx%dx%d grid ' % (feature_name, self.npts[0], self.npts[1], self.npts[2]))
# read the data
featgrp = self.variant_group['features']
if feature_name in featgrp.keys():
data = featgrp[feature_name][:]
else:
raise ValueError('feature %s not found in the file' % (feature_name))
# test if the transform is callable
# and test it on the first line of the data
# get the data on the first line
position_dimension = 3
logger.debug("data shape {}".format(data.shape))
if data.shape[0] != 0:
position, data_test = GridTools._get_feature_row_position_values(data[0])
logger.debug("position={}, data={}".format(position, data_test))
# define the length of the output
if transform is None:
nFeat = len(data_test)
elif callable(transform):
nFeat = len(transform(data_test))
else:
print('Error: transform in map_features must be callable')
return None
else:
nFeat = 1
logger.debug("placing {} features in {}".format(nFeat, feature_name))
# Initialize the dict that will eventually hold all the data:
if nFeat == 1:
fname = GridTools._get_indicative_feature_key(feature_name)
dict_data[fname] = np.zeros(self.npts)
else: # do we need that ?!
for iF in range(nFeat):
fname = GridTools._get_indicative_feature_key(feature_name, value_number=iF)
dict_data[fname] = np.zeros(self.npts)
# skip empty features
if data.shape[0] == 0:
continue
# reset the grid and get the x y z values
if self.cuda: # pragma: no cover
grid_gpu *= 0
# timing
tprocess = 0
tgrid = 0
# map all the features
for row in self.local_tqdm(data):
t0 = time()
# parse the row
pos, feat_values = GridTools._get_feature_row_position_values(row)
# postprocess the data
if callable(transform):
feat_values = transform(feat_values)
# handle the mode
if self.feature_mode == "diff":
raise ValueError("Unsupported feature mode {}".format(self.feature_mode))
else:
coeff = 1.0
tprocess += time() - t0
t0 = time()
# map this feature(s) on the grid(s)
if not self.cuda:
if nFeat == 1:
fname = GridTools._get_indicative_feature_key(feature_name)
dict_data[fname] += coeff * self.featgrid(pos, feat_values)
else:
for iF in range(nFeat):
fname = GridTools._get_indicative_feature_key(feature_name, iF)
dict_data[fname] += coeff * self.featgrid(pos, feat_values[iF])
# try to use cuda to speed it up
else: # pragma: no cover
if nFeat == 1:
x0, y0, z0 = pos.astype(np.float32)
alpha = np.float32(coeff * feat_values)
self.cuda_func(alpha,
x0, y0, z0,
x_gpu, y_gpu, z_gpu,
grid_gpu,
block=tuple(self.gpu_block),
grid=tuple(self.gpu_grid))
else:
raise ValueError('CUDA only possible for single-valued features')
tgrid += time() - t0
if self.cuda: # pragma: no cover
fname = GridTools._get_indicative_feature_key(feature_name)
dict_data[fname] = grid_gpu.get()
driver.Context.synchronize()
logger.debug(' Process time %f ms' % (tprocess * 1000))
logger.debug(' Grid time %f ms' % (tgrid * 1000))
logger.debug(" Returning data {}" % dict_data)
return dict_data
# compute a given feature on the grid
def featgrid(self, center, value, type_='fast_gaussian'):
"""Map an individual feature (atomic or residue) on the grid.
Args:
center (list(float)): position of the feature center
value (float): value of the feature
type_ (str, optional): method to map
Returns:
np.array: Mapped feature
Raises:
ValueError: Description
"""
# shortcut for the center
x0, y0, z0 = center
sigma = np.sqrt(1. / 2)
beta = 0.5 / (sigma**2)
# simple Gaussian
if type_ == 'gaussian':
dd = np.sqrt((self.xgrid - x0)**2
+ (self.ygrid - y0)**2
+ (self.zgrid - z0)**2)
dd = value * np.exp(-beta * dd)
return dd
# fast gaussian
elif type_ == 'fast_gaussian':
cutoff = 5. * beta
dd = np.sqrt((self.xgrid - x0)**2
+ (self.ygrid - y0)**2
+ (self.zgrid - z0)**2)
dgrid = np.zeros(self.npts)
dgrid[dd < cutoff] = value * np.exp(-beta * dd[dd < cutoff])
return dgrid
# B-spline
elif type_ == 'bspline':
spline_order = 4
spl = bspline((self.xgrid - x0) / self.res[0], spline_order) \
* bspline((self.ygrid - y0) / self.res[1], spline_order) \
* bspline((self.zgrid - z0) / self.res[2], spline_order)
dd = value * spl
return dd
# nearest neighbours
elif type_ == 'nearest':
# distances
dx = np.abs(self.x - x0)
dy = np.abs(self.y - y0)
dz = np.abs(self.z - z0)
# index
indx = np.argsort(dx)[:2]
indy = np.argsort(dy)[:2]
indz = np.argsort(dz)[:2]
# weight
wx = dx[indx]
wx /= np.sum(wx)
wy = dy[indy]
wy /= np.sum(wy)
wz = dz[indz]
wz /= np.sum(wz)
# define the points
indexes = [indx, indy, indz]
points = list(itertools.product(*indexes))
# define the weight
W = [wx, wy, wz]
W = list(itertools.product(*W))
W = [np.sum(iw) for iw in W]
# put that on the grid
dgrid = np.zeros(self.npts)
for w, pt in zip(W, points):
dgrid[pt[0], pt[1], pt[2]] = w * value
return dgrid
# default
else:
raise ValueError(f'Options not recognized for the grid {type_}')
################################################################
# export the grid points for external calculations of some
# features. For example the electrostatic potential etc ...
################################################################
def export_grid_points(self):
"""export the grid points to the hdf5 file."""
hdf5data.store_grid_points(self.variant_group, self.x, self.y, self.z)
hdf5data.store_grid_center(self.variant_group, self.center_contact)
logger.info("store a grid for {}, centered at {}".format(str(self.variant), self.center_contact))
# save the data in the hdf5 file
@staticmethod
def _check_features(name, features):
""" Check the feature for values that could cause glitches.
Args:
features (np.array): raw feature values
"""
if np.any(np.isnan(features)):
raise ValueError("%s: NaN detected" % name)
if np.any(np.isinf(features)):
raise ValueError("%s: Infinity detected" % name)
def hdf5_grid_data(self, dict_data, data_name):
"""Save the mapped feature to the hdf5 file.
Args:
dict_data(dict): feature values stored as a dict
data_name(str): feature name
"""
hdf5data.store_grid_data(self.variant_group, data_name, dict_data, try_sparse=self.try_sparse)
for key in dict_data:
data = np.array(dict_data[key])
GridTools._check_features("%s[%s] from %s" % (data_name, key, str(self.variant)), data)
data_summary = "%s<{%f - %f}" % ("x".join([str(n) for n in data.shape]), np.min(data),
|
np.max(data)
|
numpy.max
|
#!/usr/local/bin/python3
"""
py_auc - python library for calculating the area under the curve (ROC, PR) of binary classifiers
author: <NAME> @ IBM
email: <EMAIL>
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from scipy.optimize import curve_fit
class AUC(object):
""" object for area under the curve (AUC) calculation
example:
import py_auc
sg = py_auc.Score_generator()
a = py_auc.AUC(sg.get_asDataFrame())
a.cal_auc_rank()
a.plot_ROC()
"""
def __init__(self, data=None, debug=False):
"""
initialize with scores and classes
input: data - panda DataFrame with "Score" and "Class" columns
"""
self._scores = []
self._classes = []
self._data = data
self._debug = debug
self._n0 = 0 # class 0
self._n1 = 0 # class 1
self._n = 0
self._spm = None
if (data is not None) and ("Score" in data.columns) and ("Class" in data.columns):
self.get_classes_scores(data["Class"], data["Score"])
def get_classes_scores(self, c, s):
""" add scores and classes """
if len(c) != len(s):
print('... dimensions do not match: score - {} class - {}'.format(len(s), len(c)))
return
self._scores = s
self._classes = c
self._n = len(self._scores)
self._n1 = np.count_nonzero(self._classes)
self._n0 = self._n - self._n1
self._prepare()
def get_scores(self, s0, s1):
""" add class 0 score and class 1 score separately """
self._n0 = len(s0)
self._n1 = len(s1)
self._n = self._n0 + self._n1
self._scores = np.zeros(self._n)
self._scores[:self._n0] = s0
self._scores[self._n0:] = s1
self._classes = np.ones(self._n)
self._classes[:self._n0] = 0
self._prepare()
def _prepare(self):
""" calculate rank """
self._data = pd.DataFrame()
self._data['Score'] = self._scores
self._data['Class'] = self._classes
self._spm = self._data.sort_values(by='Score', ascending=False)
self._spm['Rank'] = range(self._n+1)[1:]
self._spm['FPR'] = (self._spm['Class'] == 0).cumsum().values/self._n0 # FPR
self._spm['TPR'] = (self._spm['Class'] == 1).cumsum().values/self._n1 # TPR or recall
self._spm['Prec'] = (self._spm['Class'] == 1).cumsum().values/(np.arange(self._n) + 1) # precision
def cal_auc_rank(self, measure_time=False):
""" calculate area under ROC using rank algorithm """
if measure_time: start_time = time.time()
self._spm = self._data.sort_values(by='Score', ascending=False)
self._spm['Rank'] = np.arange(self._n+1)[1:]/self._n
mask = self._spm['Class'] == 0
auc = 0.5 + (self._spm[mask].Rank.mean() - self._spm[~mask].Rank.mean())
if measure_time:
return (auc, (time.time() - start_time))
else:
return (auc)
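# Added derivation sketch (not in the original source): with scores sorted in
# descending order and ranks normalized by n, the class mean ranks m0 and m1
# satisfy n0*m0 + n1*m1 = (n + 1)/2; substituting this into the Mann-Whitney
# form AUC = U/(n0*n1) shows that 0.5 + (m0 - m1) equals the ROC AUC exactly
# (ignoring ties), which is the quantity returned above.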
def cal_auc_bac(self, measure_time=False):
""" calculate area under ROC using rank algorithm """
if measure_time: start_time = time.time()
self._spm = self._data.sort_values(by='Score', ascending=False)
self._spm['FPR'] = (self._spm['Class'] == 0).cumsum().values/self._n0 # FPR
self._spm['TPR'] = (self._spm['Class'] == 1).cumsum().values/self._n1 # TPR or recall
auc = np.sum(self._spm['TPR'] + 1 - self._spm['FPR'])/self._n - 0.5
if measure_time:
return (auc, (time.time() - start_time))
else:
return (auc)
def cal_auc_trapz(self, measure_time=False):
""" calculate area under ROC using trapz function """
if measure_time: start_time = time.time()
self._spm = self._data.sort_values(by='Score', ascending=False)
x = (self._spm['Class'] == 0).cumsum().values/self._n0 # FPR
y = (self._spm['Class'] == 1).cumsum().values/self._n1 # TPR or recall
auc = np.trapz(y, x=x)
if measure_time:
return (auc, (time.time() - start_time))
else:
return (auc)
def cal_auc_sklearn(self, measure_time=False):
""" calculate area under ROC using scikit-learning """
if measure_time: start_time = time.time()
auc = roc_auc_score(self._classes, self._scores)
if measure_time:
return (auc, (time.time() - start_time))
else:
return (auc)
def cal_auprc_rank(self, measure_time=False):
""" calculate area under precision-recall curve using rank algorithm """
if measure_time: start_time = time.time()
rho = self._n1/self._n
self._spm = self._data.sort_values(by='Score', ascending=False)
p = (self._spm['Class'] == 1).cumsum().values/(np.arange(self._n) + 1) # precision
auprc = 0.5*rho*(1.0 + np.sum(p*p)/(self._n*rho*rho))
if measure_time:
return (auprc, (time.time() - start_time))
else:
return(auprc)
def cal_auprc_trapz(self, measure_time=False):
""" calculate area under precision-recall curve using trapz algorithm """
if measure_time: start_time = time.time()
self._spm = self._data.sort_values(by='Score', ascending=False)
y = (self._spm['Class'] == 1).cumsum().values/self._n1 # TPR or recall
p = (self._spm['Class'] == 1).cumsum().values/(np.arange(self._n) + 1) # precision
auprc = np.trapz(p, x=y)
if measure_time:
return (auprc, (time.time() - start_time))
else:
return(auprc)
def cal_auprc_sklearn(self, measure_time=False):
""" calculate area under PRC using scikit-learning """
if measure_time: start_time = time.time()
auprc = average_precision_score(self._classes, self._scores)
if measure_time:
computetime = time.time() - start_time
return (auprc, computetime)
else:
return auprc
def plot_rank(self, sampling=10, filename=''):
""" plot rank vs class """
self._prepare()
cmatrix = self._spm['Class'].values.reshape(sampling, -1)
prob = cmatrix.mean(axis=1)
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(15,4))
ax0 = axs[0]
ax0.plot(prob, '.')
ax0.set_xlabel('Sampled Rank r\'')
ax0.set_ylabel('P(1|r\')')
ax1 = axs[1]
ax1.plot(self._spm['Rank'], self._spm['TPR'], label='TPR')
ax1.plot(self._spm['Rank'], self._spm['FPR'], '--', label='FPR')
ax1.set_xlabel('Rank r')
ax1.set_ylabel('TPR(r), FPR(r)')
ax1.legend()
ax2 = axs[2]
ax2.plot(self._spm['Rank'], self._spm['Prec'], label='prec')
bac = (self._spm['TPR'].values + 1.0 - self._spm['FPR'].values)/2.0
ax2.plot(self._spm['Rank'], bac, '--', label='bac')
ax2.set_xlabel('Rank r')
ax2.set_ylabel('Precision(r), bac(r)')
ax2.legend()
if filename == '': filename = 'rank_plot.pdf'
plt.savefig(filename, dpi=150)
def plot_ROC(self, bins=50, filename=''):
""" calculate ROC curve (receiver operating characteristic curve) """
self._prepare()
auc = np.trapz(self._spm['TPR'], x=self._spm['FPR'])
print('AUC (area under the ROC curve): {0:8.3f}'.format(auc))
auc = np.trapz(self._spm['Prec'], x=self._spm['TPR'])
print('AUPRC (area under the PRC curve): {0:8.3f}'.format(auc))
# ROC plot
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(15,4))
ax0 = axs[0]
ax0.plot(self._spm['FPR'], self._spm['TPR'], label='ROC')
ax0.set_xlabel('FPR')
ax0.set_ylabel('TPR')
#ax0.set_xlim(0, 1.1)
#ax0.set_ylim(0, 1.1)
# PR plot
ax1 = axs[1]
ax1.plot(self._spm['TPR'], self._spm['Prec'], label='PRC')
ax1.set_xlabel('Recall (TPR)')
ax1.set_ylabel('Precision')
#ax1.set_xlim(0, 1.1)
#ax1.set_ylim(0, 1.1)
#ax1.set_title('Precision-Recall Curve')
# score distribution
ax2 = axs[2]
sns.histplot(self._spm.loc[self._spm['Class']==0, 'Score'], bins=bins, kde=False, label='Class 0', ax=ax2)
sns.histplot(self._spm.loc[self._spm['Class']==1, 'Score'], bins=bins, kde=False, label='Class 1', ax=ax2)
ax2.legend(loc='upper right')
ax2.set_xlabel('Scores')
ax2.set_ylabel('#')
if filename == '':
filename = 'auc_summary.pdf'
plt.savefig(filename, dpi=150)
plt.show()
class Score_generator(object):
""" two class score generator
example:
import py_auc
sg = py_auc.Score_generator()
sg.set0('gaussian', 0, 1, 1000)
sg.set1('gaussian', 3, 1, 1000)
OR
sg.set(n=10000, rho=0.5, kind0='gaussian', mu0=0, std0=2, kind1='gaussian', mu1=1, std1=2)
res = sg.get_classProbability(sampleSize=100, sampleN=100, measure_time=False)
lambda = sg.get_lambda(cprob=res)
sg.plot_prob(cprob=res)
sg.plot_rank(cprob=res)
"""
def __init__(self):
""" """
self._kind0 = ''
self._kind1 = ''
self._mu0 = 0
self._mu1 = 0
self._std0 = 0
self._std1 = 0
self._n0 = 100
self._n1 = 100
self._n = self._n0 + self._n1
self._rho = self._n1/self._n
self._s0 = []
self._s1 = []
self._prob = []
self._sampleN = 0
self._sampling = []
self._fit_vals = []
self._debug = False
def _generate(self, kind, mu, std, n, k):
""" set parameters of class """
if kind.lower() not in ['uniform', 'gaussian', 'triangle', 'gfamily']:
kind = 'uniform'
if kind.lower() == 'uniform':
temp = np.random.uniform(low=mu-std, high=mu+std, size=n)
shapex = np.array([mu-2*std, mu-std, mu-std, mu+std, mu+std, mu+2*std])
shapey = np.array([0, 0, 1/(2*std), 1/(2*std), 0, 0])
elif kind.lower() == 'gaussian':
temp = np.random.normal(loc=mu, scale=std, size=n)
shapex = np.linspace(mu-2.5*std, mu+2.5*std, 50)
shapey = 1/(std * np.sqrt(2 * np.pi)) * np.exp( - (shapex - mu)**2 / (2 * std**2))
elif kind.lower() == 'triangle':
#if self._debug: print('... generating gaussian distribution with n={}, mu={}, sigma={}'.format(n, mu, std))
temp = np.random.triangular(mu-std, mu, mu+std, size=n)
shapex = np.array([mu-2*std, mu-std, mu, mu+std, mu+2*std])
shapey = np.array([0, 0, 1/std, 0, 0])
elif kind.lower() == 'gfamily':
#if self._debug: print('... generating gaussian family with k = {}, n={}, mu={}, sigma={}'.format(k, n, mu, std))
temp = gaussfamily(k=k, n=n, mu=mu, std=std, show=False)
if k == 1:
std_k = std * np.sqrt(3*k)
shapex = np.array([mu-2*std_k, mu-std_k, mu-std_k, mu+std_k, mu+std_k, mu+2*std_k])
shapey = np.array([0, 0, 1/(2*std_k), 1/(2*std_k), 0, 0])
elif k == 2:
std_k = std * np.sqrt(3*k)
shapex = np.array([mu-2*std_k, mu-std_k, mu, mu+std_k, mu+2*std_k])
shapey = np.array([0, 0, 1/std_k, 0, 0])
else:
shapex = np.linspace(mu-2.5*std, mu+2.5*std, 50)
shapey = 1/(std * np.sqrt(2 * np.pi)) * np.exp( - (shapex - mu)**2 / (2 * std**2))
return temp, kind, mu, std, n, shapex, shapey
def set0(self, kind, mu, std, n, k=1):
"""
kind : ['uniform', 'gaussian', 'triangle', 'gfamily']
mu : mean
std : standard deviation
n : number of samples
"""
self._s0, self._kind0, self._mu0, self._std0, self._n0, self._shapex0, self._shapey0 = self._generate(kind, mu, std, n, k)
self._n = self._n0 + self._n1
self._rho = float(self._n1/self._n)
self._k0 = k
def set1(self, kind, mu, std, n, k=1):
"""
kind : ['uniform', 'gaussian', 'triangle', 'gfamily']
mu : mean
std : standard deviation
n : number of samples
"""
self._s1, self._kind1, self._mu1, self._std1, self._n1, self._shapex1, self._shapey1 = self._generate(kind, mu, std, n, k)
self._n = self._n0 + self._n1
self._rho = float(self._n1/self._n)
self._k1 = k
def set(self, n=10000, rho=0.5, kind0='gaussian', mu0=0, std0=2, k0=1, kind1='gaussian', mu1=1, std1=2, k1=1):
""" generate score distribution """
n1 = int(n*rho)
n0 = n - n1
if self._debug: print('... generating {} positive class'.format(n1))
self.set1(kind1, mu1, std1, n1, k=k1)
if self._debug: print('... generating {} negative class'.format(n0))
self.set0(kind0, mu0, std0, n0, k=k0)
def get(self):
""" get scores """
return [self._s0, self._s1]
def get_asDataFrame(self):
""" get scores as DataFrame """
scores = np.zeros(self._n)
scores[:self._n0] = self._s0
scores[self._n0:] = self._s1
classes = np.ones(self._n)
classes[:self._n0] = 0
return pd.DataFrame({'Score': scores, 'Class': classes})
def get_randomSample(self, n):
""" get sample of scores """
temp = self.get_asDataFrame()
return temp.sample(n)
def get_classProbability(self, sampleSize=100, sampleN=100, measure_time=False):
""" calculate probability of class at given rank r """
if measure_time: start_time = time.time()
temp = self.get_asDataFrame()
temp0 = temp[temp['Class'] == 0]
temp1 = temp[temp['Class'] == 1]
n1 = int(sampleSize*self._rho)
n0 = sampleSize - n1
res = pd.DataFrame()
res['Rank'] = range(sampleSize+1)[1:]
for i in range(sampleN):
a = pd.concat([temp0.sample(n0), temp1.sample(n1)]).sort_values(by='Score', ascending=False)
res['Class_{}'.format(i)] = a['Class'].values
self._prob = res.values[:, 1:sampleN+1].mean(axis=1)
res['P(1|r)'] = self._prob
res['P(0|r)'] = 1 - self._prob
res['TPR'] = np.cumsum(res['P(1|r)'])/n1
res['FPR'] = np.cumsum(res['P(0|r)'])/n0
res['Prec'] = np.cumsum(res['P(1|r)'])/res['Rank']
res['bac'] = 0.5*(res['TPR'] + 1.0 - res['FPR'])
self._sampling = res
self._sampleN = sampleN
self._sampleSize = sampleSize
self._sampleN0 = n0
self._sampleN1 = n1
self._auc =
|
np.sum(res['P(0|r)']*res['Rank']/n0 - res['P(1|r)']*res['Rank']/n1)
|
numpy.sum
|
# -*- coding: utf-8 -*-
# After execution of the main pipeline, files are organized in a variety of
# locations, and some information is spread across multiple files. This
# gathers various charts and creates tables using provided data, storing it
# all in folders organized by paper section.
import utils
import graphUtils
import numpy as np
import scipy.stats as stats
import os
import re
import subprocess
import mainParams as mp
from groupWords import getWordGroupsRangeTest
# ==============================================================================
# ==============================================================================
# Color constants for heat-map tables
COLOR_BLUE = (66, 134, 244)
COLOR_ORANGE = (244, 179, 66)
COLOR_GRAY = (239, 239, 239)
# given a start color, an end color, and a current value within a min-max
# range, find a linear interpolation between start and end color
def colorConvert(min, max, current, startColor, endColor):
sr, sg, sb = startColor
er, eg, eb = endColor
scale = (current - min) / (max - min)
red = sr + scale*(er-sr)
green = sg + scale*(eg-sg)
blue = sb + scale*(eb-sb)
return (red/255, green/255, blue/255)
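# Worked example (added; values are hypothetical): halfway through a 0-10
# range, colorConvert(0, 10, 5, COLOR_BLUE, COLOR_ORANGE) returns roughly
# (0.61, 0.61, 0.61), the midpoint of the two colors rescaled to [0, 1].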
# create tables with information evaluating given metrics
def makeMetricEvalTables(suffix, topStr, comparableTopStr, topNum, poetryNum, comparableNum, simMetrics, baseFolder):
baseScoreInfo = [
("Cosine", 0),
("Burrows' Delta", 0),
]
bestMetricName = "Jensen-Shannon (250)" #Jensen-Shannon+p
bestMetricSigWork = []
bestMetricSigAuthor = []
evalTableOutput = []
evalTableOutput.append("""\\begin{table}[!bt]
\\centering
\\def\\arraystretch{1}
\\begin{tabular}{| l | r | r |}
\\hline
& \\multicolumn{2}{c|}{\\textbf{Percentage of segments most similar to a segment...}} \\\\
\\textbf{Metric}& \\textbf{from the same work} & \\textbf{by the same author} \\\\\\hline
""")
sameWorkTableOutput = []
sameAuthorTableOutput = []
temp = """\\begin{table}[!bt]
\\centering
\\def\\arraystretch{1}
\\begin{tabular}{| l | c | c | c |}
\\hline
"""
sameWorkTableOutput.append(temp)
sameAuthorTableOutput.append(temp)
temp = "& & \\textbf{Top %d +} & \\\\" % (topNum)
sameWorkTableOutput.append(temp)
sameAuthorTableOutput.append(temp)
temp = "\\textbf{Metric}& \\textbf{Top %d} & \\textbf{Top %d in Poetry} & \\textbf{Top %d} \\\\\\hline" % (topNum, poetryNum, comparableNum)
sameWorkTableOutput.append(temp)
sameAuthorTableOutput.append(temp)
workSigReport = []
authorSigReport = []
# & \\textbf{Sim to another work} & \\textbf{Closest to diff author} & \\textbf{Median}
# Get the list of authors and works the metric got correct
scoreLists = {}
for simMetric in simMetrics:
dir, metricName = simMetric
scoreLists[metricName] = {}
for i, params in enumerate([(False, False), (True, False), (False, True), ]):
name = metricName
addP, comparable = params
metricTopStr = topStr
if addP:
metricTopStr += "+p"
name += "+p"
# look at comparable number of non-poetry words
elif comparable:
metricTopStr = comparableTopStr
name += " (%d)" % comparableNum
else:
name += " (%d)" % topNum
fname = "output/greek/no_split/%s/%s/metric/Books/scores.json" % (metricTopStr, dir)
scores = utils.getContent(fname, True)
scoreLists[metricName][i] = scores
scoreLists[metricName][i]["name"] = name
baseScores = []
for bsi in baseScoreInfo:
baseScoreMetric, baseScoreIndex = bsi
baseScores.append(scoreLists[baseScoreMetric][baseScoreIndex])
# Create a table of the information using the provided scores
for metricName in scoreLists:
cell2 = "\\textbf{%s}" % (metricName)
cell3 = "\\textbf{%s}" % (metricName)
for i in scoreLists[metricName]:
currentScores = scoreLists[metricName][i]
authorScores = currentScores["author"]
workScores = currentScores["work"]
name = currentScores["name"]
sameWork = "%.2f%%" % (100*np.mean(workScores))
sameAuth = "%.2f%%" % (100*np.mean(authorScores))
# sameWork = "%.2f%%, (%d/%d)" % (100*np.mean(workScores), np.sum(workScores), len(workScores))
# sameAuth = "%.2f%%, (%d/%d)" % (100*np.mean(authorScores), np.sum(authorScores), len(authorScores))
# cell = "%s & %s & %s & %s & %s & %s" % (name, sameAuth, sameWork, otherWork, diffAuthClosest, median)
cell = "%s & %s & %s" % (name, sameWork, sameAuth)
cell = cell.replace("%", "\\%")
evalTableOutput.append("%s\\\\\\hline" % cell)
cell2 += " & %s" % (sameWork) # work_p
cell3 += " & %s" % (sameAuth) # , author_p)
for j, baseScore in enumerate(baseScores):
a = baseScore["work"]
b = currentScores["work"]
work_t, work_p = stats.ttest_rel(a, b)
workSigReport.append(name)
# Degrees of freedom
df = len(b) - 1
workSig = " (M=%.3f, SD=%.3f) t(%d)=%.3f, p=%.3e" % (np.mean(b), np.std(b), df, work_t, work_p)
workSigReport.append(workSig)
a = baseScore["author"]
b = currentScores["author"]
author_t, author_p = stats.ttest_rel(a, b)
authorSigReport.append(name)
# Degrees of freedom
df = len(b) - 1
authorSig = " (M=%.3f, SD=%.3f) t(%d)=%.3f, p=%.3e" % (np.mean(b), np.std(b), df, author_t, author_p)
authorSigReport.append(authorSig)
if (name == bestMetricName or name == baseScore["name"]):
bestMetricSigWork.append("%s vs %s" % (name, baseScore["name"]))
bestMetricSigWork.append(workSig)
bestMetricSigAuthor.append("%s vs %s" % (name, baseScore["name"]))
bestMetricSigAuthor.append(authorSig)
#print(" Author: t-statistic = %6.3f pvalue = %f" % stats.ttest_rel(a, b))
# Significance notes
if (j == 0):
if (work_p < 0.01):
cell2 += "\\textbf{†}"
elif (work_p < 0.05):
cell2 += "\\textbf{*}"
if (author_p < 0.01):
cell3 += "\\textbf{†}"
elif (author_p < 0.05):
cell3 += "\\textbf{*}"
else:
if (work_p < 0.01):
cell2 += "\\textbf{‡}"
if (author_p < 0.01):
cell3 += "\\textbf{‡}"
cell2 = cell2.replace("%", "\\%")
sameWorkTableOutput.append("%s\\\\\\hline" % cell2)
cell3 = cell3.replace("%", "\\%")
sameAuthorTableOutput.append("%s\\\\\\hline" % cell3)
evalTableOutput.append("""
\\end{tabular}
\\caption{How well similarity metrics identify whether two segments come from the same work or the same author.}
\\label{table:metric_eval}
\\end{table}
""")
utils.safeWrite("%smetric/extraInfo/metricEvalTable%s.tex" % (baseFolder, suffix), "\n".join(evalTableOutput))
sameWorkTableOutput.append("\\end{tabular}")
sameWorkTableOutput.append("\\caption[How well similarity metrics based on a given set of words identify whether two segments come from the same work.]{")
sameWorkTableOutput.append("How well similarity metrics based on a given set of words identify whether two segments come from the same work. \\newline")
sameWorkTableOutput.append("†: Results very significant (p < 0.01) when compared to %s. \\newline" % baseScores[0]["name"])
sameWorkTableOutput.append("*: Results significant (p < 0.05) when compared to %s. \\newline" % baseScores[0]["name"])
sameWorkTableOutput.append("‡: Results very significant (p < 0.01) when compared to %s. " % baseScores[1]["name"])
sameWorkTableOutput.append("}")
sameWorkTableOutput.append("\\label{table:metric_eval_work}")
sameWorkTableOutput.append("\\end{table}")
utils.safeWrite("%smetric/sameWorkEvalTable%s.tex" % (baseFolder, suffix), "\n".join(sameWorkTableOutput))
sameAuthorTableOutput.append("\\end{tabular}")
sameAuthorTableOutput.append("\\caption[How well similarity metrics based on a given set of words identify whether two segments come from the same author.]{")
sameAuthorTableOutput.append("How well similarity metrics based on a given set of words identify whether two segments come from the same author. \\newline")
sameAuthorTableOutput.append("†: Results very significant (p < 0.01) when compared to %s. \\newline" % baseScores[0]["name"])
sameAuthorTableOutput.append("*: Results significant (p < 0.05) when compared to %s. \\newline" % baseScores[0]["name"])
sameAuthorTableOutput.append("‡: Results very significant (p < 0.01) when compared to %s. " % baseScores[1]["name"])
sameAuthorTableOutput.append("}")
sameAuthorTableOutput.append("\\label{table:metric_eval_author}")
sameAuthorTableOutput.append("\\end{table}")
utils.safeWrite("%smetric/sameAuthorEvalTable%s.tex" % (baseFolder, suffix), "\n".join(sameAuthorTableOutput))
sigReport = "Work:\n" + ("\n".join(bestMetricSigWork)) + "\n\n-------------\n\nAuthor:\n" + ("\n".join(bestMetricSigAuthor))
utils.safeWrite("%smetric/bestMetricSignificance%s.txt" % (baseFolder, suffix), sigReport)
# utils.safeWrite("%smetric/bestMetricSignificanceWork%s.txt" % (baseFolder, suffix), "\n".join(bestMetricSigWork))
# utils.safeWrite("%smetric/bestMetricSignificanceAuthor%s.txt" % (baseFolder, suffix), "\n".join(bestMetricSigAuthor))
utils.safeWrite("%smetric/extraInfo/metricSignificanceReportWork%s.txt" % (baseFolder, suffix), "\n".join(workSigReport))
utils.safeWrite("%smetric/extraInfo/metricSignificanceReportAuthor%s.txt" % (baseFolder, suffix), "\n".join(authorSigReport))
# create LaTeX tables evaluating each metric's performance with and without
# smoothing and remainder words
def makeMetricInternalTables(suffix, topStr, simMetrics, baseFolder):
metricInternalTables = []
for simMetric in simMetrics:
dir, metricName = simMetric
# skip Jensen-Shannon
if metricName == "Jensen-Shannon":
continue
tableOutput = []
temp = """
\\begin{table}[!bt]
\\centering
\\def\\arraystretch{1}
\\begin{tabular}{| l | c | c | c |}
\\hline
"""
tableOutput.append(temp)
temp = "\\textbf{Metric Options} & \\textbf{Author} & \\textbf{Work} & \\textbf{Total} \\\\\\hline"
tableOutput.append(temp)
workSigReport = []
authorSigReport = []
totalSigReport = []
# & \\textbf{Sim to another work} & \\textbf{Closest to diff author} & \\textbf{Median}
metricOptions = [
("Baseline", "-remainder-smoothed"),
("+1 Smoothing", "-remainder+smoothed"),
("Remainder", "+remainder-smoothed"),
("Both", "+remainder+smoothed")
]
# Get the list of authors and works the metric got correct
scoreLists = {}
for _, opt in metricOptions:
scoreLists[opt] = {}
name = opt
# Use Poetry Words
metricTopStr = topStr
fname = "output/greek/no_split/%s/%s/metric%s/Books/scores.json" % (metricTopStr, dir, opt)
scores = utils.getContent(fname, True)
scoreLists[opt] = scores
scoreLists[opt]["name"] = name
baseScore = scoreLists["-remainder-smoothed"]
# baseScores = []
# for bsi in baseScoreInfo:
# baseScoreMetric, baseScoreIndex = bsi
# baseScores.append(scoreLists[baseScoreMetric][baseScoreIndex])
# Create a table of the information using the provided scores
for optName, opt in metricOptions:
cell = "\\textbf{%s}" % (optName)
currentScores = scoreLists[opt]
authorScores = currentScores["author"]
workScores = currentScores["work"]
name = currentScores["name"]
sameWork = "%.2f%%, (%d/%d)" % (100*np.mean(workScores), np.sum(workScores), len(workScores))
sameAuth = "%.2f%%, (%d/%d)" % (100*np.mean(authorScores), np.sum(authorScores), len(authorScores))
all = np.concatenate((workScores, authorScores))
total = "%.2f%%, (%d/%d)" % (100*np.mean(all), np.sum(all), len(all))
wrk = " & %s" % (sameWork)
auth = " & %s" % (sameAuth)
tot = " & %s" % (total)
# Calculate significance
a = baseScore["work"]
b = currentScores["work"]
work_t, work_p = stats.ttest_rel(a, b)
workSigReport.append(name)
# Degrees of freedom
df = len(b) - 1
workSig = " (M=%.3f, SD=%.3f) t(%d)=%.3f, p=%.3e" % (np.mean(b), np.std(b), df, work_t, work_p)
workSigReport.append(workSig)
a = baseScore["author"]
b = currentScores["author"]
author_t, author_p = stats.ttest_rel(a, b)
authorSigReport.append(name)
# Degrees of freedom
df = len(b) - 1
authorSig = " (M=%.3f, SD=%.3f) t(%d)=%.3f, p=%.3e" % (np.mean(b), np.std(b), df, author_t, author_p)
authorSigReport.append(authorSig)
a =
|
np.concatenate((baseScore["work"], baseScore["author"]))
|
numpy.concatenate
|
#!/usr/bin/env python3
from flask import Blueprint, Response, render_template, request, session
from ezprobs.geometry import area_circle
from ezprobs.hydraulics import pipe_loss, local_loss
from ezprobs.problems import Parameter, Plot
from ezprobs.units import M, CM, MM, M3PS, KINEMATIC_VISCOSITY, GRAVITY
from ezprobs.dict import DICT_GER, DICT_ENG
from io import BytesIO
from math import sqrt
from scipy.optimize import fsolve
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2021 <NAME>"
__license__ = "MIT"
__email__ = "<EMAIL>"
bp = Blueprint("pressure_pipe_02", __name__)
def compute_solution():
d = 5 * CM
ha = 150 * CM
hb = 20 * CM
hout = 30 * CM
l = 2 * M
k = 0.3 * MM
nu_entry = 0.5
scale = 1 # over scaling velocity head for better display
q_initial = 3 * 10 ** -3 * M3PS
if request.method == "POST":
d = float(request.form["d"]) * MM
hb = float(request.form["hb"]) * CM
a = area_circle(d / 2)
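    # Energy balance between the upstream water level ha and the pipe outlet:
    #   ha = h_downstream + v**2 / (2 * GRAVITY) + entry loss + pipe friction loss,
    # with v = q / a. If the downstream level hb lies below the outlet height hout,
    # the pipe discharges freely against hout; otherwise hb itself controls.
    # fsolve finds the discharge q that closes this balance.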
if hb == ha:
q = 0
elif hb < hout:
q = fsolve(
lambda q: hout
+ (q / a) ** 2 / (2 * GRAVITY)
+ local_loss(nu_entry, a, q)
+ pipe_loss(l, a, k, d, q)
- ha,
1,
)[0]
else:
q = fsolve(
lambda q: hb
+ (q / a) ** 2 / (2 * GRAVITY)
+ local_loss(nu_entry, a, q)
+ pipe_loss(l, a, k, d, q)
- ha,
1,
)[0]
v = q / a
distances = np.array([0, l])
x =
|
np.cumsum(distances)
|
numpy.cumsum
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import truncnorm
np.random.seed(42)
ACCEPT = "accept"
DECLINE = "decline"
REVIEW = "review"
ACCEPT_PRIORITY = 1
REVIEW_PRIORITY = 2
DECLINE_PRIORITY = 3
REVIEW_PRIORITY_2 = 4
ACCEPT_PRIORITY_2 = 5
ACCEPT_PRIORITY_3 = 6
REVIEW_PRIORITY_3 = 7
DECLINE_PRIORITY_2 = 8
REVIEW_PRIORITY_4 = 9
ACCEPT_PRIORITY_4 = 10
number_transactions = 225000
fraud_rate = 0.05
number_fraud_transactions = int(number_transactions * fraud_rate)
number_legit_transactions = number_transactions - number_fraud_transactions
positive_labels = np.ones(number_fraud_transactions, dtype=int)
negative_labels = np.zeros(number_legit_transactions, dtype=int)
labels = np.concatenate((positive_labels, negative_labels), axis=0)
number_accept_rules = 7
number_review_rules = 30
number_decline_rules = 30
distribution_min = 0
distribution_max = number_transactions
def sampleFromNormal(mean, std):
sample = -1
global number_transactions
    while sample < 0 or sample > number_transactions:  # resample until the draw falls within [0, number_transactions]
sample = np.random.normal(mean, std, 1)
return sample
def get_truncated_normal(mean=0, sd=1, low=0, upp=number_transactions):
return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
def plot_Distribution(dist, title, mu, sigma):
plt.figure()
s = dist.rvs(1000)
count, bins, ignored = plt.hist(s, 60, density=True)
plt.title(title)
did_plots = False
accept_rules_distribution_mean = number_transactions / 5
accept_rules_distribution_std = number_transactions / 10
trunced_accept_support = get_truncated_normal(
mean=accept_rules_distribution_mean,
sd=accept_rules_distribution_std,
low=0,
upp=number_transactions,
)
plot_Distribution(
trunced_accept_support,
"trunced_accept_support",
accept_rules_distribution_mean,
accept_rules_distribution_std,
)
trunced_accept_accuracy = get_truncated_normal(mean=3 / 4, sd=1 / 5, low=0, upp=1)
plot_Distribution(trunced_accept_accuracy, "trunced_accept_accuracy", 3 / 4, 1 / 5)
decline_rules_distribution_mean = number_transactions * 0.0001
decline_rules_distribution_std = number_transactions * 0.001
trunced_decline_support = get_truncated_normal(
mean=decline_rules_distribution_mean,
sd=decline_rules_distribution_std,
low=0,
upp=number_transactions,
)
plot_Distribution(
trunced_decline_support,
"trunced_decline_support",
decline_rules_distribution_mean,
decline_rules_distribution_std,
)
trunced_decline_accuracy = get_truncated_normal(mean=1 / 6, sd=1 / 20, low=0, upp=1)
plot_Distribution(trunced_decline_accuracy, "trunced_decline_accuracy", 1 / 6, 1 / 20)
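# A Rule draws a priority, a support count (how many transactions it triggers on)
# and an accuracy from the truncated normals defined above; the while-loops below
# resample until the implied positive/negative trigger counts fit inside the
# labelled dataset.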
class Rule:
def __init__(self, labels, action=DECLINE):
number_positive_in_dataset = np.sum(labels == 1)
number_negative_in_dataset = np.sum(labels == 0)
diff_negative = -1
diff_positive = -1
if action == ACCEPT:
self.priotity = np.random.choice(
[
ACCEPT_PRIORITY,
ACCEPT_PRIORITY_2,
ACCEPT_PRIORITY_3,
ACCEPT_PRIORITY_4,
],
1,
p=[0.3, 0.3, 0.2, 0.2],
)[0]
while (diff_negative < 0) | (diff_positive < 0):
self.support_number = int(trunced_accept_support.rvs())
self.number_correct_triggers = int(
trunced_accept_accuracy.rvs() * self.support_number
)
number_of_positive_triggers = (
self.support_number - self.number_correct_triggers
)
number_of_negative_triggers = self.number_correct_triggers
diff_positive = number_positive_in_dataset - number_of_positive_triggers
diff_negative = number_negative_in_dataset - number_of_negative_triggers
else:
if action == DECLINE:
self.priotity = np.random.choice(
[DECLINE_PRIORITY, DECLINE_PRIORITY_2], 1, p=[0.6, 0.4]
)[0]
self.priotity = DECLINE_PRIORITY
else:
self.priotity = np.random.choice(
[
REVIEW_PRIORITY,
REVIEW_PRIORITY_2,
REVIEW_PRIORITY_3,
REVIEW_PRIORITY_4,
],
1,
p=[0.3, 0.3, 0.2, 0.2],
)[0]
while (diff_negative < 0) | (diff_positive < 0):
self.support_number = int(trunced_decline_support.rvs())
self.number_correct_triggers = int(
trunced_decline_accuracy.rvs() * self.support_number
)
number_of_negative_triggers = (
self.support_number - self.number_correct_triggers
)
number_of_positive_triggers = self.number_correct_triggers
diff_negative = number_negative_in_dataset - number_of_negative_triggers
diff_positive = number_positive_in_dataset - number_of_positive_triggers
def generateColumnsRuleTriggersAccept(rule, labels):
positive_index_labels = np.squeeze(np.where(labels == 1))
negative_index_labels = np.squeeze(np.where(labels == 0))
number_negative_triggered = rule.number_correct_triggers
number_positive_triggered = rule.support_number - number_negative_triggered
trigered_positive_indexes = np.random.choice(
positive_index_labels, number_positive_triggered
)
trigered_negative_indexes = np.random.choice(
negative_index_labels, number_negative_triggered
)
triggers_vector = np.full(
|
np.shape(labels)
|
numpy.shape
|
"""
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: <NAME> <<EMAIL>>
This package is distributed under New BSD license.
"""
import numpy as np
EPS = np.finfo(float).eps # small number to avoid division by zero
# ------------------------------------ S U P P O R T F U N C T I O N S -----------------------------------------------
def finite_difference(parameters, fun=None, dx=1e-6):
"""
Compute gradient using central difference
:param parameters: point at which to evaluate gradient
:param fun: function handle to use for finite difference
:param dx: finite difference step
    :return: grads: dict mapping each parameter name to the derivative of fun with respect to that parameter
"""
grads = dict()
for key in parameters.keys():
x = np.copy(parameters[key])
n, p = x.shape
dy = np.zeros((n, p))
for i in range(0, n):
for j in range(0, p):
# Forward step
parameters[key][i, j] = x[i, j] + dx
y_fwd = fun(parameters)
parameters[key] = np.copy(x)
# Backward step
parameters[key][i, j] = x[i, j] - dx
y_bwd = fun(parameters)
parameters[key] = np.copy(x)
# Central difference
dy[i, j] = np.divide(y_fwd - y_bwd, 2 * dx)
grads[key] = dy
return grads
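# Minimal usage sketch (illustrative; the helper name below is not part of the
# original module): the central-difference gradient of f(x) = sum(x**2) should
# come out close to 2 * x.
def _finite_difference_demo():  # pragma: no cover
    params = {"x": np.array([[1.0, 2.0]])}
    cost = lambda p: np.sum(p["x"] ** 2)
    return finite_difference(params, fun=cost)  # approx {"x": array([[2., 4.]])}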
# ------------------------------------ O P T I M I Z E R C L A S S ---------------------------------------------------
class Optimizer(object):
@property
def optimum(self):
return self._optimum_design
@property
def current_design(self):
return self._current_design
    @property
    def search_direction(self):
        return self._search_direction
@property
def cost_history(self):
return self._cost_history
@property
def design_history(self):
return self._design_history
@property
def cost(self):
return self._current_cost
def __init__(self, **kwargs):
self.learning_rate = 0.1
self.beta_1 = 0.9
self.beta_2 = 0.99
self.user_cost_function = None
self.user_grad_function = None
self._current_design = None
self._previous_design = None
self._search_direction = None
self._cost_history = []
self._design_history = []
self._optimum_design = None
self._current_cost = None
self._current_iteration = 0
self.initial_guess = None
for name, value in kwargs.items():
setattr(self, name, value)
@classmethod
def initialize(
cls,
initial_guess,
cost_function,
grad_function=None,
learning_rate=0.05,
beta1=0.9,
beta2=0.99,
):
attributes = {
"user_cost_function": cost_function,
"user_grad_function": grad_function,
"learning_rate": learning_rate,
"beta_1": beta1,
"beta_2": beta2,
"initial_guess": initial_guess,
"_current_design": initial_guess.copy(),
}
return cls(**attributes)
def _cost_function(self, x):
return self.user_cost_function(x)
def _grad_function(self, x):
if self.user_grad_function is not None:
return self.user_grad_function(x)
else:
return finite_difference(x, fun=self.user_cost_function)
def _update_current_design(self, learning_rate=0.05):
"""
Implement one step of gradient descent
"""
pass
def grad_check(self, parameters, tol=1e-6): # pragma: no cover
"""
Check analytical gradient against to finite difference
:param parameters: point at which to evaluate gradient
:param tol: acceptable error between finite difference and analytical
"""
grads = self._grad_function(parameters)
grads_FD = finite_difference(parameters, fun=self.user_cost_function)
for key in parameters.keys():
numerator = np.linalg.norm(grads[key] - grads_FD[key])
denominator = np.linalg.norm(grads[key]) + np.linalg.norm(grads_FD[key])
difference = numerator / (denominator + EPS)
if difference <= tol or numerator <= tol:
print("The gradient of {} is correct".format(key))
else:
print("The gradient of {} is wrong".format(key))
print("Finite dif: grad[{}] = {}".format(key, str(grads_FD[key].squeeze())))
print("Analytical: grad[{}] = {}".format(key, str(grads[key].squeeze())))
def backtracking_line_search(self, tau=0.5):
"""
        Perform a backtracking line search: shrink the step size until the cost of
        the updated design drops below the cost of the previous design, or the
        effective learning rate becomes negligibly small. The current design is
        updated in place via _update_current_design.
        :param tau: hyper-parameter between 0 and 1 used to shrink the learning
            rate during backtracking
"""
tau = max(0.0, min(1.0, tau)) # make sure 0 < tau < 1
converged = False
self._previous_design = self._current_design.copy()
while not converged:
self._update_current_design(learning_rate=self.learning_rate * tau)
if self._cost_function(self._current_design) < self._cost_function(
self._previous_design
):
converged = True
elif self.learning_rate * tau < 1e-6:
converged = True
else:
tau *= tau
def optimize(self, max_iter=100, is_print=True):
"""
Optimization logic (main driver)
:param max_iter: maximum number of iterations
:param is_print: True = print cost at every iteration, False = silent
:return: optimum
"""
# Stopping criteria (Vanderplaats, ch. 3, p. 121)
converged = False
N1 = 0
N1_max = 100 # num consecutive passes over which abs convergence criterion must be satisfied before stopping
N2 = 0
N2_max = 100 # num of consecutive passes over which rel convergence criterion must be satisfied before stopping
epsilon_absolute = 1e-7 # absolute error criterion
epsilon_relative = 1e-7 # relative error criterion
self._current_cost = self._cost_function(self._current_design).squeeze()
self._cost_history.append(self._current_cost)
self._design_history.append(self._current_design.copy())
# Iterative update
for i in range(0, max_iter):
self._current_iteration = i
self._search_direction = self._grad_function(self._current_design)
self.backtracking_line_search()
self._current_cost = self._cost_function(self._current_design).squeeze()
self._cost_history.append(self._current_cost)
self._design_history.append(self._current_design.copy())
if is_print:
print(
"iteration = {:d}, cost = {:6.3f}".format(
i, float(self._current_cost)
)
)
# Absolute convergence criterion
if i > 1:
dF1 = abs(self._cost_history[-1] - self._cost_history[-2])
if dF1 < epsilon_absolute * self._cost_history[0]:
N1 += 1
else:
N1 = 0
if N1 > N1_max:
converged = True
if is_print:
print("Absolute stopping criterion satisfied")
# Relative convergence criterion
dF2 = abs(self._cost_history[-1] - self._cost_history[-2]) / max(
abs(self._cost_history[-1]), 1e-6
)
if dF2 < epsilon_relative:
N2 += 1
else:
N2 = 0
if N2 > N2_max:
converged = True
if is_print:
print("Relative stopping criterion satisfied")
# Maximum iteration convergence criterion
            if i == max_iter - 1:  # final pass (range stops at max_iter - 1)
if is_print:
print("Maximum optimizer iterations reached")
if converged:
break
self._optimum_design = self._current_design.copy()
return self.optimum
class GD(Optimizer):
def _update_current_design(self, learning_rate=0.05):
"""Gradient descent update"""
for key in self._previous_design.keys():
self._current_design[key] = (
self._previous_design[key] - learning_rate * self._search_direction[key]
)
class Adam(Optimizer):
def __init__(self, **kwargs):
super(Adam, self).__init__(**kwargs)
self.v = None
self.s = None
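    # v and s store exponentially weighted first and second moments of the gradient,
    # created lazily on the first update. With bias correction the step below is the
    # standard Adam update:
    #   v_hat = v / (1 - beta_1**t),  s_hat = s / (1 - beta_2**t)
    #   x  <-  x - learning_rate * v_hat / (sqrt(s_hat) + EPS)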
def _update_current_design(self, learning_rate=0.05, beta_1=0.9, beta_2=0.99):
"""Adam update"""
self.beta_1 = beta_1
self.beta_2 = beta_2
t = self._current_iteration + 1
if self.v is None:
self.v = {
key: np.zeros(value.shape)
for key, value in self._current_design.items()
}
if self.s is None:
self.s = {
key: np.zeros(value.shape)
for key, value in self._current_design.items()
}
for key in self._current_design.keys():
self.v[key] = (
self.beta_1 * self.v[key] + (1.0 - beta_1) * self._search_direction[key]
)
self.s[key] = self.beta_2 * self.s[key] + (1.0 - beta_2) * np.square(
self._search_direction[key]
)
v_corrected = self.v[key] / (1.0 - self.beta_1 ** t)
s_corrected = self.s[key] / (1.0 - self.beta_2 ** t)
self._current_design[key] = self._previous_design[
key
] - learning_rate * v_corrected / (np.sqrt(s_corrected) + EPS)
def run_example(use_adam=True): # pragma: no cover
"""visual example using 2D rosenbrock function"""
import matplotlib.pyplot as plt
# Test function
def rosenbrock(parameters):
x1 = parameters["x1"]
x2 = parameters["x2"]
y = (1 - x1) ** 2 + 100 * (x2 - x1 ** 2) ** 2
y = y.reshape(1, 1)
dydx = dict()
dydx["x1"] = -2 * (1 - x1) - 400 * x1 * (x2 - x1 ** 2)
dydx["x2"] = 200 * (x2 - x1 ** 2)
return y, dydx
# Initial guess
initial_guess = dict()
initial_guess["x1"] = np.array([1.25]).reshape((1, 1))
initial_guess["x2"] = np.array([-1.75]).reshape((1, 1))
# Function handles to be pass
f = lambda x: rosenbrock(parameters=x)[0]
dfdx = lambda x: rosenbrock(parameters=x)[1]
# Learning rate
alpha = 0.5
# Optimize
if use_adam:
optimizer = Adam.initialize(
initial_guess=initial_guess,
cost_function=f,
grad_function=dfdx,
learning_rate=alpha,
)
else:
optimizer = GD.initialize(
initial_guess=initial_guess,
cost_function=f,
grad_function=dfdx,
learning_rate=alpha,
)
optimizer.grad_check(initial_guess)
optimizer.optimize(max_iter=1000)
# For plotting initial and final answer
x0 = np.array([initial_guess["x1"].squeeze(), initial_guess["x2"].squeeze()])
xf = np.array(
[optimizer.optimum["x1"].squeeze(), optimizer.optimum["x2"].squeeze()]
)
# For plotting contours
lb = -2.0
ub = 2.0
m = 100
x1 =
|
np.linspace(lb, ub, m)
|
numpy.linspace
|
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
def plot_response_alt(name, env, task, perf, during_training=False, failure=None, FDD=False, broken=False):
"""
plot the response for an altitude task
"""
subplot_indices = {0: [1, 2], 1: [1, 1], 3: [2, 2], 4: [2, 1], 5: [4, 2],
6: [3, 2], 7: [3, 1], 8: [7, 1], 9: [5, 1], 10: [7, 2], 11: [7, 2]}
fig = make_subplots(rows=6, cols=2, vertical_spacing=0.2 / 6, horizontal_spacing=0.17 / 2)
if broken:
env.time = env.time[:env.step_count - 2]
env.state_history = env.state_history[:env.step_count - 2]
if env.external_ref_signal is not None:
fig.append_trace(go.Scatter(
x=env.time, y=env.external_ref_signal.T, name=r'$h [m]$',
line=dict(color='#EF553B', dash='dashdot')), row=5, col=1)
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[0, :],
line=dict(color='#EF553B')),
row=3, col=1)
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[1, :],
line=dict(color='#EF553B', dash='dashdot')),
row=3, col=2)
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[2, :],
line=dict(color='#EF553B', dash='dashdot')),
row=4, col=2)
fig.append_trace(go.Scatter(
x=env.time, y=env.external_ref_signal.T - env.state_history[9, :].T, name=r'$h [m]$',
line=dict(color='#636EFA')), row=4, col=1)
fig.update_yaxes(title_text=r'$\Delta h \:\: [\text{m}]$', row=4, col=1, title_standoff=8,
tickmode='array',
# tickvals=np.arange(-15, 5 + 5, 5),
# ticktext=['-15', ' ', '-5', ' ', '5'],
tickfont=dict(size=11),
# range=[-20, 5],
titlefont=dict(size=13)
)
else:
for sig_index, state_index in enumerate(task[1]):
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[sig_index, :],
line=dict(color='#EF553B', dash='dashdot')),
row=subplot_indices[state_index][0], col=subplot_indices[state_index][1])
if env.task_fun()[4] == 'altitude_2attitude':
fig.append_trace(go.Scatter(
x=env.time, y=-env.state_history[9, :].T + env.ref_signal[0, :], name=r'$h [m]$',
line=dict(color='#636EFA')), row=4, col=1)
fig.update_yaxes(title_text=r'$\Delta h \:\: [\text{m}]$', row=4, col=1, title_standoff=8,
tickmode='array',
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[0, :].T, name=r'$p [\frac{deg}{s}]$',
line=dict(color='#636EFA')), row=1, col=2)
fig.update_yaxes(title_text=r'$p\:\: [\text{deg}\:\text{s}^{-1}]$', row=1, col=2, title_standoff=7,
tickfont=dict(size=11),
titlefont=dict(size=13),
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[1, :].T, name=r'$q [^\circ/s]$',
line=dict(color='#636EFA')), row=1, col=1)
fig.update_yaxes(title_text=r'$q\:\: [\text{deg}\:\text{s}^{-1}]$', row=1, col=1, title_standoff=13,
# tickmode='array',
# tickvals=np.arange(-5, 5+2.5, 2.5),
# ticktext=['-5',' ', '0',' ', '5'],
# range=[-5, 6],
tickfont=dict(size=11),
titlefont=dict(size=13)
)
# fig.append_trace(go.Scatter(
# x=env.time, y=env.state_history[2, :].T, name=r'$r [^\circ/s]$',
# line=dict(color='#636EFA')), row=2, col=2)
# fig.update_yaxes(row=2, col=2, title_standoff=14,
# tickmode='array',
# tickvals=np.arange(-5, 5 + 2.5, 2.5),
# range=[-5,7],
# ticktext=['-5', ' ', '0', ' ', '5'],
# title_text=r'$r\:\: [\text{deg}\:\text{s}^{-1}]$',
# tickfont=dict(size=11),
# titlefont=dict(size=13)
# )
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[3, :].T, name=r'$V [m/s]$',
line=dict(color='#636EFA')), row=2, col=2)
fig.update_yaxes(title_text=r'$V\:\: [\text{ms}^{-1}]$', row=2, col=2, title_standoff=13,
# tickmode='array',
# tickvals=np.arange(88, 90+1, 1),
# ticktext=['88', '89', '90'],
tickfont=dict(size=11),
# range=[87,90.5],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[4, :].T, name=r'$\alpha [^\circ]$',
line=dict(color='#636EFA')), row=2, col=1)
fig.update_yaxes(title_text=r'$\alpha\:\: [\text{deg}]$', row=2, col=1, title_standoff=18,
# tickmode='array',
# tickvals=np.arange(2, 6+1, 1),
# ticktext=['2', ' ','4', ' ', '6'],
# range=[1.5, 6],
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[5, :].T, name=r'$\beta [^\circ]$',
line=dict(color='#636EFA')), row=4, col=2)
fig.update_yaxes(title_text=r'$\beta\:\: [\text{deg}]$', row=4, col=2, title_standoff=14,
# tickmode='array',
# tickvals=np.arange(-1, 1 + 0.5, 0.5),
# ticktext=['-1', ' ', '0', ' ', '1'],
# range=[-1, 1],
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[6, :].T, name=r'$\phi [^\circ]$',
line=dict(color='#636EFA')), row=3, col=2)
fig.update_yaxes(title_text=r'$\phi\:\: [\text{deg}]$', row=3, col=2, title_standoff=6,
# tickmode='array',
# tickvals=np.arange(-40, 40 + 20, 20),
# ticktext=['-40', ' ', '0', ' ', '40'],
tickfont=dict(size=11),
# range=[-22, 40],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[7, :].T, name=r'$\theta [^\circ]$',
line=dict(color='#636EFA')), row=3, col=1)
fig.update_yaxes(title_text=r'$\theta\:\: [\text{deg}]$', row=3, col=1,
# tickmode='array',
# tickvals=np.arange(0, 10 + 2.5, 2.5),
# ticktext=['0', ' ', '5 ', ' ', '10'],
tickfont=dict(size=11),
# range=[-16, 20.5],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[9, :].T, name=r'$h [m]$',
line=dict(color='#636EFA')), row=5, col=1)
fig.update_yaxes(title_text=r'$h\:\: [\text{m}]$', row=5, col=1, title_standoff=5,
# tickmode='array',
# tickvals=np.arange(2000, 2400 + 100, 100),
# ticktext=['2000', ' ', '2200 ', ' ', '2400'],
tickfont=dict(size=11),
# range=[1980, 2400],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.action_history[0, :].T,
name=r'$\delta_e [^\circ]$', line=dict(color='#00CC96')), row=6, col=1)
fig.update_yaxes(title_text=r'$\delta_\text{e} \:\: [\text{deg}]$', row=6, col=1, title_standoff=20,
# tickmode='array',
# tickvals=np.arange(-10, 0 + 2.5, 2.5),
# ticktext=['-10', ' ', '-5', ' ', '0'],
tickfont=dict(size=11),
# range=[-10, 0],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.action_history[1, :].T,
        name=r'$\delta_a [^\circ]$', line=dict(color='#00CC96')), row=5, col=2)
fig.update_yaxes(title_text=r'$\delta_\text{a} \:\: [\text{deg}]$', row=5, col=2, title_standoff=8,
# tickmode='array',
tickvals=np.arange(-5, 5 + 2.5, 2.5),
# ticktext=['-5', ' ', '0', ' ', '5'],
tickfont=dict(size=11),
# range=[-6.5, 5],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.action_history[2, :].T,
name=r'$\delta_r [^\circ]$', line=dict(color='#00CC96')), row=6, col=2)
fig.update_yaxes(title_text=r'$\delta_\text{r} \:\: [\text{deg}]$', row=6, col=2, title_standoff=13,
# tickmode='array',
# tickvals=np.arange(0, 20 + 5, 5),
# ticktext=['0', ' ', '10', ' ', '20'],
tickfont=dict(size=11),
# range=[-5, 6],
titlefont=dict(size=13)
)
if failure != 'normal' and not during_training:
fig.add_vline(x=env.failure_time, row='all', col="all", line=dict(color="Grey", width=1.5))
if FDD:
fig.add_vline(x=env.FDD_switch_time, row='all', col="all", line=dict(color="Grey", width=1.5, dash='dot'))
fig.update_layout(showlegend=False, width=800, height=480, margin=dict(
l=10,
r=2,
b=5,
t=0,
))
fig.layout.font.family = 'Arial'
end_time = env.time[-1] + env.dt * 2
tick_interval = 10
fig.update_xaxes(title_text=r'$t \:\: \text{[s]}$', range=[0, end_time], tickmode='array',
tickvals=np.arange(0, end_time, tick_interval), tickfont=dict(size=11), row=6, col=1,
titlefont=dict(size=13), title_standoff=11)
fig.update_xaxes(title_text=r'$t \:\: \text{[s]}$', range=[0, end_time], tickmode='array',
tickvals=np.arange(0, end_time, tick_interval), tickfont=dict(size=11), row=6, col=2,
titlefont=dict(size=13), title_standoff=11)
for row in range(6):
for col in range(3):
fig.update_xaxes(showticklabels=False, tickmode='array',
tickvals=np.arange(0, end_time, tick_interval), row=row, col=col)
fig.update_traces(mode='lines')
if during_training:
fig.write_image(f"figures/during_training/{env.task_fun()[4]}_r{abs(int(perf))}.eps")
return
elif failure != 'normal':
fig.write_image(f"figures/{name}_{failure}_r{abs(int(perf))}.pdf")
else:
fig.write_image(f"figures/{name}_r{abs(int(perf))}.pdf")
fig.show()
return
def plot_response_att(name, env, task, perf, during_training=False, failure=None, FDD=False, broken=False):
"""
plot the response for an attitude task
"""
# fig = go.Figure()
# fig.add_trace(go.Scatter(
# x=env.time, y=env.ref_signal[0, :], name=r'$h [m]$',
# line=dict(color='#EF553B', dash='dashdot')))
#
subplot_indices = {0: [1, 2], 1: [1, 1], 3: [2, 2], 4: [2, 1], 5: [4, 2],
6: [3, 2], 7: [3, 1], 8: [7, 1], 9: [5, 1], 10: [7, 2], 11: [7, 2]}
fig = make_subplots(rows=6, cols=2, vertical_spacing=0.2 / 6, horizontal_spacing=0.17 / 2)
if broken:
env.time = env.time[:env.step_count - 2]
env.state_history = env.state_history[:env.step_count - 2]
if env.external_ref_signal is not None:
fig.append_trace(go.Scatter(
x=env.time, y=env.external_ref_signal.T, name=r'$h [m]$',
line=dict(color='#EF553B', dash='dashdot')), row=5, col=1)
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[0, :],
line=dict(color='#EF553B')),
row=3, col=1)
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[1, :],
line=dict(color='#EF553B', dash='dashdot')),
row=3, col=2)
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[2, :],
line=dict(color='#EF553B', dash='dashdot')),
row=4, col=2)
fig.append_trace(go.Scatter(
x=env.time, y=-env.state_history[9, :].T + env.external_ref_signal.T, name=r'$h [m]$',
line=dict(color='#636EFA')), row=4, col=1)
fig.update_yaxes(title_text=r'$\delta h \:\: [\text{m}]$', row=4, col=1, title_standoff=8)
else:
for sig_index, state_index in enumerate(task[1]):
fig.append_trace(go.Scatter(
x=env.time, y=env.ref_signal[sig_index, :],
line=dict(color='#EF553B', dash='dashdot')),
row=subplot_indices[state_index][0], col=subplot_indices[state_index][1])
if env.task_fun()[4] == 'altitude_2attitude':
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[9, :].T - env.ref_signal[0, :], name=r'$h [m]$',
line=dict(color='#636EFA')), row=4, col=1)
fig.update_yaxes(title_text=r'$h\:\: [\text{m}]$', row=4, col=1, title_standoff=8)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[0, :].T, name=r'$p [\frac{deg}{s}]$',
line=dict(color='#636EFA')), row=1, col=2)
fig.update_yaxes(title_text=r'$p\:\: [\text{deg}\:\text{s}^{-1}]$', row=1, col=2, title_standoff=7,
tickfont=dict(size=11)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[1, :].T, name=r'$q [^\circ/s]$',
line=dict(color='#636EFA')), row=1, col=1)
fig.update_yaxes(title_text=r'$q\:\: [\text{deg}\:\text{s}^{-1}]$', row=1, col=1, title_standoff=13,
tickmode='array',
tickvals=np.arange(-10, 10 + 5, 5),
ticktext=['-10', ' ', '0', ' ', '10'],
range=[-10, 11],
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[2, :].T, name=r'$r [^\circ/s]$',
line=dict(color='#636EFA')), row=2, col=2)
fig.update_yaxes(row=2, col=2, title_standoff=14,
tickmode='array',
tickvals=np.arange(-5, 5 + 2.5, 2.5),
range=[-5, 7],
ticktext=['-5', ' ', '0', ' ', '5'],
title_text=r'$r\:\: [\text{deg}\:\text{s}^{-1}]$',
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[3, :].T, name=r'$V [m/s]$',
line=dict(color='#636EFA')), row=4, col=1)
fig.update_yaxes(title_text=r'$V\:\: [\text{ms}^{-1}]$', row=4, col=1, title_standoff=13,
tickmode='array',
tickvals=np.arange(80, 120 + 10, 10),
ticktext=['80', ' ', '100', ' ', '120'],
tickfont=dict(size=11),
range=[77, 120],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[4, :].T, name=r'$\alpha [^\circ]$',
line=dict(color='#636EFA')), row=2, col=1)
fig.update_yaxes(title_text=r'$\alpha\:\: [\text{deg}]$', row=2, col=1, title_standoff=18,
tickmode='array',
tickvals=np.arange(0, 10 + 5, 2.5),
ticktext=['0', ' ', '5', ' ', '10'],
range=[-2, 10],
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[5, :].T, name=r'$\beta [^\circ]$',
line=dict(color='#636EFA')), row=4, col=2)
fig.update_yaxes(title_text=r'$\beta\:\: [\text{deg}]$', row=4, col=2, title_standoff=14,
tickmode='array',
tickvals=np.arange(-2, 2 + 1, 1),
ticktext=['-2', ' ', '0', ' ', '2'],
range=[-2, 2],
tickfont=dict(size=11),
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[6, :].T, name=r'$\phi [^\circ]$',
line=dict(color='#636EFA')), row=3, col=2)
fig.update_yaxes(title_text=r'$\phi\:\: [\text{deg}]$', row=3, col=2, title_standoff=6,
tickmode='array',
tickvals=[-35, 0, 35, 70],
# ticktext=['-35', '0', ' ', '70'],
tickfont=dict(size=11),
range=[-37, 72],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[7, :].T, name=r'$\theta [^\circ]$',
line=dict(color='#636EFA')), row=3, col=1)
fig.update_yaxes(title_text=r'$\theta\:\: [\text{deg}]$', row=3, col=1,
tickmode='array',
tickvals=np.arange(-10, 20 + 10, 10),
ticktext=['-10', '0', '10 ', '20'],
tickfont=dict(size=11),
range=[-16, 20.5],
titlefont=dict(size=13)
)
fig.append_trace(go.Scatter(
x=env.time, y=env.state_history[9, :].T, name=r'$h [m]$',
line=dict(color='#636EFA')), row=5, col=1)
fig.update_yaxes(title_text=r'$h\:\: [\text{m}]$', row=5, col=1, title_standoff=5,
tickmode='array',
tickvals=
|
np.arange(1600, 2400 + 200, 200)
|
numpy.arange
|
from PIL import Image
import os
import torch
import torch.utils.data
import pandas as pd
import numpy as np
from torchvision import transforms, utils
import torch.nn as nn
import torch.nn.functional as F
from model.model_vtgnet import FeatExtractor, TrajGenerator
import cv2
from scipy import interpolate
save_path = './test_results/'
os.makedirs(save_path, exist_ok=True)
interval_before = 11 # 1.5 s
interval_after = 22 # 3 s
feature_size = 512
# Device configuration
torch.cuda.set_device(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model1_s = FeatExtractor(feature_size=feature_size).to(device) # keep straight
model2_s = TrajGenerator(feature_size=feature_size).to(device)
model1_r = FeatExtractor(feature_size=feature_size).to(device) # turn right
model2_r = TrajGenerator(feature_size=feature_size).to(device)
model1_l = FeatExtractor(feature_size=feature_size).to(device) # turn left
model2_l = TrajGenerator(feature_size=feature_size).to(device)
model_path = './model/weights/'
model1_l.load_state_dict(torch.load(model_path + '2-model1.pth', map_location=lambda storage, loc: storage)) # model1 -- Feature Extractor
model2_l.load_state_dict(torch.load(model_path + '2-model2.pth', map_location=lambda storage, loc: storage)) # model2 -- Trajectory Generator
model1_r.load_state_dict(torch.load(model_path + '1-model1.pth', map_location=lambda storage, loc: storage))
model2_r.load_state_dict(torch.load(model_path + '1-model2.pth', map_location=lambda storage, loc: storage))
model1_s.load_state_dict(torch.load(model_path + '0-model1.pth', map_location=lambda storage, loc: storage))
model2_s.load_state_dict(torch.load(model_path + '0-model2.pth', map_location=lambda storage, loc: storage))
model1_s.eval()
model2_s.eval()
model1_r.eval()
model2_r.eval()
model1_l.eval()
model2_l.eval()
# camera parameters
fx = 983.044006
fy = 983.044006
cx = 6.095593000000e+02
cy = 1.728540000000e+02
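# Pinhole-camera intrinsics: trajectory points (X, Y, Z) expressed in the camera
# frame are projected to pixel coordinates below via u = fx*X/Z + cx and
# v = fy*Y/Z + cy before being drawn on the image.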
csv_path = 'test_data/vtgnet/data_reference.csv'
data = pd.read_csv(csv_path, header=None)
results = []
with torch.no_grad():
for idx in range(len(data)):
print('{}/{}'.format(idx+1,data.shape[0]))
# get command
command = data.iloc[idx,0]
# history info
info_st_index = 1 + 12
info_st_index_2 = (info_st_index + 4*(interval_before+1))
info_history = data.iloc[idx, info_st_index:info_st_index_2].to_numpy().reshape(-1,4)
info_history_net = info_history[:,0:3]
info_history_net = torch.from_numpy(info_history_net.astype('float')).unsqueeze(0).to(device)
info_future = data.iloc[idx, info_st_index_2:].to_numpy().reshape(-1,4)
local_x_history = info_history[:,0]
local_y_history = info_history[:,1]
spd_history = info_history[:,2]
yaw_history = info_history[:,3]
local_x_future = info_future[:,0]
local_y_future = info_future[:,1]
spd_future = info_future[:,2]
yaw_future = info_future[:,3]
image = []
for k in range(1, 1 + interval_before+1):
image.append( transforms.Resize((224,224))(Image.open(data.iloc[idx,k])) )
image = torch.stack( [transforms.ToTensor()(image[k]) for k in range(len(image))], dim=0 )
for ii in range(image.size(0)):
image[ii,:,:,:] = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])(image[ii,:,:,:])
image = image.unsqueeze(0).to(device)
# keep straight
if command == 0:
features = torch.Tensor(1,12,feature_size).to(device)
for p in range(12):
features[:,p,:] = model1_s(image[:,p,:,:,:])
outputs, logvar, attentions = model2_s(features, info_history_net)
# turn right
if command == 1 :
features = torch.Tensor(1,12,feature_size).to(device)
for p in range(12):
features[:,p,:] = model1_r(image[:,p,:,:,:])
outputs, logvar, attentions = model2_r(features, info_history_net)
# turn left
if command == 2:
features = torch.Tensor(1,12,feature_size).to(device)
for p in range(12):
features[:,p,:] = model1_l(image[:,p,:,:,:])
outputs, logvar, attentions = model2_l(features, info_history_net)
var = logvar.exp().reshape(-1,3).cpu().detach().numpy()
attentions = attentions.squeeze(0).cpu().detach().numpy()
planned = (outputs.reshape(-1,3).cpu().detach().numpy())
local_x_planned = planned[:,0]
local_y_planned = planned[:,1]
spd_planned = planned[:,2]
local_x_planned_var = var[:,0]
local_y_planned_var = var[:,1]
spd_planned_var = var[:,2]
img_center = cv2.imread(data.iloc[idx,1 + interval_before])
local_x_planned_sigma = np.sqrt(local_x_planned_var)
local_y_planned_sigma = np.sqrt(local_y_planned_var)
spd_planned_sigma = np.sqrt(spd_planned_var)
xx = np.arange(1,22+0.001,1)
f_x_planned = interpolate.interp1d(xx, local_x_planned, kind='cubic')
f_y_planned = interpolate.interp1d(xx, local_y_planned, kind='cubic')
f_x_gt = interpolate.interp1d(xx, local_x_future, kind='cubic')
f_y_gt = interpolate.interp1d(xx, local_y_future, kind='cubic')
f_x_planned_sigma = interpolate.interp1d(xx, local_x_planned_sigma, kind='cubic')
f_y_planned_sigma = interpolate.interp1d(xx, local_y_planned_sigma, kind='cubic')
x_new = np.arange(1, 22, 0.05)
local_x_planned = f_x_planned(x_new)
local_y_planned = f_y_planned(x_new)
local_x_planned_sigma = f_x_planned_sigma(x_new)
local_y_planned_sigma = f_y_planned_sigma(x_new)
local_x_future = f_x_gt(x_new)
local_y_future = f_y_gt(x_new)
# draw the trajectory: ground truth
X = local_x_future + 0.12
Y = 1.52
Z = local_y_future + (0.17+1.55)
filtered_idx = np.where(Z>0)
Z = Z[filtered_idx]
X = X[filtered_idx]
X = X/Z
Y = Y/Z
us = (fx*X + cx).astype(int)
vs = (fy*Y + cy).astype(int)
vertices_gt = np.stack((us, vs)).transpose().reshape(-1,2)
# draw the trajectory: generated values
X0 = local_x_planned + 0.12
Y0 = 1.52
Z0 = local_y_planned + (0.17+1.55)
filtered_idx = np.where(Z0>0)
Z0 = Z0[filtered_idx]
X0 = X0[filtered_idx]
# draw the uncertainty area.
if command == 0 or idx == 19 or idx == 2:
X1 = local_x_planned + 0.12 - local_x_planned_sigma
Y1 = 1.52
Z1 = local_y_planned + (0.17+1.55)
filtered_idx = np.where(Z1>0)
Z1 = Z1[filtered_idx]
X1 = X1[filtered_idx]
X2 = local_x_planned + 0.12 + local_x_planned_sigma
Y2 = 1.52
Z2 = local_y_planned + (0.17+1.55)
filtered_idx = np.where(Z2>0)
Z2 = Z2[filtered_idx]
X2 = X2[filtered_idx]
else:
X1 = local_x_planned + 0.12
Y1 = 1.52
Z1 = local_y_planned + (0.17+1.55) - local_y_planned_sigma
filtered_idx = np.where(Z1>0)
Z1 = Z1[filtered_idx]
X1 = X1[filtered_idx]
X2 = local_x_planned + 0.12
Y2 = 1.52
Z2 = local_y_planned + (0.17+1.55) + local_y_planned_sigma
filtered_idx =
|
np.where(Z2>0)
|
numpy.where
|
import numpy as np
from scipy.special import sph_harm
def geometry(t, initial_star_lon, initial_obs_lon, omega_orb, omega_rot):
# edge-on, zero-obliquity planet
star_lat = 0.5 * np.pi * np.ones_like(t)
obs_lat = 0.5 * np.pi * np.ones_like(t)
    star_lon = initial_star_lon - (omega_rot - omega_orb) * t
obs_lon = initial_obs_lon - omega_rot * t
return star_lat, star_lon, obs_lat, obs_lon
def illumination(lat, lon, star_lat, star_lon):
I =
|
np.cos(lat)
|
numpy.cos
|
#%%
import numpy as np
from scipy.special import erf, erfinv, expit # expit = sigmoid
from scipy.stats import beta
import matplotlib.pyplot as plt
from pathlib import Path
HERE = Path(__file__).parent
PLOT = HERE/"plots"
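# piecelin_unit builds a random piecewise-linear function on [0, 1] as a sum of
# ReLU hinges (slope changes at random flex points); with soft > 0 the hinges are
# replaced by softplus terms for a smooth approximation, and the result is divided
# by a normalizing constant to keep it roughly on the unit scale.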
def piecelin_unit(x, n, soft=0):
flx_pts = np.sort(np.random.uniform(0, 1, size=n))
slopes = np.random.uniform(0, 10e5, size=n+1)
slopes[0] = 0.001
slopes[n] = 0.999
slp_chg = slopes[1:] - slopes[:-1]
    normalize = ((flx_pts[1:]-flx_pts[:-1])*slopes[1:-1]).sum()
x = x[np.newaxis, :]
slp_chg = slp_chg[:, np.newaxis]
flx_pts = flx_pts[:, np.newaxis]
if soft: # using softplus
y = (slp_chg * np.logaddexp(0,(x - flx_pts)/soft)*soft).sum(axis=0)
else: # using relu
y = (slp_chg * (x - flx_pts) * (x > flx_pts)).sum(axis=0)
    return y/normalize
def piecewise_lin(x, xlims=[0,1], ylims=[0,1], **kwargs):
(l,r),(b,t) = xlims,ylims
return b+(t-b)*piecelin_unit((x-l)/(r-l), **kwargs)
def piecetan_unit(x, n, soft=0):
jmps = np.random.uniform(0, 1, size=n+1)
amps = np.random.uniform(0, 1, size=n+1)
amps /= amps.sum()
x = x[np.newaxis, :]
jmps = jmps[:, np.newaxis]
amps = amps[:, np.newaxis]
if soft: # using sigmoid
y = (amps * expit((x-jmps)/(soft))).sum(axis=0)
else: # using heavyside
y = (amps * (x > jmps)).sum(axis=0)
return y
def piecewise_tan(x, xlims=[0,1], ylims=[0,1], **kwargs):
(l,r),(b,t) = xlims,ylims
return b+(t-b)*piecetan_unit((x-l)/(r-l), **kwargs)
def test_plot_piecewise():
x = np.linspace(-3,3,1000)
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(10,3))
seed = np.random.randint(1000)
for soft in np.linspace(0,0.03,3):
np.random.seed(seed)
ax1.plot(x, piecewise_lin(x, n=4, soft=soft, xlims=[-2,2], ylims=[-1,1]))
ax2.plot(x, piecewise_tan(x, n=4, soft=soft, xlims=[-2,2], ylims=[-1,1]))
ax2.set_xlim(-2,2)
ax2.set_ylim(-1,1)
ax1.set_title("relu and softplus")
ax2.set_title("heavysides and sigmoid")
plt.savefig(PLOT/"monofunc__my_piecewise.pdf")
plt.clf()
# test_plot_piecewise()
#%%
def piecewise_gap_unit(x, n, soft=0):
N = n
n = np.random.randint(1,n)
N = N-n
flx = np.sort(np.random.uniform(0, 1, size=n+1))
flx[0] = 0
flx[n] = 1
yflex = np.sort(np.random.uniform(0, 1, size=n+1))
yflex[0] = 0
yflex[n] = 1
slopes = (yflex[1:]-yflex[:n])/(flx[1:]-flx[:n])
chg = slopes[1:] - slopes[:-1]
y = slopes[0]*x
x = x[np.newaxis, :]
chg = chg[ : , np.newaxis]
flx = flx[1:n, np.newaxis]
y += (chg * np.logaddexp(0,(x - flx)/soft)*soft).sum(axis=0)
n = np.random.randint(0,N)
N = N-n
jmps = np.random.uniform(0, 1, size=n+1)
amps = np.random.uniform(0, 1, size=n+1)
jmps = jmps[:, np.newaxis]
amps = amps[:, np.newaxis]
y += (amps * expit((x-jmps)/(soft))).sum(axis=0)
n =
|
np.random.randint(0,N)
|
numpy.random.randint
|
import numpy as np
import pytest
from matplotlib import pyplot as plt
from tikzplotlib import clean_figure, get_tikz_code
RC_PARAMS = {"figure.figsize": [5, 5], "figure.dpi": 220, "pgf.rcfonts": False}
class Test_plottypes:
"""Testing plot types found here https://matplotlib.org/3.1.1/tutorials/introductory/sample_plots.html"""
def test_plot(self):
x = np.linspace(1, 100, 20)
y = np.linspace(1, 100, 20)
with plt.rc_context(rc=RC_PARAMS):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.plot(x, y)
ax.set_ylim([20, 80])
ax.set_xlim([20, 80])
raw = get_tikz_code()
clean_figure(fig)
clean = get_tikz_code()
# Use number of lines to test if it worked.
# the baseline (raw) should have 20 points
# the clean version (clean) should have 2 points
            # the difference in line numbers should therefore be 18
numLinesRaw = raw.count("\n")
numLinesClean = clean.count("\n")
assert numLinesRaw - numLinesClean == 18
plt.close("all")
def test_step(self):
x = np.linspace(1, 100, 20)
y = np.linspace(1, 100, 20)
with plt.rc_context(rc=RC_PARAMS):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.step(x, y)
ax.set_ylim([20, 80])
ax.set_xlim([20, 80])
with pytest.warns(Warning):
clean_figure(fig)
plt.close("all")
def test_scatter(self):
x = np.linspace(1, 100, 20)
y = np.linspace(1, 100, 20)
with plt.rc_context(rc=RC_PARAMS):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.scatter(x, y)
ax.set_ylim([20, 80])
ax.set_xlim([20, 80])
raw = get_tikz_code()
clean_figure()
clean = get_tikz_code()
# Use number of lines to test if it worked.
            # the baseline (raw) starts from 20 points; clean_figure drops the
            # redundant ones, so the cleaned output is 6 lines shorter here
numLinesRaw = raw.count("\n")
numLinesClean = clean.count("\n")
assert numLinesRaw - numLinesClean == 6
plt.close("all")
def test_bar(self):
x = np.linspace(1, 100, 20)
y = np.linspace(1, 100, 20)
with plt.rc_context(rc=RC_PARAMS):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.bar(x, y)
ax.set_ylim([20, 80])
ax.set_xlim([20, 80])
with pytest.warns(Warning):
clean_figure(fig)
plt.close("all")
def test_hist(self):
x = np.linspace(1, 100, 20)
y = np.linspace(1, 100, 20)
with plt.rc_context(rc=RC_PARAMS):
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
ax.hist(x, y)
ax.set_ylim([20, 80])
ax.set_xlim([20, 80])
with pytest.warns(Warning):
clean_figure(fig)
plt.close("all")
def test_plot3d(self):
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z ** 2 + 1
x = r * np.sin(theta)
y = r * np.cos(theta)
with plt.rc_context(rc=RC_PARAMS):
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.plot(x, y, z)
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
ax.set_zlim([-2, 2])
ax.view_init(30, 30)
raw = get_tikz_code(fig)
clean_figure(fig)
clean = get_tikz_code()
# Use number of lines to test if it worked.
numLinesRaw = raw.count("\n")
numLinesClean = clean.count("\n")
assert numLinesRaw - numLinesClean == 13
plt.close("all")
def test_scatter3d(self):
theta = np.linspace(-4 * np.pi, 4 * np.pi, 100)
z = np.linspace(-2, 2, 100)
r = z ** 2 + 1
x = r *
|
np.sin(theta)
|
numpy.sin
|
#import sys
import re # Regular expressions
import numpy as np
import pylab as plt
import matplotlib as mpl
#from scipy.optimize import leastsq
#import scipy.special
import PIL # Python Image Library (for opening PNG, etc.)
import sys, os
import h5py  # needed by Mask.load_hdf5 below
from skbeam.core.accumulators.binned_statistic import BinnedStatistic2D,BinnedStatistic1D
import skbeam.core.roi as roi
import skbeam.core.correlation as corr
import skbeam.core.utils as utils
from pyCHX.chx_generic_functions import average_array_withNan
def convert_Qmap( img, qx_map, qy_map=None, bins=None, rangeq=None, origin=None, mask=None, statistic='mean'):
"""Y.G. Nov 3@CHX
Convert a scattering image to a qmap by giving qx_map and qy_map
Return converted qmap, x-coordinates and y-coordinates
"""
if qy_map is not None:
if rangeq is None:
qx_min,qx_max = qx_map.min(), qx_map.max()
qy_min,qy_max = qy_map.min(), qy_map.max()
rangeq = [ [qx_min,qx_max], [qy_min,qy_max] ]
#rangeq = [qx_min,qx_max , qy_min,qy_max]
if bins is None:
bins = qx_map.shape
b2d = BinnedStatistic2D( qx_map.ravel(), qy_map.ravel(),
statistic=statistic, bins=bins, mask=mask.ravel(),
range=rangeq)
remesh_data, xbins, ybins = b2d( img.ravel() ), b2d.bin_centers[0], b2d.bin_centers[1]
else:
if rangeq is None:
qx_min,qx_max = qx_map.min(), qx_map.max()
rangeq = [qx_min,qx_max]
if bins is None:
bins = [ qx_map.size ]
#print( rangeq, bins )
if mask is not None:
m = mask.ravel()
else:
m = None
b1d = BinnedStatistic1D( qx_map.ravel(), bins= bins, mask=m )
remesh_data = b1d( img.ravel() )
#print('Here')
xbins= b1d.bin_centers
ybins=None
return remesh_data, xbins, ybins
def qphiavg(image, q_map=None, phi_map=None, mask=None, bins= None,
origin=None, range=None, statistic='mean'):
    ''' Oct 20, 2017, Yugang, according to Julien's suggestion
    Get from https://github.com/CFN-softbio/SciStreams/blob/master/SciStreams/processing/qphiavg.py
    With a small revision --> return three arrays rather than a dict
    quick q-phi average calculator.
    bins defaults to the image shape when not provided
'''
# TODO : replace with method that takes qphi maps
# TODO : also return q and phi of this...
# print("In qphi average stream")
shape = image.shape
if bins is None:
bins=shape
#print(bins)
if origin is None:
origin = (shape[0] - 1) / 2., (shape[1] - 1) / 2.
from skbeam.core.utils import radial_grid, angle_grid
if q_map is None:
q_map = radial_grid(origin, shape)
if phi_map is None:
phi_map = angle_grid(origin, shape)
expected_shape = tuple(shape)
if mask is not None:
if mask.shape != expected_shape:
raise ValueError('"mask" has incorrect shape. '
' Expected: ' + str(expected_shape) +
' Received: ' + str(mask.shape))
mask = mask.reshape(-1)
rphibinstat = BinnedStatistic2D(q_map.reshape(-1), phi_map.reshape(-1),
statistic=statistic, bins=bins, mask=mask,
range=range)
sqphi = rphibinstat(image.ravel())
qs = rphibinstat.bin_centers[0]
phis = rphibinstat.bin_centers[1]
return sqphi, qs, phis
def get_QPhiMap(img_shape, center):
'''Y.G., Dev Nov 10, 2018 Get q_map and phi_map by giving image shape and center
e.g.,
q_map, phi_map = get_QPhiMap( mask.shape, center[::-1])
'''
q_map = utils.radial_grid( center, img_shape, pixel_size= [1,1] )
phi_map = np.degrees( utils.angle_grid(center, img_shape,) )
return q_map, phi_map
def get_img_qphimap(img, q_map, phi_map, mask, bins, center,
qang_range=None, statistic='mean'):
'''Y.G., Dev Nov 10, 2018 Get phi_map by giving image
e.g.,
q_map, phi_map = get_QPhiMap( mask.shape, center[::-1])
sqphi, qs, phis = get_img_qphimap( avg, q_map, phi_map, mask,
bins=[ 1500, 1800],center=center[::-1],
qang_range=None, statistic='mean')
'''
sqphi, qs, phis = qphiavg(img, q_map=q_map, phi_map=phi_map, mask=mask, bins= bins,
origin= center, range=qang_range, statistic=statistic)
return sqphi, qs, phis
def get_iq_from_sqphi( sqphi):
'''Y.G., Dev Nov 10, 2018 Get iq from a q-phi map
e.g.,
iqPHI = get_iq_from_sqphi( Iqphi )'''
return np.nan_to_num( average_array_withNan( sqphi, axis=1 ) )
def get_phi_from_sqphi( sqphi):
'''<NAME> Nov 10, 2018 Get Iphi from a q-phi map
e.g.,
iqPHI = get_iq_from_sqphi( Iqphi )
qc= np.argmax( iqPHI )
qw = 5
iphiQ = get_phi_from_sqphi( Iqphi[qc-qw:qc+qw] )
'''
return np.nan_to_num( average_array_withNan( sqphi, axis=0 ) )
def convert_Qmap_old( img, qx_map, qy_map=None, bins=None, rangeq=None):
"""<NAME> 3@CHX
Convert a scattering image to a qmap by giving qx_map and qy_map
Return converted qmap, x-coordinates and y-coordinates
"""
if qy_map is not None:
if rangeq is None:
qx_min,qx_max = qx_map.min(), qx_map.max()
qy_min,qy_max = qy_map.min(), qy_map.max()
rangeq = [ [qx_min,qx_max], [qy_min,qy_max] ]
if bins is None:
bins = qx_map.shape
remesh_data, xbins, ybins = np.histogram2d(qx_map.ravel(), qy_map.ravel(),
bins=bins, range= rangeq, normed=False, weights= img.ravel() )
else:
if rangeq is None:
qx_min,qx_max = qx_map.min(), qx_map.max()
rangeq = [qx_min,qx_max]
if bins is None:
bins = qx_map.size
else:
if isinstance( bins, list):
bins = bins[0]
print( rangeq, bins )
remesh_data, xbins = np.histogram(qx_map.ravel(),
bins=bins, range= rangeq, normed=False, weights= img.ravel() )
ybins=None
return remesh_data, xbins, ybins
# Mask
################################################################################
class Mask(object):
'''Stores the matrix of pixels to be excluded from further analysis.'''
def __init__(self, infile=None, format='auto'):
'''Creates a new mask object, storing a matrix of the pixels to be
excluded from further analysis.'''
self.data = None
if infile is not None:
self.load(infile, format=format)
def load(self, infile, format='auto', invert=False):
        '''Loads a mask from a file. If this object already has some masking
        defined, then the new mask is 'added' to it. Thus, one can load multiple
        masks to exclude various pixels.'''
if format=='png' or infile[-4:]=='.png':
self.load_png(infile, invert=invert)
elif format=='hdf5' or infile[-3:]=='.h5' or infile[-4:]=='.hd5':
self.load_hdf5(infile, invert=invert)
else:
print("Couldn't identify mask format for %s."%(infile))
def load_blank(self, width, height):
        '''Creates a null mask; i.e. one that doesn't exclude any pixels.'''
# TODO: Confirm that this is the correct order for x and y.
self.data = np.ones((height, width))
def load_png(self, infile, threshold=127, invert=False):
'''Load a mask from a PNG image file. High values (white) are included,
        low values (black) are excluded.'''
# Image should be black (0) for excluded pixels, white (255) for included pixels
img = PIL.Image.open(infile).convert("L") # black-and-white
img2 = img.point(lambda p: p > threshold and 255)
data = np.asarray(img2)/255
data = data.astype(int)
if invert:
data = -1*(data-1)
if self.data is None:
self.data = data
else:
self.data *= data
def load_hdf5(self, infile, invert=False):
with h5py.File(infile, 'r') as f:
data = np.asarray( f['mask'] )
if invert:
data = -1*(data-1)
if self.data is None:
self.data = data
else:
self.data *= data
def invert(self):
'''Inverts the mask. Can be used if the mask file was written using the
opposite convention.'''
self.data = -1*(self.data-1)
# End class Mask(object)
########################################
# Calibration
################################################################################
class Calibration(object):
'''Stores aspects of the experimental setup; especially the calibration
parameters for a particular detector. That is, the wavelength, detector
distance, and pixel size that are needed to convert pixel (x,y) into
reciprocal-space (q) value.
This class may also store other information about the experimental setup
(such as beam size and beam divergence).
'''
def __init__(self, wavelength_A=None, distance_m=None, pixel_size_um=None):
self.wavelength_A = wavelength_A
self.distance_m = distance_m
self.pixel_size_um = pixel_size_um
# Data structures will be generated as needed
# (and preserved to speedup repeated calculations)
self.clear_maps()
# Experimental parameters
########################################
def set_wavelength(self, wavelength_A):
'''Set the experimental x-ray wavelength (in Angstroms).'''
self.wavelength_A = wavelength_A
def get_wavelength(self):
'''Get the x-ray beam wavelength (in Angstroms) for this setup.'''
return self.wavelength_A
def set_energy(self, energy_keV):
'''Set the experimental x-ray beam energy (in keV).'''
energy_eV = energy_keV*1000.0
energy_J = energy_eV/6.24150974e18
h = 6.626068e-34 # m^2 kg / s
c = 299792458 # m/s
wavelength_m = (h*c)/energy_J
self.wavelength_A = wavelength_m*1e+10
def get_energy(self):
'''Get the x-ray beam energy (in keV) for this setup.'''
h = 6.626068e-34 # m^2 kg / s
c = 299792458 # m/s
wavelength_m = self.wavelength_A*1e-10 # m
E = h*c/wavelength_m # Joules
E *= 6.24150974e18 # electron volts
E /= 1000.0 # keV
return E
def get_k(self):
'''Get k = 2*pi/lambda for this setup, in units of inverse Angstroms.'''
return 2.0*np.pi/self.wavelength_A
def set_distance(self, distance_m):
'''Sets the experimental detector distance (in meters).'''
self.distance_m = distance_m
def set_pixel_size(self, pixel_size_um=None, width_mm=None, num_pixels=None):
'''Sets the pixel size (in microns) for the detector. Pixels are assumed
to be square.'''
if pixel_size_um is not None:
self.pixel_size_um = pixel_size_um
else:
if num_pixels is None:
num_pixels = self.width
pixel_size_mm = width_mm*1./num_pixels
self.pixel_size_um = pixel_size_mm*1000.0
def set_beam_position(self, x0, y0):
'''Sets the direct beam position in the detector images (in pixel
coordinates).'''
self.x0 = x0
self.y0 = y0
def set_image_size(self, width, height=None):
'''Sets the size of the detector image, in pixels.'''
self.width = width
if height is None:
# Assume a square detector
self.height = width
else:
self.height = height
def get_q_per_pixel(self):
'''Gets the delta-q associated with a single pixel. This is computed in
the small-angle limit, so it should only be considered approximate.
For instance, wide-angle detectors will have different delta-q across
the detector face.'''
if self.q_per_pixel is not None:
return self.q_per_pixel
c = (self.pixel_size_um/1e6)/self.distance_m
twotheta = np.arctan(c) # radians
self.q_per_pixel = 2.0*self.get_k()*np.sin(twotheta/2.0)
return self.q_per_pixel
# Maps
########################################
def clear_maps(self):
self.r_map_data = None
self.q_per_pixel = None
self.q_map_data = None
self.angle_map_data = None
self.qx_map_data = None
self.qy_map_data = None
self.qz_map_data = None
self.qr_map_data = None
def r_map(self):
'''Returns a 2D map of the distance from the origin (in pixel units) for
each pixel position in the detector image.'''
if self.r_map_data is not None:
return self.r_map_data
x = np.arange(self.width) - self.x0
y = np.arange(self.height) - self.y0
X, Y = np.meshgrid(x, y)
R = np.sqrt(X**2 + Y**2)
self.r_map_data = R
return self.r_map_data
def q_map(self):
'''Returns a 2D map of the q-value associated with each pixel position
in the detector image.'''
if self.q_map_data is not None:
return self.q_map_data
c = (self.pixel_size_um/1e6)/self.distance_m
twotheta = np.arctan(self.r_map()*c) # radians
self.q_map_data = 2.0*self.get_k()*np.sin(twotheta/2.0)
return self.q_map_data
def angle_map(self):
'''Returns a map of the angle for each pixel (w.r.t. origin).
0 degrees is vertical, +90 degrees is right, -90 degrees is left.'''
if self.angle_map_data is not None:
return self.angle_map_data
x = (np.arange(self.width) - self.x0)
y = (np.arange(self.height) - self.y0)
X,Y = np.meshgrid(x,y)
#M = np.degrees(np.arctan2(Y, X))
# Note intentional inversion of the usual (x,y) convention.
# This is so that 0 degrees is vertical.
#M = np.degrees(np.arctan2(X, Y))
# TODO: Lookup some internal parameter to determine direction
# of normal. (This is what should define the angle convention.)
M = np.degrees(np.arctan2(X, -Y))
self.angle_map_data = M
return self.angle_map_data
def qx_map(self):
if self.qx_map_data is not None:
return self.qx_map_data
self._generate_qxyz_maps()
return self.qx_map_data
def qy_map(self):
if self.qy_map_data is not None:
return self.qy_map_data
self._generate_qxyz_maps()
return self.qy_map_data
def qz_map(self):
if self.qz_map_data is not None:
return self.qz_map_data
self._generate_qxyz_maps()
return self.qz_map_data
def qr_map(self):
if self.qr_map_data is not None:
return self.qr_map_data
self._generate_qxyz_maps()
return self.qr_map_data
def _generate_qxyz_maps(self):
# Conversion factor for pixel coordinates
# (where sample-detector distance is set to d = 1)
c = (self.pixel_size_um/1e6)/self.distance_m
x = np.arange(self.width) - self.x0
y = np.arange(self.height) - self.y0
X, Y = np.meshgrid(x, y)
R = np.sqrt(X**2 + Y**2)
#twotheta = np.arctan(self.r_map()*c) # radians
theta_f = np.arctan2( X*c, 1 ) # radians
#alpha_f_prime = np.arctan2( Y*c, 1 ) # radians
alpha_f = np.arctan2( Y*c*np.cos(theta_f), 1 ) # radians
self.qx_map_data = self.get_k()*np.sin(theta_f)*np.cos(alpha_f)
self.qy_map_data = self.get_k()*( np.cos(theta_f)*np.cos(alpha_f) - 1 ) # TODO: Check sign
self.qz_map_data = -1.0*self.get_k()*np.sin(alpha_f)
self.qr_map_data = np.sign(self.qx_map_data)*np.sqrt(np.square(self.qx_map_data) + np.square(self.qy_map_data))
# End class Calibration(object)
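# Hedged usage sketch of the Calibration class above; all numbers are illustrative
# placeholders, not a real beamline configuration.
cal = Calibration(distance_m=5.0, pixel_size_um=172.0)
cal.set_energy(12.398)                 # ~1.0 Angstrom wavelength
cal.set_image_size(981, 1043)          # detector size in pixels
cal.set_beam_position(x0=490.0, y0=520.0)
q = cal.q_map()                        # 2D map of |q| per pixel (inverse Angstroms)
dq = cal.get_q_per_pixel()             # small-angle estimate of the q resolution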
########################################
# CalibrationGonio
################################################################################
class CalibrationGonio(Calibration):
"""
The geometric calculations used here are described:
http://gisaxs.com/index.php/Geometry:WAXS_3D
"""
# Experimental parameters
########################################
def set_angles(self, det_phi_g=0., det_theta_g=0.,
sam_phi=0, sam_chi=0, sam_theta=0,
offset_x = 0, offset_y =0, offset_z=0):
'''
YG: add sample rotation angles that convert the qmap from the lab frame to the sample frame.
All angles are given in degrees.
sam_phi: rotation about the lab-frame x axis (CHX phi)
sam_chi: rotation about the lab-frame z axis (CHX chi)
sam_theta: rotation about the lab-frame y axis (CHX theta)
YG added offset corrections on Sep 21, 2017:
det_phi_g: rotation about the y axis (delta at CHX)
det_theta_g: tilt away from the z-plane (gamma at CHX)
For SMI, which rotates only about the y axis (det_theta_g=0.), only
offset_x and offset_z matter.'''
#print('Set angles here')
self.det_phi_g = det_phi_g
self.det_theta_g = det_theta_g
self.offset_x = offset_x
self.offset_y = offset_y
self.offset_z = offset_z
self.sam_phi=sam_phi
self.sam_chi= sam_chi
self.sam_theta=sam_theta
def rotation_matrix(self, sam_phi, sam_theta, sam_chi, degrees=True):
'''
sam_phi, rotate along lab-frame x, CHX phi
sam_chi, rotate along lab-frame z, CHX chi
sam_theta, rotate along lab-frame y, CHX theta
'''
if degrees:
sam_phi, sam_chi, sam_theta = np.radians(sam_phi), np.radians(sam_chi), np.radians(sam_theta)
Rx = np.array( [ [1, 0, 0 ],
[0, np.cos( sam_phi ), np.sin( sam_phi ) ],
[0, -np.sin( sam_phi ), np.cos( sam_phi ) ]
]
)
Rz = np.array( [ [ np.cos( sam_chi ), np.sin( sam_chi ), 0 ],
[-np.sin( sam_chi ), np.cos( sam_chi ), 0 ],
[0, 0, 1 ]
]
)
Ry = np.array( [ [np.cos( sam_theta ), 0, np.sin( sam_theta ) ],
[0, 1, 0 ],
[-np.sin( sam_theta ), 0, np.cos( sam_theta ) ]
]
)
Rxy = np.dot(Rx,Ry)
return np.dot(Rxy,Rz)
def _generate_qxyz_map_SF_from_Lab(self,qx,qy,qz,
sam_phi, sam_theta, sam_chi,
degrees=True):
'''
Convert qmap from Lab frame to sample frame
'''
self.Rot = self.rotation_matrix( sam_phi, sam_theta, sam_chi, degrees=degrees )
qsx, qsy, qsz = np.dot(self.Rot, [ np.ravel(qx), np.ravel(qy), np.ravel(qz)] )
return qsx.reshape( qx.shape), qsy.reshape( qy.shape),qsz.reshape( qz.shape)
def _generate_qxyz_maps_samFrame(self, degrees=True):
"""
Generate the sample-frame qmaps by rotating the lab-frame qmaps.
"""
self._generate_qxyz_maps()
self.qx_map_lab_data,self.qy_map_lab_data,self.qz_map_lab_data= self._generate_qxyz_map_SF_from_Lab(
self.qx_map_data,self.qy_map_data,self.qz_map_data,
self.sam_phi, self.sam_theta, self.sam_chi,
degrees=degrees )
self.qr_map_lab_data = np.sqrt(np.square(self.qx_map_lab_data) + np.square(self.qy_map_lab_data))
self.q_map_lab_data = np.sqrt(np.square(self.qx_map_lab_data) +
np.square(self.qy_map_lab_data) +
np.square(self.qz_map_lab_data)
)
def get_ratioDw(self):
width_mm = self.width*self.pixel_size_um/1000.
return self.distance_m/(width_mm/1000.)
# Maps
########################################
def q_map(self):
if self.q_map_data is None:
self._generate_qxyz_maps()
return self.q_map_data
def angle_map(self):
if self.angle_map_data is None:
self._generate_qxyz_maps()
return self.angle_map_data
def _generate_qxyz_maps_no_offest(self):
"""
The geometric calculations used here are described:
http://gisaxs.com/index.php/Geometry:WAXS_3D
"""
d = self.distance_m
pix_size = self.pixel_size_um/1e6
phi_g = np.radians(self.det_phi_g)
theta_g =
|
np.radians(self.det_theta_g)
|
numpy.radians
|
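# Hedged, self-contained sketch of the lab-to-sample rotation used in rotation_matrix /
# _generate_qxyz_map_SF_from_Lab above: compose Rx(phi) @ Ry(theta) @ Rz(chi) and apply
# it to the stacked q components. The angle values and map shapes are placeholders.
import numpy as np

def sample_frame_rotation(phi, theta, chi):
    """Return the combined rotation matrix for angles given in degrees."""
    phi, theta, chi = np.radians([phi, theta, chi])
    Rx = np.array([[1, 0, 0],
                   [0, np.cos(phi), np.sin(phi)],
                   [0, -np.sin(phi), np.cos(phi)]])
    Ry = np.array([[np.cos(theta), 0, np.sin(theta)],
                   [0, 1, 0],
                   [-np.sin(theta), 0, np.cos(theta)]])
    Rz = np.array([[np.cos(chi), np.sin(chi), 0],
                   [-np.sin(chi), np.cos(chi), 0],
                   [0, 0, 1]])
    return Rx @ Ry @ Rz

qx, qy, qz = np.random.rand(3, 64, 64)          # placeholder lab-frame q maps
R = sample_frame_rotation(phi=5.0, theta=0.0, chi=10.0)
qsx, qsy, qsz = (R @ np.vstack([qx.ravel(), qy.ravel(), qz.ravel()])).reshape(3, *qx.shape)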
# -*- coding: UTF-8 -*-
import logging as log
from OpenGL.GL import *
import numpy as np
class Camera(object):
def __init__(self):
self.__T_world_view = np.eye(4, dtype=np.float32)
self.__T_view_world = np.eye(4, dtype=np.float32)
self.__T_view_proj = np.eye(4, dtype=np.float32)
self.__T_proj_view = np.eye(4, dtype=np.float32)
self.__T_proj_world = np.eye(4, dtype=np.float32)
self.__viewport = (0.0, 0.0, 1.0, 1.0)
self.__relative_viewport = True
self._w = 0
self._h = 0
self.dirty = False
def lookAt(self, pos, target, up):
pos = np.array(pos,dtype=np.float32); target = np.array(target, dtype=np.float32); up = np.array(up, dtype=np.float32)
z = pos - target; z *= (1.0 / np.linalg.norm(z))
x = np.cross(up, z); x *= (1.0/np.linalg.norm(x))
y = np.cross(z,x)
rot = np.vstack((x,y,z))
self.__T_view_world[:3,:3] = rot
self.__T_view_world[:3,3] = -np.dot(rot,pos)
self.__T_world_view[:3,:3] = rot.transpose()
self.__T_world_view[:3,3] = pos
self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
self.dirty = True
def from_radius_angles(self, radius, theta, phi):
x = radius * np.sin(phi) * np.cos(theta)
y = radius * np.sin(phi) * np.sin(theta)
z = radius * np.cos(phi)
pos = np.array((x,y,z),dtype=np.float32); target = np.array( (0,0,0), dtype=np.float32);
_z = pos - target; _z *= (1.0 / np.linalg.norm(_z))
up = (0,0,1)
if np.linalg.norm( np.cross(up, _z) ) == 0.0:
up = (np.cos(theta),np.sin(theta),0)
self.lookAt((x,y,z),(0,0,0),up)
def setT_world_view(self, T_world_view):
self.__T_world_view[:] = T_world_view
self.__T_view_world[:] = np.linalg.inv(T_world_view)
self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
self.dirty = True
def setT_view_proj(self, T_view_proj):
self.__T_view_proj[:] = T_view_proj
self.__T_proj_view[:] = np.linalg.inv(T_view_proj)
self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
self.dirty = True
def projection(self, fov, aspect, near, far):
diff = near - far
A = np.tan(fov/2.0)
self.__T_proj_view[:] = np.array( [ [A/aspect,0,0,0],
[0,A,0,0],
[0,0,(far+near)/diff,2*far*near/diff],
[0,0,-1,0] ], dtype=np.float32)
self.__T_view_proj[:] = np.linalg.inv(self.__T_proj_view)
self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
self.dirty = True
def ortho(self, left, right, bottom, top, nearVal, farVal):
self.__T_proj_view[:] = Camera.__glOrtho__(left, right, bottom, top, nearVal, farVal)
self.__T_view_proj[:] = np.linalg.inv(self.__T_proj_view)
self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
self.dirty = True
def realCameraIntrinsic(self, fx, fy, x0, y0, W, H, near, far):
I = np.array([ [fx, 0.0, x0],
[0., fy, y0],
[0., 0, 1.0]])
self.setIntrinsic(I, W, H, near, far)
def realCamera(self, W, H, K, R, t, near, far, scale=1.0, originIsInTopLeft=True):
self.setIntrinsic(K, W, H, near, far, scale, originIsInTopLeft)
self.__T_world_view[:3,:3] = R.transpose()
self.__T_world_view[:3,3] = -np.dot(R.transpose(), t.squeeze())
z_flip = np.eye(4, dtype=np.float32)
z_flip[2, 2] = -1
self.__T_world_view[:] = self.__T_world_view.dot(z_flip)
self.__T_view_world[:] = np.linalg.pinv(self.__T_world_view)
# self.__T_view_world[:3,:3] = self.__T_world_view[:3,:3].transpose()
# self.__T_view_world[:3,3] = -np.dot(self.__T_world_view[:3,:3], self.__T_view_world[:3,3].squeeze())
# self.__T_world_view
self.__T_proj_world[:] = np.dot(self.__T_proj_view, self.__T_view_world)
def real_camera(self, W, H, K, R, t, near, far, scale=1.0, originIsInTopLeft=False, r=0.0, c=0.0):
self.setIntrinsic(K, W, H, near, far, scale, originIsInTopLeft)
Kinv = np.linalg.pinv(K)
p = np.array([r,c,1], dtype=np.float64)
d = np.dot(Kinv,p)
eps = 1e-7
alpha = np.arctan(d[1]/(np.sqrt(d[2]**2 + d[0]**2)+eps))
beta = np.arctan(d[0]/(d[2]+eps))
Rx_alpha = np.array([[1, 0 , 0 , 0 ],
[ 0, np.cos(alpha), -
|
np.sin(alpha)
|
numpy.sin
|
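# Hedged, self-contained sketch of the look-at construction used in Camera.lookAt above:
# the view matrix's rotation rows are the camera axes, and the translation is -R @ pos.
# The positions below are arbitrary example values.
import numpy as np

def look_at(pos, target, up):
    pos, target, up = (np.asarray(v, dtype=np.float32) for v in (pos, target, up))
    z = pos - target
    z /= np.linalg.norm(z)
    x = np.cross(up, z)
    x /= np.linalg.norm(x)
    y = np.cross(z, x)
    rot = np.vstack((x, y, z))                   # rows are the camera axes
    T_view_world = np.eye(4, dtype=np.float32)
    T_view_world[:3, :3] = rot
    T_view_world[:3, 3] = -rot @ pos
    return T_view_world

T = look_at(pos=(0, -2, 1), target=(0, 0, 0), up=(0, 0, 1))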
import numpy as np
import numpy.linalg as la
from trainer import Trainer
class Gd(Trainer):
"""
Gradient descent with constant learning rate.
Arguments:
lr (float): an estimate of the inverse smoothness constant
"""
def __init__(self, lr, *args, **kwargs):
super(Gd, self).__init__(*args, **kwargs)
self.lr = lr
def step(self):
return self.w - self.lr * self.grad
def init_run(self, *args, **kwargs):
super(Gd, self).init_run(*args, **kwargs)
class Nesterov(Trainer):
"""
Nesterov's accelerated gradient descent with constant learning rate.
Arguments:
lr (float): an estimate of the inverse smoothness constant
strongly_convex (boolean, optional): if true, uses the variant
for strongly convex functions, which requires mu>0 (default: False)
"""
def __init__(self, lr, strongly_convex=False, mu=0, *args, **kwargs):
super(Nesterov, self).__init__(*args, **kwargs)
self.lr = lr
if mu < 0:
raise ValueError("Invalid mu: {}".format(mu))
if strongly_convex and mu == 0:
raise ValueError("""Mu must be larger than 0 for strongly_convex=True,
invalid value: {}""".format(mu))
if strongly_convex:
self.mu = mu
kappa = (1/self.lr)/self.mu
self.momentum = (np.sqrt(kappa)-1) / (np.sqrt(kappa)+1)
self.strongly_convex = strongly_convex
def step(self):
if not self.strongly_convex:
alpha_new = 0.5 * (1 + np.sqrt(1 + 4 * self.alpha ** 2))
self.momentum = (self.alpha - 1) / alpha_new
self.alpha = alpha_new
self.w_nesterov_old = self.w_nesterov.copy()
self.w_nesterov = self.w - self.lr * self.grad
return self.w_nesterov + self.momentum * (self.w_nesterov - self.w_nesterov_old)
def init_run(self, *args, **kwargs):
super(Nesterov, self).init_run(*args, **kwargs)
self.w_nesterov = self.w.copy()
self.alpha = 1.
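# Hedged illustration of the momentum schedule used in Nesterov.step above (convex case):
# alpha_{k+1} = (1 + sqrt(1 + 4*alpha_k^2)) / 2 and momentum_k = (alpha_k - 1) / alpha_{k+1},
# which increases toward 1 as k grows.
def nesterov_momentum_schedule(n_steps=5):
    alpha, momenta = 1.0, []
    for _ in range(n_steps):
        alpha_new = 0.5 * (1 + np.sqrt(1 + 4 * alpha ** 2))
        momenta.append((alpha - 1) / alpha_new)
        alpha = alpha_new
    return momenta   # roughly [0.0, 0.28, 0.43, ...]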
class Adgd(Trainer):
"""
Adaptive gradient descent based on the local smoothness constant
Arguments:
eps (float, optional): an estimate of 1 / L^2, where L is the global smoothness constant (default: 0)
"""
def __init__(self, eps=0.0, lr0=None, *args, **kwargs):
if not 0.0 <= eps:
raise ValueError("Invalid eps: {}".format(eps))
super(Adgd, self).__init__(*args, **kwargs)
self.eps = eps
self.lr0 = lr0
def estimate_stepsize(self):
L = la.norm(self.grad - self.grad_old) / la.norm(self.w - self.w_old)
if np.isinf(self.theta):
lr_new = 0.5 / L
else:
lr_new = min(np.sqrt(1 + self.theta) * self.lr, self.eps / self.lr + 0.5 / L)
self.theta = lr_new / self.lr
self.lr = lr_new
def step(self):
self.w_old = self.w.copy()
self.grad_old = self.grad.copy()
return self.w - self.lr * self.grad
def init_run(self, *args, **kwargs):
super(Adgd, self).init_run(*args, **kwargs)
self.theta = np.inf
grad = self.grad_func(self.w)
if self.lr0 is None:
self.lr0 = 1e-10
self.lr = self.lr0
self.lrs = [self.lr]
self.w_old = self.w.copy()
self.grad_old = grad
self.w -= self.lr * grad
self.save_checkpoint()
def update_logs(self):
super(Adgd, self).update_logs()
self.lrs.append(self.lr)
class AdgdAccel(Trainer):
"""
Adaptive gradient descent with heuristic Nesterov's acceleration
Targeted at locally strongly convex functions, so by default uses
estimation with min(sqrt(1 + theta_{k-1} / 2) * la_{k-1}, 0.5 / L_k)
Arguments:
a_lr (float, optional): increase parameter for learning rate (default: 0.5)
a_mu (float, optional): increase parameter for strong convexity (default: 0.5)
b_lr (float, optional): local smoothness scaling (default: 0.5)
b_mu (float, optional): local strong convexity scaling (default: 0.5)
"""
def __init__(self, a_lr=0.5, a_mu=0.5, b_lr=0.5, b_mu=0.5, *args, **kwargs):
super(AdgdAccel, self).__init__(*args, **kwargs)
self.a_lr = a_lr
self.a_mu = a_mu
self.b_lr = b_lr
self.b_mu = b_mu
def estimate_stepsize(self):
L = la.norm(self.grad - self.grad_old) / la.norm(self.w - self.w_old)
lr_new = min(np.sqrt(1 + self.a_lr * self.theta_lr) * self.lr, self.b_lr / L)
self.theta_lr = lr_new / self.lr
self.lr = lr_new
mu_new = min(np.sqrt(1 + self.a_mu * self.theta_mu) * self.mu, self.b_mu * L)
self.theta_mu = mu_new / self.mu
self.mu = mu_new
def step(self):
self.w_old = self.w.copy()
self.grad_old = self.grad.copy()
momentum = (np.sqrt(1 / self.lr) - np.sqrt(self.mu)) / (np.sqrt(1 / self.lr) +
|
np.sqrt(self.mu)
|
numpy.sqrt
|
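# Hedged, self-contained sketch of the Adgd step-size rule above on a toy quadratic.
# The Trainer base class is not shown in this file, so the loop is re-implemented
# directly here with eps = 0; the diagonal quadratic problem is illustrative only.
import numpy as np

def adgd(grad_func, w0, n_iter=200, lr0=1e-10):
    w, lr, theta = w0.copy(), lr0, np.inf
    grad = grad_func(w)
    w_old, grad_old = w.copy(), grad.copy()
    w = w - lr * grad
    for _ in range(n_iter):
        grad = grad_func(w)
        L = np.linalg.norm(grad - grad_old) / np.linalg.norm(w - w_old)  # local smoothness
        lr_new = 0.5 / L if np.isinf(theta) else min(np.sqrt(1 + theta) * lr, 0.5 / L)
        theta, lr = lr_new / lr, lr_new
        w_old, grad_old = w.copy(), grad.copy()
        w = w - lr * grad
    return w

A = np.diag([1.0, 10.0, 100.0])
w_min = adgd(lambda w: A @ w, w0=np.ones(3))   # should approach the origin, the minimizer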
import math
import numpy as np
import numpy.matlib as matlib
import scipy.interpolate as interpolate
import marvin
import marvin.tools.rss as rss
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from astropy.io import fits
from pydl.pydlutils.sdss import sdss_flagval
from scipy import sparse
import mnsa.kernel
import os
# Set Marvin configuration so it gets everything local
marvin.config.setRelease('DR15')
marvin.config.mode = 'local'
marvin.config.download = True
# Rough grid length in arcsec to use for each IFU size
gridsize = {7: 12, 19: 17, 37: 22, 61: 27, 91: 32, 127: 36}
class Reconstruct(object):
"""Base class for reconstruction of cubes from RSS files
Parameters:
----------
plate : int, np.int32
plate number
ifu : int, np.int32
IFU number
release : str
data release (default 'DR15')
waveindex : int, np.int32
indices of wavelengths to reconstruct, or None to reconstruct all
(default None)
pixelscale : float, np.float32
pixel scale of output grid in arcsec (default 0.75)
dkernel : float, np.float32
underlying resolution of kernel in arcsec (default 0.05)
verbose : bool
If True, be verbose
Attributes:
----------
verbose : bool
If True, be verbose
plate : int, np.int32
plate number
ifu : int, np.int32
IFU number
nfiber : int
number of fibers
release : str
data release
rss : RSS object
Marvin RSS output
waveindex : int, np.int32
indices of wavelengths to reconstruct
dkernel : np.float32
underlying resolution of kernel in arcsec
nExp : int, np.int32
number of exposures
xpos : 2D ndarray of np.float32
X positions of each fiber at each wavelength [nExp * nfiber, nWave]
ypos : 2D ndarray of np.float32
Y positions of each fiber at each wavelength [nExp * nfiber, nWave]
wave : ndarray of np.float32
wavelength grid
nWave : int, np.int32
size of wavelength grid
obsinfo : ndarray
observing information object
seeing0 : ndarray of np.float32
FWHM of seeing at guider wavelength (5400 Angstroms) [nExp]
seeing : 2D ndarray of np.float32
FWHM of seeing at each wavelength [nWave, nExp]
bbwave : ndarray of np.float32
[4] g, r, i, z band reference wavelengths
bbindex : ndarray of np.float32
[4] index of wave closest to g, r, i, z reference wavelengths
pixelscale : np.float32
pixel scale for output grid (arcsec)
dkernel : np.float32
pixel scale for kernel (arcsec)
kernel : Kernel class object
object to return radial kernel function
conversion : np.float32
factor to multiply per fiber units by to get per pixel units
nside : np.int32
number of pixels on a side for the image grid
nimage : np.int32
number of pixels total for the image grid
length : np.float32
length of image grid in arcsec (based on outer pixel edges)
x2i : 2D ndarray of np.float32
array of X positions on grid
y2i : 2D ndarray of np.float32
array of Y positions on grid
xi : ndarray of np.float32
1D array of X positions
yi : ndarray of np.float32
1D array of Y positions
xmin : np.float32
outer edge of lowest X pixel
ymin : np.float32
outer edge of lowest Y pixel
xmax : np.float32
outer edge of highest X pixel
ymax : np.float32
outer edge of highest Y pixel
"""
def __init__(self, plate=None, ifu=None, release='DR15', waveindex=None,
pixelscale=0.75, dkernel=0.05, verbose=True):
self.plate = plate
self.ifu = ifu
self.plateifu = "{plate}-{ifu}".format(plate=self.plate, ifu=self.ifu)
self.release = release
self.nfiber = int(self.ifu) // 100 # depends on MaNGA naming
self.rss = None
self.waveindex = waveindex
self.pixelscale = pixelscale
self.verbose = verbose
self.dkernel = dkernel
if self.waveindex is not None:
self.waveindex = self._arrayify_int32(self.waveindex)
self.fiberradius = 1.
self.GPSF = None
self.RPSF = None
self.IPSF = None
self.ZPSF = None
self.GFWHM = None
self.RFWHM = None
self.IFWHM = None
self.ZFWHM = None
return
def _arrayify_int32(self, quantity):
"""Cast quantity as ndarray of numpy.int32"""
try:
length = len(quantity)
except TypeError:
length = 1
return np.zeros(length, dtype=np.int32) + quantity
def run(self):
"""Perform all steps to create a cube"""
if(self.verbose):
print("Importing RSS fluxes.")
self.set_rss()
if(self.verbose):
print("Creating output grid.")
self.set_image_grid()
if(self.verbose):
print("Setting up kernel.")
self.set_kernel()
if(self.verbose):
print("Constructing data flux and ivar arrays to use.")
self.set_flux_rss()
if(self.verbose):
print("Constructing PSF model flux array.")
self.set_flux_psf()
if(self.verbose):
print("Calculate cube.")
self.calculate_cube()
if (len(self.wave) == self.rss.data['FLUX'].data.shape[1]):
self.set_band()
return
def set_rss(self):
"""Acquire the RSS data and set related attributes
Notes:
-----
Uses Marvin tools to get RSS data.
Sets attributes:
.rss - Marvin RSS object
.nExp - Number of exposures
.xpos - X positions of each fiber [nExp * nfiber, nWave]
.ypos - Y positions of each fiber [nExp * nfiber, nWave]
.wave - Selected wavelength grid [nWave]
.nWave - Size of wavelength grid
.obsinfo - Observing information object
.seeing0 - FWHM of seeing at guider wavelength (5400 Angstroms) [nExp]
.seeing - FWHM at each wavelength [nWave, nExp]
.bbwave - g, r, i, z band reference wavelengths
.bbindex - index of wave closest to g, r, i, z reference wavelengths
"""
self.rss = rss.RSS(plateifu=self.plateifu, release=self.release)
self.nExp = self.rss.data[0].header['NEXP']
self.xpos = self.rss.data['XPOS'].data
self.ypos = self.rss.data['YPOS'].data
self.wave = self.rss.data['WAVE'].data
self.nWave = self.rss.data['FLUX'].shape[1]
# Find wavelengths near griz
gpivot = 4702.50
rpivot = 6176.58
ipivot = 7496.12
zpivot = 8946.71
self.bbwave = np.array([gpivot, rpivot, ipivot, zpivot],
dtype=np.float32)
self.bbindex = np.zeros(len(self.bbwave), dtype=np.int32)
for j, wave in enumerate(self.bbwave):
self.bbindex[j] = min(range(len(self.wave)),
key=lambda i: abs(self.wave[i] - wave))
# Use waveindex if that was set
if (self.waveindex is not None):
self.nWave = len(self.waveindex)
self.wave = self.wave[self.waveindex]
self.xpos = self.xpos[:, self.waveindex]
self.ypos = self.ypos[:, self.waveindex]
# Set FWHM values as a function of wavelength
self.obsinfo = self.rss.data['OBSINFO'].data
self.seeing0 = (self.obsinfo.field('SEEING') *
self.obsinfo.field('PSFFAC'))
lambda0 = 5400.
self.seeing = [[self.seeing0[i] * math.pow(lambda0 / self.wave[j], 1. / 5.)
for i in range(self.seeing0.shape[0])]
for j in range(self.nWave)]
self.seeing = np.array(self.seeing)
return
def _create_grid(self, d=None):
"""Create a grid (used for image and kernel)"""
rough_length = gridsize[self.nfiber]
nside = ((np.int32(np.ceil(rough_length / d)) // 2) * 2 + 1)
length = np.float32(nside * d)
xmin = -0.5 * length
xmax = 0.5 * length
ymin = -0.5 * length
ymax = 0.5 * length
xi = np.linspace(xmin + 0.5 * d, xmax - 0.5 * d, nside,
dtype=np.float32)
yi = np.linspace(ymin + 0.5 * d, ymax - 0.5 * d, nside,
dtype=np.float32)
x2i, y2i = np.meshgrid(xi, yi)
return (nside, length, x2i, y2i, xi, yi)
def set_image_grid(self):
"""Create the image output grid
Notes:
-----
Sets attributes:
.conversion - factor to multiply per fiber units by to get
per pixel units
.nside - number of pixels on a side for the image grid
.nimage - number of pixels total in image
.length - length of image grid in arcsec (based on outer pixel edges)
.x2i - 2-D array of X positions
.y2i - 2-D array of Y positions
.xi - 1-D array of X positions
.yi - 1-D array of Y positions
.xmin - outer edge of lowest X pixel
.ymin - outer edge of lowest Y pixel
.xmax - outer edge of highest X pixel
.ymax - outer edge of highest Y pixel
"""
self.conversion = self.pixelscale ** 2 / np.pi
(self.nside, self.length, self.x2i, self.y2i, self.xi, self.yi) = self._create_grid(self.pixelscale)
self.nimage = self.nside ** 2
self.xmin = -0.5 * self.length
self.xmax = 0.5 * self.length
self.ymin = -0.5 * self.length
self.ymax = 0.5 * self.length
return
def set_kernel(self):
"""Set the kernel for each wavelength and exposure
Notes:
-----
Sets attributes:
.kernel - Kernel class object
"""
rough_length = gridsize[self.nfiber]
self.kernel = mnsa.kernel.Kernel(rough_length=rough_length,
dkernel=self.dkernel,
nseeing=100)
return
def set_flux_psf(self, xcen=0., ycen=0., alpha=1, noise=None):
"""Set the fiber fluxes to a PSF corresponding to the kernel
Parameters:
----------
xcen : float, np.float32
X center of PSF desired
ycen : float, np.float32
Y center of PSF desired
alpha : float, np.float32
scale to apply to PSF size (default 1)
noise : float, np.float32
noise scaling log factor
Notes:
-----
Requires set_kernel() to have already been called to set the
flux for each exposure and wavelength.
Only uses wavelengths specified by the object's waveindex
attribute or all the wavelengths if waveindex not given.
It adds noise by multiplying the fluxes by 10^noise, sampling
from a Poisson distribution using that as a mean, and then
dividing by 10^noise.
Sets attributes:
.flux_psf - flux in each fiber for simulation [nExp * nfiber, nWave]
.flux_psf_ivar - inverse variance of flux in each fiber for simulation
[nExp * nfiber, nWave]
.flux_psf0 - noise free version of flux_psf [nExp * nFiber, nWave]
"""
if (xcen is None):
xcen = 0.
if (ycen is None):
ycen = 0.
self.flux_psf = np.zeros([self.xpos.shape[0], self.nWave])
self.flux_psf_ivar = np.ones([self.xpos.shape[0], self.nWave],
dtype=np.float32)
self.flux_psf_mask = np.zeros([self.xpos.shape[0], self.nWave])
for iWave in np.arange(self.nWave):
for iExp in np.arange(self.nExp):
xsample = self.xpos[iExp * self.nfiber:(iExp + 1) * self.nfiber, iWave]
ysample = self.ypos[iExp * self.nfiber:(iExp + 1) * self.nfiber, iWave]
dx = xsample - xcen
dy = ysample - ycen
rsample = np.sqrt(dx**2 + dy**2)
self.flux_psf[iExp * self.nfiber:(iExp + 1) * self.nfiber,
iWave] = self.kernel.radial(seeing=self.seeing[iWave, iExp] * alpha, radii=rsample) * np.pi * self.fiberradius**2
self.flux_psf0 = self.flux_psf.copy()
# Add noise if desired
if(noise is not None):
for i in range(len(self.flux_psf)):
self.flux_psf[i] = np.random.poisson(self.flux_psf[i] * 10 ** noise) / 10. ** noise
return
def set_flux_rss(self):
"""Set the flux to the RSS input values
Notes:
-----
Only uses wavelengths specified by the object's waveindex
attribute or all the wavelengths if waveindex not given.
Sets attributes:
.flux - flux in each fiber [nExp * nfiber, nWave]
.flux_ivar - inverse variance of flux in each fiber
[nExp * nfiber, nWave]
.flux_mask - mask of each fiber [nExp * nfiber, nWave]
.flux_disp - LSF dispersion [nExp * nfiber, nWave]
.flux_predisp - LSF pre-dispersion [nExp * nfiber, nWave]
.lsf_exist - True if flux_disp, flux_predisp available, False if not
"""
self.flux = self.rss.data['FLUX'].data
self.flux_ivar = self.rss.data['IVAR'].data
self.flux_mask = self.rss.data['MASK'].data
if (self.waveindex is not None):
self.flux = self.flux[:, self.waveindex]
self.flux_ivar = self.flux_ivar[:, self.waveindex]
self.flux_mask = self.flux_mask[:, self.waveindex]
try:
self.flux_disp = self.rss.data['DISP'].data
self.flux_predisp = self.rss.data['PREDISP'].data
if (self.waveindex is not None):
self.flux_disp = self.flux_disp[:, self.waveindex]
self.flux_predisp = self.flux_predisp[:, self.waveindex]
self.lsf_exist = True
except:
self.lsf_exist = False
return
def create_weights(self, xsample=None, ysample=None, ivar=None,
waveindex=None):
"""Calculate weights based on nearest fiber
Parameters:
----------
xsample : ndarray of np.float32
X position of samples
ysample : ndarray of np.float32
Y position of samples
ivar : ndarray of np.float32
inverse variance of samples
Returns:
-------
wwT : ndarray of np.float32
normalized weights [nside * nside, nExp * nfiber]
Notes:
-----
This version just sets the weight to unity for the nearest fiber.
"""
iok = np.where(ivar > 0.)
w = np.zeros([len(xsample), self.nside * self.nside], dtype=np.float32)
for i in np.arange(self.nside):
for j in np.arange(self.nside):
dx = xsample - self.x2i[i, j]
dy = ysample - self.y2i[i, j]
r = np.sqrt(dx ** 2 + dy ** 2)
iclosest = r[iok].argmin()
w[iclosest, i * self.nside + j] = 1.
wwT = self.normalize_weights(w)
return (wwT)
def normalize_weights(self, w):
"""Normalize weights
Parameters:
----------
w : ndarray of np.float32
weights [nExp * nfiber, nside * nside]
Returns:
-------
wwT : ndarray of np.float32
normalized weights [nside * nside, nExp * nfiber]
Notes:
-----
Normalizes the contributions of each pixel over all fiber-exposures.
If the pixel has no contributions, sets weight to zero.
"""
wsum = w.sum(axis=0)
ww = np.zeros(w.shape, dtype=np.float32)
for i in np.arange(self.nimage):
if wsum[i] == 0:
ww[:, i] = 0
else:
ww[:, i] = w[:, i] / wsum[i]
wwT = ww.T
return (wwT)
def calculate_cube(self):
"""Calculate cube and cube inverse variance
Notes:
------
Sets attributes:
.cube : ndarray of np.float32
[nWave, nside, nside] reconstruction result
.cube_psf : ndarray of np.float32
[nWave, nside, nside] reconstruction result for PSF
.cube_ivar : ndarray of np.float32
[nWave, nside, nside] inverse variance of reconstruction result
.cube_corr : list of correlation matrix in sparse array format.
If waveindex is None, return the correlation matrix at SDSS
g,r,i,z broadband effective wavelengths; else, return the
correlation matrix for first four wavelength indexes
.cube_mask : ndarray of np.int32
[nWave, nside, nside] mask of reconstruction pixels
"""
if self.waveindex is None:
waveindex = np.arange(self.nWave)
nWave = len(waveindex)
else:
nWave = self.nWave
waveindex = self.waveindex
self.cube = np.zeros([nWave, self.nside, self.nside],
dtype=np.float32)
self.cube_psf = np.zeros([nWave, self.nside, self.nside],
dtype=np.float32)
self.cube_ivar = np.zeros([nWave, self.nside, self.nside],
dtype=np.float32)
i = 0
self.cube_corr = []
self.slice_fail = []
self.cube_mask = np.zeros([nWave, self.nside, self.nside],
dtype=np.int32)
if(self.lsf_exist):
self.disp = np.zeros([nWave, self.nside, self.nside],
dtype=np.float32)
self.predisp = np.zeros([nWave, self.nside, self.nside],
dtype=np.float32)
print(nWave)
for iWave in np.arange(nWave):
print(iWave, flush=True)
try:
w0, weights = self.create_weights(xsample=self.xpos[0:self.nExp * self.nfiber, iWave],
ysample=self.ypos[0:self.nExp * self.nfiber, iWave],
ivar=self.flux_ivar[:, iWave],
waveindex=iWave)
except np.linalg.LinAlgError:
print('failing to converge', iWave)
self.slice_fail.append(iWave)
self.w0 = w0
fcube = ((weights.dot(self.flux[:, iWave])).reshape(self.nside,
self.nside) *
self.conversion)
self.cube[i, :, :] = fcube
fcube = ((weights.dot(self.flux_psf[:, iWave])).reshape(self.nside,
self.nside) *
self.conversion)
self.cube_psf[i, :, :] = fcube
# covariance
covar = (self.covar(iWave, self.flux_ivar, weights) *
self.conversion**2)
var = np.diagonal(covar)
igt0 = np.where(var > 0)[0]
ivar = np.zeros(self.nside * self.nside, dtype=np.float32)
ivar[igt0] = 1. / var[igt0]
self.cube_ivar[i, :, :] = ivar.reshape([self.nside,
self.nside])
# correlation matrix only available for up to four wavelength slices
if self.waveindex is None:
if iWave in self.bbindex:
corr = covar / np.outer(np.sqrt(var), np.sqrt(var))
corr[np.where(covar == 0)] = 0
self.cube_corr.append(sparse.csr_matrix(corr))
elif i < 4:
corr = covar / np.outer(np.sqrt(var), np.sqrt(var))
corr[np.where(covar == 0)] = 0
self.cube_corr.append(sparse.csr_matrix(corr))
# mask
self.cube_mask[i, :, :] = self.mask(iWave, self.flux_ivar,
self.flux_mask, w0,
weights).reshape([self.nside,
self.nside])
i = i + 1
if(self.lsf_exist):
self.disp[iWave] = (
(np.abs(weights) / (matlib.repmat(np.abs(weights).sum(axis=1), weights.shape[-1], 1).T)).dot(
self.flux_disp[:, iWave])).reshape(self.nside, self.nside)
self.predisp[iWave] = (
(np.abs(weights) / (matlib.repmat(np.abs(weights).sum(axis=1), weights.shape[-1], 1).T)).dot(
self.flux_predisp[:, iWave])).reshape(self.nside, self.nside)
if(self.lsf_exist):
indices = np.isnan(self.disp)
self.disp[indices] = 0
indices = np.isnan(self.predisp)
self.predisp[indices] = 0
return
def covar(self, iWave=None, flux_ivar=None, weights=None):
"""Return cube covariance matrix for a wavelength
Parameters:
----------
iWave : int, np.int32
index of wavelength to calculate for
flux_ivar: ndarray of np.float32
flux inverse variance of each fiber [nExp * nfiber]
weights: ndarray of np.float32
[nside * nside, nExp * nfiber]
weights matrix between pixels and fibers
Returns:
-------
covar : ndarray of np.float32
[nside * nside, nside * nside] covariance matrix
"""
iok = np.where(flux_ivar[:, iWave] > 0)[0]
wwT = (weights[:, :])[:, iok]
covar = wwT.dot(np.diag(1 / (flux_ivar[iok, iWave]))).dot(wwT.T)
return (covar)
def mask(self, iWave=None, flux_ivar=None, flux_mask=None, w0=None,
weights=None):
"""Return mask matrix for a typical wavelength given the weights matrix
Parameters:
----------
iWave : int, np.int32
index of wavelength to calculate for
flux_ivar: ndarray of np.float32
flux inverse variance of each fiber [nExp * nfiber]
flux_mask : ndarray of np.int32
mask of each fiber [nExp * nfiber]
w0: ndarray of np.float32
[nside * nside, nExp * nfiber]
unnormalized weights without bad fibers
weights: ndarray of np.float32
[nside * nside, nExp * nfiber]
weights matrix between pixels and fibers
Returns:
-------
maskimg : ndarray of np.int32
[nside * nside] bitmask of pixels
Notes:
-----
Uses MANGA_DRP3PIXMASK bit values:
DEADFIBER - set if a dead fiber would have contributed to pixel
LOWCOV - set if variance would be higher than twice median
were all fiber-exposures equally noisy
NOCOV - set if no coverage by any fiber
DONOTUSE - set if LOWCOV and/or NOCOV set
"""
flagdeadfiber = sdss_flagval('MANGA_DRP3PIXMASK', 'DEADFIBER')
flaglocov = sdss_flagval('MANGA_DRP3PIXMASK', 'LOWCOV')
flagnocov = sdss_flagval('MANGA_DRP3PIXMASK', 'NOCOV')
flagnouse = sdss_flagval('MANGA_DRP3PIXMASK', 'DONOTUSE')
(mask_dead, mask_lowcov, mask_nocov,
mask_dnu) = [np.zeros(self.nside**2) for i in range(4)]
index_nocov = np.where(w0.sum(axis=0) == 0)[0]
mask_nocov[index_nocov] = flagnocov
mask_dnu[index_nocov] = flagnouse
cov = np.diag(weights.dot(weights.T))
cov = cov / np.median(cov[np.where(cov)])
indices = np.logical_or(cov == 0, cov > 2)
mask_lowcov[indices] = flaglocov
mask_dnu[indices] = flagnouse
index_deadfibers = np.where(np.bitwise_and(np.uint32(flagdeadfiber),
np.uint32(flux_mask[:,
iWave])))
mask_dead = (((w0[index_deadfibers] != 0).sum(axis=0) != 0) *
flagdeadfiber)
maskimg = np.uint32(mask_nocov)
maskimg = np.bitwise_or(maskimg, np.uint32(mask_lowcov))
maskimg = np.bitwise_or(maskimg, np.uint32(mask_dead))
maskimg = np.bitwise_or(maskimg, np.uint32(mask_dnu))
return maskimg
def plot_slice(self, iWave=0, keyword=None, vmax=None, vmin=0.):
"""Plot a slice of the cube
Parameters:
----------
iWave : int, np.int32 (default 0)
index into waveindex of the wavelength slice to plot (an index, not the wavelength value)
keyword : 'simulation' or else
if keyword == 'simulation': plot reconstruction from simulated flux
else plot reconstruction from real flux
vmax, vmin: float
maximum and minimum of plot
default vmax is set to maximum of flux
default vmin is set to 0.
"""
if keyword == 'simulation':
target = self.cube_psf[iWave, :, :]
else:
target = self.cube[iWave, :, :]
keyword = ''
if (vmax is None):
vmax = target.max() * 1.02
extent = (self.xmin, self.xmax, self.ymin, self.ymax)
plt.figure(figsize=(6.5, 5.))
font = {'family': 'sans-serif',
'size': 15}
plt.rc('font', **font)
plt.imshow(target, extent=extent, vmin=vmin,
vmax=vmax, cmap=cm.gray_r, origin='lower')
plt.xlabel('X (arcsec)')
plt.ylabel('Y (arcsec)')
plt.title('reconstruction of ' + keyword + ' slice')
plt.colorbar(label='flux')
def set_band(self):
"""Set average results for each band include FWHM estimate
Notes:
-----
Band averages are only computed when the full wavelength range was reconstructed.
Sets attributes:
.GPSF/RPSF/IPSF/ZPSF - ndarray of np.float32
[nside, nside] simulation image for each broadband
.GIMG/RIMG/IIMG/ZIMG - ndarray of np.float32
[nside, nside] real image for each broadband
.GFWHM/RFWHM/IFWHM/ZFWHM - float, np.float32
FWHM for each simulation image
"""
self.GPSF = self.PSFaverage('g', self.wave, self.cube_psf)
self.RPSF = self.PSFaverage('r', self.wave, self.cube_psf)
self.IPSF = self.PSFaverage('i', self.wave, self.cube_psf)
self.ZPSF = self.PSFaverage('z', self.wave, self.cube_psf)
self.GIMG = self.PSFaverage('g', self.wave, self.cube)
self.RIMG = self.PSFaverage('r', self.wave, self.cube)
self.IIMG = self.PSFaverage('i', self.wave, self.cube)
self.ZIMG = self.PSFaverage('z', self.wave, self.cube)
self.GFWHM = self.fit_fwhm(self.GPSF)
self.RFWHM = self.fit_fwhm(self.RPSF)
self.IFWHM = self.fit_fwhm(self.IPSF)
self.ZFWHM = self.fit_fwhm(self.ZPSF)
return
def fit_fwhm(self, image, xcen=None, ycen=None):
"""Fit FWHM to an image using radial kernel as a model
"""
seeings = self.kernel.seeing
nx = image.shape[0]
ny = image.shape[1]
if(xcen is None):
xcen = (np.float32(nx) * 0.5) - 0.5
if(ycen is None):
ycen = (np.float32(ny) * 0.5) - 0.5
xx = np.outer(np.arange(nx, dtype=np.float32) - xcen,
np.ones(ny, dtype=np.float32))
yy = np.outer(np.ones(nx, dtype=np.float32),
np.arange(ny, dtype=np.float32) - ycen)
rr = np.sqrt(xx**2 + yy**2) * self.pixelscale
chi2 = np.zeros(len(seeings), dtype=np.float32)
for i, seeing in enumerate(seeings):
model = self.kernel.radial(seeing=seeing, radii=rr.flatten())
model = model * self.pixelscale**2
A = ((model * image.flatten()).sum() /
(model ** 2).sum())
chi2[i] = ((A * model - image.flatten()) ** 2).sum()
ind = np.argmin(chi2)
seeing = seeings[ind]
fwhm = self.kernel.fwhm(seeing=seeing)
return(fwhm)
def PSFaverage(self, color=None, wave=None, PSF=None):
""" calculate FWHM for given image
Parameters:
----------
color : str, the color of band, can choose 'g'/'r'/'i'/'z'
wave : ndarray of np.float32
the wavelengths for the cube
PSF : ndarray of np.float32 [nWave, nside, nside]
the cube to average over the band
Returns:
-------
PSF_ave : ndarray of np.float32 [nside,nside]
the average of cube for given band
"""
filterfile = os.path.join(os.getenv('MNSA_DIR'),
'python', 'data', color + '_filter.dat')
band0 = np.loadtxt(filterfile)
band1 = np.arange(3400, band0[0, 0], 25)
band2 = np.arange(band0[-1, 0], 11000, 25)
weight1 = np.zeros(band1.shape)
weight2 = np.zeros(band2.shape)
band = np.concatenate((np.concatenate((band1, band0[:, 0]), axis=0),
band2), axis=0)
weight = np.concatenate((np.concatenate((weight1, band0[:, 1]), axis=0),
weight2), axis=0)
fun_band = interpolate.interp1d(band, weight)
band_value = fun_band(wave)
n = PSF.shape[1]
nWave = len(wave)
PSF_ave = (matlib.repmat(band_value, n**2, 1).T *
(PSF.reshape(nWave, n**2))).reshape(nWave, n, n).sum(axis=0) / band_value.sum()
return PSF_ave
def write(self, filename=None):
"""Write to a FITS file as MaNGA cube form
Parameters:
----------
filename: str
the name of fits file
"""
# Headers
card_GFWHM = fits.Card('GFWHM', self.GFWHM,
'Reconstructed FWHM in g-band (arcsec)')
card_RFWHM = fits.Card('RFWHM', self.RFWHM,
'Reconstructed FWHM in r-band (arcsec)')
card_IFWHM = fits.Card('IFWHM', self.IFWHM,
'Reconstructed FWHM in i-band (arcsec)')
card_ZFWHM = fits.Card('ZFWHM', self.ZFWHM,
'Reconstructed FWHM in z-band (arcsec)')
card_FWHM_list = [card_GFWHM, card_RFWHM, card_IFWHM, card_ZFWHM]
card_BSCALE = fits.Card('BSCALE', 1.00000, 'Intensity unit scaling')
card_BZERO = fits.Card('BZERO', 0.00000, 'Intensity zeropoint')
card_BSCALE_2 = fits.Card('BSCALE', 1.00000, 'Flux unit scaling')
card_BZERO_2 = fits.Card('BZERO', 0.00000, 'Flux zeropoint')
card_WCS_1 = fits.Card('CRPIX1', (self.nside + 1) / 2,
'Reference pixel (1-indexed)')
card_WCS_2 = fits.Card('CRPIX2', (self.nside + 1) / 2,
'Reference pixel (1-indexed)')
card_WCS_3 = fits.Card('CRVAL1',
self.rss.data['FLUX'].header['IFURA'])
card_WCS_4 = fits.Card('CRVAL2',
self.rss.data['FLUX'].header['IFUDEC'])
card_WCS_5 = fits.Card('CD1_1',
round(- self.length / self.nside / 3600, 9))
card_WCS_6 = fits.Card('CD2_2',
round(self.length / self.nside / 3600, 9))
card_WCS_7 = fits.Card('CTYPE1', 'RA---TAN')
card_WCS_8 = fits.Card('CTYPE2', 'DEC--TAN')
card_WCS_9 = fits.Card('CUNIT1', 'deg')
card_WCS_10 = fits.Card('CUNIT2', 'deg')
card_WCS_list = [card_WCS_1, card_WCS_2, card_WCS_3, card_WCS_4,
card_WCS_5, card_WCS_6, card_WCS_7, card_WCS_8,
card_WCS_9, card_WCS_10]
# Primary
hp = fits.PrimaryHDU(header=self.rss.data[0].header)
hp.header['BUNIT'] = ('1E-17 erg/s/cm^2/Ang/spaxel',
'Specific intensity (per spaxel)')
hp.header['MASKNAME'] = ('MANGA_DRP3PIXMASK',
'Bits in sdssMaskbits.par used by mask extension')
hp = self._insert_cardlist(hdu=hp, insertpoint='EBVGAL',
cardlist=card_FWHM_list, after=True)
if 'BSCALE' not in list(hp.header.keys()):
hp.header.insert('BUNIT', card_BSCALE, after=False)
if 'BZERO' not in list(hp.header.keys()):
hp.header.insert('BUNIT', card_BZERO, after=False)
# Flux
cubehdr = fits.ImageHDU(name='FLUX', data=self.cube,
header=self.rss.data['FLUX'].header)
cubehdr.header['BUNIT'] = ('1E-17 erg/s/cm^2/Ang/spaxel',
'Specific intensity (per spaxel)')
cubehdr.header['MASKNAME'] = ('MANGA_DRP3PIXMASK',
'Bits in sdssMaskbits.par used by mask extension')
cubehdr.header['HDUCLAS1'] = 'CUBE'
cubehdr.header.rename_keyword('CTYPE1', 'CTYPE3')
cubehdr.header.rename_keyword('CRPIX1', 'CRPIX3')
cubehdr.header.rename_keyword('CRVAL1', 'CRVAL3')
cubehdr.header.rename_keyword('CD1_1', 'CD3_3')
cubehdr.header.rename_keyword('CUNIT1', 'CUNIT3')
cubehdr = self._insert_cardlist(hdu=cubehdr, insertpoint='EBVGAL',
cardlist=card_FWHM_list, after=True)
cubehdr = self._insert_cardlist(hdu=cubehdr, insertpoint='CUNIT3',
cardlist=card_WCS_list, after=True)
if 'BSCALE' not in list(cubehdr.header.keys()):
cubehdr.header.insert('BUNIT', card_BSCALE, after=False)
if 'BZERO' not in list(cubehdr.header.keys()):
cubehdr.header.insert('BUNIT', card_BZERO, after=False)
try:
card_flux_fail = fits.Card('FAILSLIC', str(self.slice_fail),
'slices failed to converge')
cubehdr.header.insert('ZFWHM', card_flux_fail, after=True)
except:
pass
# IVAR
ivar_hdr = fits.ImageHDU(name='IVAR', data=self.cube_ivar,
header=self.rss.data['IVAR'].header)
ivar_hdr.header['HDUCLAS1'] = 'CUBE'
# MASK
mask_hdr = fits.ImageHDU(name='MASK', data=self.cube_mask,
header=self.rss.data['MASK'].header)
mask_hdr.header['HDUCLAS1'] = 'CUBE'
# DISP
disp_hdr = fits.ImageHDU(name='DISP', data=self.disp,
header=self.rss.data['DISP'].header)
# PREDISP
predisp_hdr = fits.ImageHDU(name='PREDISP', data=self.predisp,
header=self.rss.data['PREDISP'].header)
# IMG & PSF for each band
card_BUNIT = fits.Card('BUNIT', 'nanomaggies/pixel')
loc = ['IFURA', 'IFUDEC', 'OBJRA', 'OBJDEC']
card_loc_list = self._set_cardlist(cubehdr, loc) + [card_BSCALE_2,
card_BZERO_2,
card_BUNIT]
GIMG_hdr = fits.ImageHDU(name='GIMG', data=self.GIMG)
GIMG_hdr = self._insert_cardlist(hdu=GIMG_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_GFWHM],
after=False)
RIMG_hdr = fits.ImageHDU(name='RIMG', data=self.RIMG)
RIMG_hdr = self._insert_cardlist(hdu=RIMG_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_RFWHM],
after=False)
IIMG_hdr = fits.ImageHDU(name='IIMG', data=self.IIMG)
IIMG_hdr = self._insert_cardlist(hdu=IIMG_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_IFWHM],
after=False)
ZIMG_hdr = fits.ImageHDU(name='ZIMG', data=self.ZIMG)
ZIMG_hdr = self._insert_cardlist(hdu=ZIMG_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_ZFWHM],
after=False)
GPSF_hdr = fits.ImageHDU(name='GPSF', data=self.GPSF)
GPSF_hdr = self._insert_cardlist(hdu=GPSF_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_GFWHM],
after=False)
RPSF_hdr = fits.ImageHDU(name='RPSF', data=self.RPSF)
RPSF_hdr = self._insert_cardlist(hdu=RPSF_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_RFWHM],
after=False)
IPSF_hdr = fits.ImageHDU(name='IPSF', data=self.IPSF)
IPSF_hdr = self._insert_cardlist(hdu=IPSF_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_IFWHM],
after=False)
ZPSF_hdr = fits.ImageHDU(name='ZPSF', data=self.ZPSF)
ZPSF_hdr = self._insert_cardlist(hdu=ZPSF_hdr, insertpoint='EXTNAME',
cardlist=card_WCS_list +
card_loc_list + [card_ZFWHM],
after=False)
# CORR
CORR_hdr = []
for i in range(4):
corr = self._table_correlation(correlation=self.cube_corr[i].toarray(), thresh=1E-14)
corr.header.append(fits.Card('BBWAVE', self.bbwave[i],
'Wavelength (Angstroms)'))
corr.header.append(fits.Card('BBINDEX', self.bbindex[i],
'Slice number'))
corr.header.append(fits.Card('COVTYPE', 'Correlation'))
corr.header.append(fits.Card('COVSHAPE', '(%s,%s)' %
(self.nimage, self.nimage)))
CORR_hdr.append(corr)
CORR_hdr[0].header.append(fits.Card('EXTNAME', 'GCORREL'))
CORR_hdr[1].header.append(fits.Card('EXTNAME', 'RCORREL'))
CORR_hdr[2].header.append(fits.Card('EXTNAME', 'ICORREL'))
CORR_hdr[3].header.append(fits.Card('EXTNAME', 'ZCORREL'))
hduRSS = []
for i in range(1, len(self.rss.data)):
if ((self.rss.data[i].header['XTENSION'] == 'IMAGE') and
(len(self.rss.data[i].data) == len(self.rss.data['WAVE'].data))):
hduRSS.append(self.rss.data[i])
try:
# PSF
PSF_hdr = fits.ImageHDU(name='PSF', data=self.cube_psf,
header=cubehdr.header)
hdu = fits.HDUList([hp, cubehdr, PSF_hdr, ivar_hdr, mask_hdr,
disp_hdr, predisp_hdr] + hduRSS +
[self.rss.data['OBSINFO'],
GIMG_hdr, RIMG_hdr, IIMG_hdr, ZIMG_hdr,
GPSF_hdr, RPSF_hdr, IPSF_hdr, ZPSF_hdr] +
CORR_hdr)
except:
hdu = fits.HDUList([hp, cubehdr, ivar_hdr, mask_hdr,
self.rss.data['WAVE'],
self.rss.data['SPECRES'],
self.rss.data['SPECRESD'],
self.rss.data['OBSINFO'],
GIMG_hdr, RIMG_hdr, IIMG_hdr, ZIMG_hdr,
GPSF_hdr, RPSF_hdr, IPSF_hdr, ZPSF_hdr] +
CORR_hdr)
# Doesn't actually gzip??
if filename and filename.endswith('.fits.gz'):
data = filename
else:
data = filename + ".fits.gz"
hdu.writeto(data, overwrite=True, checksum=True)
hdu.close()
return
def _insert_cardlist(self, hdu=None, insertpoint=None, cardlist=None,
after=False):
"""insert a cardlist into the header of a FITS hdu
Parameters:
----------
hdu: FITS hdu
insertpoint: int
The index into the list of header keywords before which
the new keyword should be inserted, or the name of a
keyword before which the new keyword should be inserted.
Can also accept a (keyword, index) tuple for inserting
around duplicate keywords.
cardlist: list
list of header cards to be inserted. Header
cards will be inserted as the order of the list
after: bool
If set to True, insert after the specified index or
keyword, rather than before it. Defaults to False.
Return:
--------
hdu: fits file that have the cards inserted
"""
for i in range(len(cardlist)):
if after:
hdu.header.insert(insertpoint, cardlist[i], after=after)
insertpoint = cardlist[i].keyword
else:
hdu.header.insert(insertpoint, cardlist[i], after=after)
return hdu
def _set_cardlist(self, hdu=None, keyword_list=None):
""" Extract header card list from a FITS hdu
Parameters:
----------
hdu: FITS hdu
keyword_list: list
keywords to be extracted, including value and comments
Return:
--------
cardlist: list
cards of FITS headers
"""
cardlist = []
for index, keyword in enumerate(keyword_list):
cardlist.append(fits.Card(keyword, hdu.header[keyword],
hdu.header.comments[keyword]))
return cardlist
def _table_correlation(self, correlation=None, thresh=1E-12):
"""create a BinTableHDU for the correlation in sparse matrix form
Parameters:
----------
correlation: ndarray of float32
[nside*nside,nside*nside] correlation matrix
thresh: float32
threshold for the correlation entries to be stored.
Return:
--------
hdu: BinTableHDU that includes the information of the correlation matrix
Note:
--------
The five columns of the table are the grid location (C1, C2) of the
first pixel, the grid location (C1, C2) of the second pixel, and the
correlation value RHOIJ.
"""
nside = int(np.sqrt(correlation.shape[0]))
index_G = np.where(np.abs(correlation) > thresh)
corr_G = correlation[index_G]
triangle = np.where(index_G[1] >= index_G[0])[0]
index_G = np.array([index_G[0][triangle], index_G[1][triangle]])
corr_G = corr_G[triangle]
i_c1, i_c2, j_c1, j_c2 = [[] for i in range(4)]
for i in range(len(corr_G)):
i_c2.append(index_G[0, i] // nside)
i_c1.append(index_G[0, i] % nside)
j_c2.append(index_G[1, i] // nside)
j_c1.append(index_G[1, i] % nside)
i1 = fits.Column(name='INDXI_C1', array=np.array(i_c1), format='J')
i2 = fits.Column(name='INDXI_C2', array=np.array(i_c2), format='J')
j1 = fits.Column(name='INDXJ_C1', array=np.array(j_c1), format='J')
j2 = fits.Column(name='INDXJ_C2', array=np.array(j_c2), format='J')
value = fits.Column(name='RHOIJ', array=np.array(corr_G), format='D')
hdu = fits.BinTableHDU.from_columns([i1, i2, j1, j2, value])
return hdu
class ReconstructShepard(Reconstruct):
"""Reconstruction of cubes from Shepards method
Attributes:
----------
plate : int, np.int32
plate number
ifu : int, np.int32
IFU number
nfiber : int
number of fibers
release : str
data release (default 'DR15')
rss : RSS object
Marvin RSS output
waveindex : int, np.int32
indices of wavelengths to reconstruct (default None)
Notes:
------
Unless waveindex is set, uses all wavelengths.
"""
def create_weights(self, xsample=None, ysample=None,
ivar=None, waveindex=None, shepard_sigma=0.7):
"""Calculate weights for Shepards method
Parameters:
----------
xsample : ndarray of np.float32
X position of samples
ysample : ndarray of np.float32
Y position of samples
ivar : ndarray of np.float32
inverse variance of samples
shepard_sigma : float, np.float32
sigma for Gaussian in kernel, in arcsec (default 0.7)
Returns:
-------
w0 : ndarray of np.float32
unnormalized weights without bad fibers, [nExp * nfiber,nside * nside]
wwT : ndarray of np.float32
normalized weights [nside * nside, nExp * nfiber]
Notes:
-----
This version uses Shepards method.
"""
nsample = len(xsample)
dx = (np.outer(xsample, np.ones(self.nimage, dtype=np.float32)) -
np.outer(
|
np.ones(nsample, dtype=np.float32)
|
numpy.ones
|
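# Hedged, self-contained sketch of Shepard-style weighting as in create_weights above:
# Gaussian weights from each fiber-exposure to each output pixel, then normalized so
# every pixel's weights sum to one. The radius cutoff and exact kernel used by the
# class may differ; the fiber positions and grid below are random placeholders.
import numpy as np

def shepard_weights(xsample, ysample, xgrid, ygrid, sigma=0.7, rmax=1.6):
    dx = xsample[:, None] - xgrid.ravel()[None, :]
    dy = ysample[:, None] - ygrid.ravel()[None, :]
    r2 = dx**2 + dy**2
    w = np.exp(-0.5 * r2 / sigma**2)
    w[r2 > rmax**2] = 0.0                       # drop distant fibers (assumed cutoff)
    wsum = w.sum(axis=0)
    ww = np.divide(w, wsum, out=np.zeros_like(w), where=wsum > 0)
    return ww.T                                 # [npix, nsample], rows sum to 1 (or 0)

xi = yi = np.linspace(-8, 8, 21)
x2i, y2i = np.meshgrid(xi, yi)
rng = np.random.RandomState(0)
wwT = shepard_weights(rng.uniform(-8, 8, 200), rng.uniform(-8, 8, 200), x2i, y2i)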
import os
import numpy as np
import autograd.numpy as anp
import pandas as pd
import plac
from autograd import grad
from autograd import elementwise_grad as egrad
from drforest.ensemble import DimensionReductionForestRegressor
from drforest.dimension_reduction import (
SlicedAverageVarianceEstimation, SlicedInverseRegression)
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics.pairwise import euclidean_distances
n_samples = 2000
n_features = 10
n_points = 100
signal_to_noise = 3
n_iter = 5
OUT_DIR = 'lsvi_results'
if not os.path.exists(OUT_DIR):
os.mkdir(OUT_DIR)
def run_lsvi_sim(dataset_name):
if dataset_name == 'sim1':
def func(X):
return anp.abs(X[:, 0]) + anp.abs(X[:, 1])
elif dataset_name == 'sim2':
def func(X):
return X[:, 0] + X[:, 1] ** 2
elif dataset_name == 'sim3':
def func(X):
scale = 0.25
return (5 * anp.maximum(
anp.exp(-scale * X[:, 0] ** 2),
anp.exp(-scale * X[:, 1] ** 2)))
elif dataset_name == 'sim4':
def func(X):
r1 = X[:, 0] - X[:, 1]
r2 = X[:, 0] + X[:, 1]
return (20 * anp.maximum(
anp.maximum(anp.exp(-2 * r1 ** 2), anp.exp(-r2 ** 2)),
2 * anp.exp(-0.5 * (X[:, 0] ** 2 + X[:, 1] ** 2))))
else:
raise ValueError('Unrecognized dataset')
grad_func = egrad(func)
def true_directions(X0):
true_dir = grad_func(X0)
return true_dir / np.linalg.norm(true_dir, axis=1, keepdims=True)
drf_metrics = np.zeros((n_iter, 2))
drf_k_metrics = np.zeros((n_iter, 2))
drf_max_metrics = np.zeros((n_iter, 2))
drf_k_max_metrics = np.zeros((n_iter, 2))
save_metrics = np.zeros((n_iter, 2))
sir_metrics = np.zeros((n_iter, 2))
local_sir_metrics = np.zeros((n_iter, 2))
for idx in range(n_iter):
rng = np.random.RandomState(123 * idx)
if dataset_name == 'linear':
cov = np.zeros((n_features, n_features))
cov[:4, :4] = 0.9
cov[np.diag_indices_from(cov)] = 1.
X = rng.multivariate_normal(
mean=np.zeros(n_features),
cov=cov,
size=n_samples)
else:
X = rng.uniform(
-3, 3, n_samples * n_features).reshape(n_samples, n_features)
dist = euclidean_distances(X, X)
y = func(X)
if dataset_name == 'linear':
sigma = 0.5
else:
sigma = np.var(y) / signal_to_noise
y += np.sqrt(sigma) * rng.randn(n_samples)
forests = []
for min_samples_leaf in [3, 10, 25, 50, 100]:
forests.append(DimensionReductionForestRegressor(
store_X_y=True, n_jobs=-1,
min_samples_leaf=min_samples_leaf,
random_state=42).fit(X, y))
forest_max = []
if n_features > 5:
for min_samples_leaf in [3, 10, 25, 50, 100]:
forest_max.append(DimensionReductionForestRegressor(
store_X_y=True, n_jobs=-1,
min_samples_leaf=min_samples_leaf, max_features=5,
random_state=42).fit(X, y))
neighbors = NearestNeighbors(metric='euclidean').fit(X)
save = SlicedAverageVarianceEstimation().fit(X, y)
save_dir = save.directions_[0].reshape(-1, 1)
sir = SlicedInverseRegression().fit(X, y)
sir_dir = sir.directions_[0].reshape(-1, 1)
# sample n_point indices and check directions
indices = rng.choice(np.arange(n_samples), replace=False, size=n_points)
pred_dirs = []
pred_dirs_k = []
for forest in forests:
pred_dirs.append(forest.local_principal_direction(
X[indices], n_jobs=-1))
pred_dirs_k.append(forest.local_principal_direction(
X[indices], k=2, n_jobs=-1))
pred_dirs_max = []
pred_dirs_max_k = []
if n_features > 5:
for forest in forest_max:
pred_dirs_max.append(
forest.local_principal_direction(
X[indices], n_jobs=-1))
pred_dirs_max_k.append(
forest.local_principal_direction(
X[indices], k=2, n_jobs=-1))
else:
pred_dirs_max = pred_dirs
pred_dirs_max_k = pred_dirs_k
true_dir = true_directions(X[indices])
frob_norm = np.zeros(n_points)
frob_norm[:] = np.inf
trcor = np.zeros(n_points)
trcor[:] = -np.inf
frob_norm_k = np.zeros(n_points)
frob_norm_k[:] = np.inf
trcor_k = np.zeros(n_points)
trcor_k[:] = -np.inf
frob_norm_max = np.zeros(n_points)
frob_norm_max[:] = np.inf
trcor_max = np.zeros(n_points)
trcor_max[:] = -np.inf
frob_norm_max_k =
|
np.zeros(n_points)
|
numpy.zeros
|
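# Hedged sketch (the actual metric code is truncated above, so this is an assumption about
# what frob_norm and trcor might hold): two common ways to compare an estimated direction
# with the true local gradient direction -- the Frobenius distance between their rank-1
# projection matrices, and the trace correlation (the squared cosine of the angle).
import numpy as np

def direction_metrics(est_dir, true_dir):
    est_dir = est_dir / np.linalg.norm(est_dir)
    true_dir = true_dir / np.linalg.norm(true_dir)
    P_est = np.outer(est_dir, est_dir)
    P_true = np.outer(true_dir, true_dir)
    frob = np.linalg.norm(P_est - P_true)
    trcor = np.trace(P_est @ P_true)            # equals cos^2 of the angle between them
    return frob, trcor

frob, trcor = direction_metrics(np.array([1.0, 0.1, 0.0]), np.array([1.0, 0.0, 0.0]))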
import numpy as np
import matplotlib.pyplot as plt
import treecorr as tc
import astropy.table
import healpy as hp
from corrLSS.util import apply_mask,radec2thetaphi
# Cosmology
def set_cosmology():
Omega_matter = 0.140247/0.6800232**2
Omega_baryon = 0.022337/0.6800232**2
Omega_curvature = 0
H0 = 68.002320
sigma_8 = 0.811322
n_s = 0.963180
from astropy.cosmology import FlatLambdaCDM
cosmo=FlatLambdaCDM(H0=H0,Om0=Omega_matter)
return cosmo
def arrange_catalog(catfile,rndfile=None,zmin=None,zmax=None,objtype=None,truthfile=None):
"""
Use treecorr to evaluate two point correlation given a data catalog and a random catalog
"""
print("Reading data catalog")
#datatab=astropy.table.Table.read(catfile)
cat=astropy.io.fits.open(catfile)
datacat=cat[1].data
try:
z_data=datacat['Z_COSMO']
print("Using Z_COSMO for z")
except:
try:
z_data=datacat['TRUEZ']
print("Using TRUEZ for z")
except:
try:
z_data=datacat['Z']
print("Using Z for z")
except:
raise ValueError("None of the specified z-types match. Check fits header")
if truthfile is not None: #- required to match targetid for ra,dec
tru=astropy.io.fits.open(truthfile)
trucat=tru[1].data
truid=trucat['TARGETID']
dataid=datacat['TARGETID']
#- map targetid sorted as in dataid
tt=np.argsort(truid)
ss=np.searchsorted(truid[tt],dataid)
srt_idx=tt[ss]
np.testing.assert_array_equal(truid[srt_idx],dataid)
print("100% targets matched for data catalog")
ra_data=trucat['RA'][srt_idx]
dec_data=trucat['DEC'][srt_idx]
else:
ra_data=datacat['ra']
dec_data=datacat['dec']
if objtype is not None:
try:
kk=np.where(datacat['SOURCETYPE']==objtype)[0]
print("Using sourcetype {}".format(objtype))
except:
try:
kk=np.where(datacat['SPECTYPE']==objtype)[0]
print("Using spectype {}".format(objtype))
except:
print("Objtype doesn't match header key. Check fits header")
print("Total {} in the data: {}".format(objtype,len(kk)))
print("Total {} in the data: {}".format(objtype,len(kk)))
ra_data=ra_data[kk]
dec_data=dec_data[kk]
z_data=z_data[kk]
cosmo=set_cosmology()
if zmin is None: zmin=np.min(z_data)
if zmax is None: zmax=np.max(z_data)
print("zmin:{} to zmax: {}".format(zmin,zmax))
#TODO: make this loop over different redshift bins to avoid re-reading the catalogs each time
wh=np.logical_and(z_data>zmin,z_data<zmax)
ngal=np.count_nonzero(wh)
print("Bin contains: {} galaxies".format(np.count_nonzero(wh)))
print(cosmo.H0)
cmvr_data=cosmo.comoving_distance(z_data[wh])*cosmo.H0.value/100.
dmin,dmax=cosmo.comoving_distance([zmin,zmax])*cosmo.H0.value/100.
print("Dmin to Dmax: {} to {}".format(dmin,dmax))
print("Organizing data catalog to use")
datacat=make_catalog(ra_data[wh],dec_data[wh],cmvr_data)
if rndfile is not None:
print("Reading random catalog")
#rndtab=astropy.table.Table.read(rndfile)
rnd=astropy.io.fits.open(rndfile)
rndtab=rnd[1].data
z_rnd=rndtab['z']
ra_rnd=rndtab['ra']
dec_rnd=rndtab['dec']
whr=np.logical_and(z_rnd>zmin,z_rnd<zmax)
nran=np.count_nonzero(whr)
print("Bin Contains: {} random objects".format( np.count_nonzero(whr)))
cmvr_rnd=cosmo.comoving_distance(z_rnd[whr])*cosmo.H0.value/100.
print("Organizing random catalog to use")
rndcat=make_catalog(ra_rnd[whr],dec_rnd[whr],cmvr_rnd)
return datacat, rndcat
else:
return datacat
def correlate_tc(datacat,rndcat,outfile,cutoff=None):
"""
datacat and rndcat are tc.Catalog objects
"""
print("Auto correlating data")
dd=tc.NNCorrelation(min_sep=0.1,bin_size=0.025,max_sep=180.)
dd.process(datacat)
print("Auto correlating random")
rr=tc.NNCorrelation(min_sep=0.1,bin_size=0.025,max_sep=180.)
rr.process(rndcat)
print("Cross Correlating")
dr=tc.NNCorrelation(min_sep=0.1,bin_size=0.025,max_sep=180.)
dr.process(datacat,rndcat)
print("Calculating 2-pt. correlation")
xi,xivar=dd.calculateXi(rr,dr)
tab=astropy.table.Table([np.exp(dd.logr),xi,xivar],names=('r','xi','xivar'))
tab.write(outfile,overwrite=True)
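# Illustrative end-to-end sketch (added for clarity; not part of the original
# module). File names, redshift cuts and the object type are placeholder
# assumptions: build treecorr catalogs with arrange_catalog() and pass them to
# correlate_tc().
def _example_correlation_pipeline():
    datacat, rndcat = arrange_catalog('data_catalog.fits',
                                      rndfile='random_catalog.fits',
                                      zmin=0.6, zmax=0.8, objtype='ELG')
    correlate_tc(datacat, rndcat, 'xi_0.6_0.8.fits')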
def random_data_xyz(datacat,bandwidth=0.2,format='xyz'):
"""
data cat is treecorr catalog object and should have x, y, and z
random is created here in xyz
"""
from scipy.stats import gaussian_kde
if format=='xyz':
values=np.vstack([datacat.x,datacat.y,datacat.z])
kde=gaussian_kde(values,bw_method=bandwidth/values.std(ddof=1))
nx,ny,nz=kde.resample(2*len(datacat.z))
randcat=tc.Catalog(x=nx,y=ny,z=nz)
elif format=='radecr':
values=np.vstack([datacat.ra/datacat.ra_units,datacat.dec/datacat.dec_units,datacat.r])
kde=gaussian_kde(values,bw_method=bandwidth/values.std(ddof=1))
nra,ndec,nr=kde.resample(2*len(datacat.r))
randcat=tc.Catalog(ra=nra,dec=ndec,ra_units='deg',dec_units='deg',r=nr)
return randcat
def make_catalog(ra,dec,cmvr=None): #- ra, dec in degrees
cat=tc.Catalog(ra=ra,dec=dec,r=cmvr,ra_units='deg',dec_units='deg')
return cat
def two_point(data,data_R,bins,method='landy-szalay',seed=1234,saverandom=False):
"""
Uses nearest neighbors KDtree to evaluate two point correlation
args:
data: n samples x m features data array, e.g. x, y, z positions
data_R: comparison random sample with the same n_features, or None to
build one by shuffling all but one feature column of data
bins: 1d array of bin edges
return:
two-pt correlation for the given method.
Errors are not returned. A bootstrap sampling can be run N times to
evaluate errors.
"""
from sklearn.neighbors import KDTree
data = np.asarray(data)
bins = np.asarray(bins)
rng = np.random.RandomState(seed)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
n_samples, n_features = data.shape
Nbins = len(bins) - 1
# shuffle all but one axis to get background distribution
if data_R is None:
data_R = data.copy()
for i in range(n_features - 1):
rng.shuffle(data_R[:, i])
else:
data_R = np.asarray(data_R)
if (data_R.ndim != 2) or (data_R.shape[-1] != n_features):
raise ValueError('data_R must have same n_features as data')
factor = len(data_R) * 1. / len(data)
KDT_D=KDTree(data)
KDT_R=KDTree(data_R)
print("Correlating Data, data size: {}".format(len(data)))
counts_DD=KDT_D.two_point_correlation(data,bins)
print('Correlating Random, random size: {}'.format(len(data_R)))
counts_RR=KDT_R.two_point_correlation(data_R,bins)
DD=np.diff(counts_DD)
RR=np.diff(counts_RR)
#- Check for zero in RR
RR_zero = (RR == 0)
RR[RR_zero]=1
if method == 'standard':
corr = factor**2*DD/RR - 1
elif method == 'landy-szalay':
print("Cross Correlating")
counts_DR=KDT_R.two_point_correlation(data,bins)
DR=np.diff(counts_DR)
print("Evaluating correlation using {}".format(method))
corr = (factor**2 * DD - 2 * factor * DR + RR)/RR
corr[RR_zero] = np.nan
return corr
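# Hedged usage sketch (added; not in the original source): estimate the
# Landy-Szalay correlation of a small uniform 3-D point set and bootstrap the
# errors as suggested in the docstring above. Sample sizes and bin edges are
# arbitrary illustration values.
def _example_two_point_bootstrap(n_boot=20, seed=1234):
    rng = np.random.RandomState(seed)
    data = rng.random_sample((500, 3))
    data_R = rng.random_sample((1000, 3))
    bins = np.linspace(0.05, 0.5, 10)
    corr = two_point(data, data_R, bins, method='landy-szalay')
    boots = [two_point(data[rng.randint(0, len(data), len(data))], data_R, bins,
                       method='landy-szalay') for _ in range(n_boot)]
    return corr, np.nanstd(boots, axis=0)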
def extract_catalog(catalog,zmin=None,zmax=None):
print("Reading catalog.")
tab = astropy.table.Table.read(catalog)
ra = tab['RA']
dec = tab['DEC']
z = tab['Z']
print("Objects in catalog: {}".format(len(z)))
if zmin is None: zmin=np.min(z)
if zmax is None: zmax=np.max(z)
sel=
|
np.where((z >= zmin) & (z < zmax))
|
numpy.where
|
"""
Library of functions for fitting populations curves.
All fits assume the final 'state' is unpopulated in the beginning,
and the sum of populations of other 'states' is unity. The functions
return an array of size nstates x ntimes (always 2D, even if nstates == 1).
Sequential:  A --k1--> B --k2--> ...
Equilibria:  A <=(k1, k-1)=> B <=(k2, k-2)=> ...
Deriving these equations involves solving systems of ODEs. For a concise
review, see https://pubs.acs.org/doi/abs/10.1021/ed076p1578
"""
import numpy as np
varlbls = dict(
single_exp = ['tau1'],
single_expc = ['tau1', 'c'],
single_biexp = ['amp1', 'tau1', 'amp2', 'tau2'],
single_triexp = ['amp1', 'tau1', 'amp2', 'tau2', 'amp3', 'tau3'],
double_seq = ['tau1'],
double_eq = ['tau1', 'tau-1'],
triple_seq = ['A0', 'tau1', 'tau2'],
triple_1steq = ['A0', 'tau1', 'tau-1', 'tau2'],
triple_2ndeq = ['A0', 'tau1', 'tau2', 'tau-2'],
triple_alleq = ['A0', 'tau1', 'tau-1', 'tau2', 'tau-2']
)
def get_labels(name):
"""Returns a list of variable labels based on function name."""
if 'delay_' in name:
basename = name.replace('delay_', '')
return ['t0'] + varlbls[basename]
else:
return varlbls[name]
def single_exp(x, tau1):
"""Returns an exponential function."""
return np.atleast_2d(1 - np.exp(-x / tau1))
def single_expc(x, tau1, c):
"""Returns an exponential function plus a constant."""
return np.atleast_2d(1 - np.exp(-x / tau1) - c)
def single_biexp(x, a1, tau1, a2, tau2):
"""Returns a biexponential function."""
return np.atleast_2d(1 - a1*np.exp(-x / tau1) - a2*np.exp(-x / tau2))
def single_triexp(x, a1, tau1, a2, tau2, a3, tau3):
"""Returns a triexponential function."""
return np.atleast_2d(1 - a1*np.exp(-x / tau1) - a2*np.exp(-x / tau2) -
a3*np.exp(-x / tau3))
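# Hedged fitting sketch (added for illustration; scipy is an assumption here,
# it is not imported by the original module). The population functions above
# return arrays of shape (nstates, ntimes), so they are flattened before being
# handed to curve_fit.
def _example_fit_single_exp(times, populations, guess=1.0):
    from scipy.optimize import curve_fit
    popt, pcov = curve_fit(lambda t, tau1: single_exp(t, tau1).ravel(),
                           times, np.ravel(populations), p0=[guess])
    return popt, pcov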
def double_seq(x, tau1):
"""Returns reactant-product sequential functions."""
ab = _empty_a(2, x)
ab[1] = np.exp(-x / tau1)
ab[0] = 1 - ab[1]
return ab
def double_eq(x, tau1, taum1):
"""Returns reactant-product equilibrium functions."""
ab = _empty_a(2, x)
ttau = tau1 + taum1
ab[1] = (1/ttau)*(tau1 + taum1*np.exp(-ttau*x / (tau1*taum1)))
ab[0] = 1 - ab[1]
return ab
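# Added derivation note (sketch) for double_eq above, with A = ab[1] the initial
# state and B = ab[0] the product:
#   dA/dt = -k1*A + k-1*B,  B = 1 - A,  A(0) = 1
#   =>  dA/dt = -(k1 + k-1)*A + k-1
#   =>  A(t) = (k-1 + k1*exp(-(k1 + k-1)*t)) / (k1 + k-1)
# Substituting k1 = 1/tau1 and k-1 = 1/tau-1 gives the coded expression, since
# k1 + k-1 = (tau1 + tau-1)/(tau1*tau-1) and k-1/(k1 + k-1) = tau1/(tau1 + tau-1).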
def triple_seq(x, a0, tau1, tau2):
"""Returns reactant-intermediate-product sequential functions."""
abc = _empty_a(3, x)
abc[2] = a0*np.exp(-x/tau1)
abc[1] = a0*tau2/(tau1 - tau2) * (np.exp(-x/tau1) -
np.exp(-x/tau2)) + (1-a0)*np.exp(-x/tau2)
abc[0] = 1 - abc[2] - abc[1]
return abc
def triple_1steq(x, a0, tau1, taum1, tau2):
"""Returns reactant-intermediate-product with R-I equilibrium."""
abc = _empty_a(3, x)
k1, km1, k2 = 1/tau1, 1/taum1, 1/tau2
s = k1 + km1 + k2
c = k1*k2
g1 = 0.5*(s + np.sqrt(s**2 - 4*c))
g2 = 0.5*(s - np.sqrt(s**2 - 4*c))
abc[2] = ((a0*(g1 - km1 - k2) - (1-a0)*km1)/(g1 - g2) * np.exp(-g1*x) +
(a0*(g2 - km1 - k2) - (1-a0)*km1)/(g2 - g1) * np.exp(-g2*x))
abc[1] = ((-a0*k1 + (1-a0)*(g1 - k1))/(g1 - g2) * np.exp(-g1*x) +
(-a0*k1 + (1-a0)*(g2 - k1))/(g2 - g1) * np.exp(-g2*x))
abc[0] = 1 - abc[2] - abc[1]
return abc
def triple_2ndeq(x, a0, tau1, tau2, taum2):
"""Returns reactant-intermediate-product with I-P equilibrium."""
abc = _empty_a(3, x)
k1, k2, km2 = 1/tau1, 1/tau2, 1/taum2
s = k1 + k2 + km2
c = k1*(k2 + km2)
g1 = 0.5*(s + np.sqrt(s**2 - 4*c))
g2 = 0.5*(s - np.sqrt(s**2 - 4*c))
abc[2] = a0*np.exp(-x/tau1)
abc[1] = ((a0*k1*(km2 - g1) +
(1-a0)*(g1 - k1)*(g1 - km2))/(g1*(g1 - g2)) * np.exp(-g1*x) +
(a0*k1*(km2 - g2) +
(1-a0)*(g2 - k1)*(g2 - km2))/(g2*(g2 - g1)) *
|
np.exp(-g2*x)
|
numpy.exp
|
import random
import keras.backend as K
import math
# number of rows of model outputs
shape_r_out = 480
# number of cols of model outputs
shape_c_out = 640
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def generate_dummy(size=14, num_fixations=100, num_salience_points=200):
# first generate dummy gt and salience map
discrete_gt = np.zeros((size, size))
s_map = np.zeros((size, size))
for i in range(0, num_fixations):
discrete_gt[np.random.randint(size), np.random.randint(size)] = 1.0
for i in range(0, num_salience_points):
s_map[np.random.randint(size), np.random.randint(size)] = 255 * round(random.random(), 1)
# check if gt and s_map are same size
assert discrete_gt.shape == s_map.shape, 'sizes of ground truth and salience map don\'t match'
return s_map, discrete_gt
def normalize_map(s_map):
# normalize the salience map (as done in MIT code)
norm_s_map = (s_map - np.min(s_map)) / ((np.max(s_map) - np.min(s_map)) * 1.0)
return norm_s_map
def discretize_gt(gt):
import warnings
warnings.warn('can improve the way GT is discretized')
return gt / 255
def auc_judd(s_map, gt):
# ground truth is discrete, s_map is continous and normalized
# gt = discretize_gt(gt)
# thresholds are calculated from the salience map, only at places where fixations are present
thresholds = []
for i in range(0, gt.shape[0]):
for k in range(0, gt.shape[1]):
if gt[i][k] > 0:
thresholds.append(s_map[i][k])
num_fixations = np.sum(gt)
# num fixations is no. of salience map values at gt >0
thresholds = sorted(set(thresholds))
# fp_list = []
# tp_list = []
area = []
area.append((0.0, 0.0))
for thresh in thresholds:
# in the salience map, keep only those pixels with values above threshold
temp = np.zeros(s_map.shape)
temp[s_map >= thresh] = 1.0
assert np.max(gt) <= 1.0, 'something is wrong with ground truth..not discretized properly max value > 1.0'
assert np.max(s_map) <= 1.0, 'something is wrong with salience map..not normalized properly max value > 1.0'
num_overlap = np.where(np.add(temp, gt) == 2)[0].shape[0]
tp = num_overlap / (num_fixations * 1.0)
# total number of pixels > threshold - number of pixels that overlap with gt / total number of non fixated pixels
# this becomes nan when gt is full of fixations..this won't happen
fp = (np.sum(temp) - num_overlap) / ((np.shape(gt)[0] * np.shape(gt)[1]) - num_fixations)
area.append((round(tp, 4), round(fp, 4)))
# tp_list.append(tp)
# fp_list.append(fp)
# tp_list.reverse()
# fp_list.reverse()
area.append((1.0, 1.0))
# tp_list.append(1.0)
# fp_list.append(1.0)
# print tp_list
area.sort(key=lambda x: x[0])
tp_list = [x[0] for x in area]
fp_list = [x[1] for x in area]
return np.trapz(np.array(tp_list), np.array(fp_list))
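# Illustrative sketch (added; not in the original file): run AUC-Judd on the
# dummy data produced by generate_dummy(), normalizing the salience map first
# since auc_judd() asserts values lie in [0, 1].
def _example_auc_judd_on_dummy():
    s_map, discrete_gt = generate_dummy()
    return auc_judd(normalize_map(s_map), discrete_gt)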
def auc_borji(s_map, gt, splits=100, stepsize=0.1):
# gt = discretize_gt(gt)
num_fixations = np.sum(gt)
num_pixels = s_map.shape[0] * s_map.shape[1]
random_numbers = []
for i in range(0, splits):
temp_list = []
for k in range(0, int(num_fixations)):
temp_list.append(np.random.randint(num_pixels))
random_numbers.append(temp_list)
aucs = []
# for each split, calculate auc
for i in random_numbers:
r_sal_map = []
for k in i:
r_sal_map.append(s_map[int(k % s_map.shape[0] - 1), int(k / s_map.shape[0])])
# in these values, we need to find thresholds and calculate auc
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
r_sal_map = np.array(r_sal_map)
# once thresholds are obtained
thresholds = sorted(set(thresholds))
area = []
area.append((0.0, 0.0))
for thresh in thresholds:
# in the salience map, keep only those pixels with values above threshold
temp = np.zeros(s_map.shape)
temp[s_map >= thresh] = 1.0
num_overlap = np.where(np.add(temp, gt) == 2)[0].shape[0]
tp = num_overlap / (num_fixations * 1.0)
# fp = (np.sum(temp) - num_overlap)/((np.shape(gt)[0] * np.shape(gt)[1]) - num_fixations)
# number of values in r_sal_map, above the threshold, divided by num of random locations = num of fixations
fp = len(np.where(r_sal_map > thresh)[0]) / (num_fixations * 1.0)
area.append((round(tp, 4), round(fp, 4)))
area.append((1.0, 1.0))
area.sort(key=lambda x: x[0])
tp_list = [x[0] for x in area]
fp_list = [x[1] for x in area]
aucs.append(np.trapz(np.array(tp_list), np.array(fp_list)))
return np.mean(aucs)
def auc_shuff(s_map, gt, shufMap, stepSize=.01):
"""
Compute SAUC (shuffled AUC) score. A simple implementation
:param gt : list of fixation annotations
:param s_map : list only contains one element: the result annotation - predicted saliency map
:return score: int : score
"""
# resAnn = resAnn / 254
salMap = s_map - np.min(s_map)
if np.max(salMap) > 0:
salMap = salMap / (np.max(salMap) - np.min(salMap))
xd, yd = np.where(gt > 0)
Sth = np.asarray([salMap[x][y] for x, y in zip(xd, yd)])
Nfixations = len(xd)
others = np.copy(shufMap)
for x, y in zip(xd, yd):
others[x][y] = 0
ind = np.nonzero(others) # find fixation locations on other images
nFix = shufMap[ind]
randfix = salMap[ind]
Nothers = sum(nFix)
allthreshes = np.arange(0, np.max(np.concatenate((Sth, randfix), axis=0)), stepSize)
allthreshes = allthreshes[::-1]
tp = np.zeros(len(allthreshes) + 2)
fp = np.zeros(len(allthreshes) + 2)
tp[-1] = 1.0
fp[-1] = 1.0
tp[1:-1] = [float(np.sum(Sth >= thresh)) / Nfixations for thresh in allthreshes]
fp[1:-1] = [float(np.sum(nFix[randfix >= thresh])) / Nothers for thresh in allthreshes]
auc = np.trapz(tp, fp)
return auc
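# Hedged sketch (added): a shuffled-AUC call in which the shuffle map is simply
# the fixation map of a second, independent dummy image; in practice shufMap
# would accumulate fixations from many other images.
def _example_auc_shuff_on_dummy():
    s_map, gt = generate_dummy()
    _, other_gt = generate_dummy()
    return auc_shuff(normalize_map(s_map), gt, other_gt)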
def nss_metric(gt, s_map):
# gt = discretize_gt(gt)
s_map_norm = (s_map - np.mean(s_map)) /
|
np.std(s_map)
|
numpy.std
|
import numpy as np
import scipy.stats as stats
import scipy.linalg
import pytest
import open_cp.kernels as testmod
import open_cp.data
import unittest.mock as mock
import shapely.geometry
def slow_gaussian_kernel_new(pts, mean, var):
"""Test case where `pts`, `mean`, `var` are all of shape 2."""
assert(len(pts.shape) == 2 and len(mean.shape) == 2 and len(var.shape) == 2)
space_dim = pts.shape[0]
num_pts = pts.shape[1]
num_samples = mean.shape[1]
assert(space_dim == mean.shape[0])
assert((space_dim, num_samples) == var.shape)
out = np.empty(num_pts)
for i in range(num_pts):
total = np.empty(num_samples)
for j in range(num_samples):
prod = np.empty(space_dim)
for k in range(space_dim):
v = var[k][j] * 2
prod[k] = np.exp(- (pts[k][i] - mean[k][j]) **
2 / v) / np.sqrt(np.pi * v)
total[j] = np.product(prod)
out[i] = np.mean(total)
return out
def test_slow_gaussian_kernel_single_new():
pts = np.empty((1, 1))
pts[0][0] = 1
mean = np.empty((1, 1))
mean[0][0] = 0.5
var = np.empty((1, 1))
var[0][0] = 3
expected = np.exp(-0.25 / 6) / np.sqrt(6 * np.pi)
got = slow_gaussian_kernel_new(pts, mean, var)
np.testing.assert_allclose(expected, got)
def test_compare_GaussianKernel():
for k in range(1, 6):
for M in range(1, 6):
mean = np.random.random(size=(k,M))
var = 0.0001 + np.random.random(size=(k,M))**2
kernel = testmod.GaussianKernel(mean, var)
for N in range(1, 6):
pts = np.random.random(size=(k,N))
want = slow_gaussian_kernel_new(pts, mean, var)
got = kernel(pts)
print(k,M,N)
np.testing.assert_allclose(got, want)
# Single point case
pts = np.random.random(size=k)
want = slow_gaussian_kernel_new(pts[:,None], mean, var)[0]
got = kernel(pts)
print("Single point case k={}, M={}".format(k,M))
assert want == pytest.approx(got)
def test_compare_GaussianKernel_k1_case():
for M in range(1, 6):
mean = np.random.random(size=M)
var = 0.0001 + np.random.random(size=M)**2
kernel = testmod.GaussianKernel(mean, var)
for N in range(1, 6):
pts = np.random.random(size=N)
want = slow_gaussian_kernel_new(pts[None,:], mean[None,:], var[None,:])
got = kernel(pts)
print(M,N)
np.testing.assert_allclose(got, want)
# Single point case
print("Single point case, M={}".format(M))
pts = np.random.random()
want = slow_gaussian_kernel_new(np.asarray(pts)[None,None], mean[None,:], var[None,:])[0]
got = kernel(pts)
assert want == pytest.approx(got)
def test_1D_kth_distance():
coords = [0,1,2,3,6,7,9,15]
distances = testmod.compute_kth_distance(coords, k=3)
np.testing.assert_allclose(distances, [3,2,2,3,3,4,6,9])
def test_2D_kth_distance():
coords = [[0,0,1,1],[0,1,0,2]]
distances = testmod.compute_kth_distance(coords, k=2)
np.testing.assert_allclose(distances, [1,np.sqrt(2),np.sqrt(2),2])
def slow_kth_nearest(points, index):
"""(k, N) input. Returns ordered list [0,...] of distance to kth nearest point from index"""
if len(points.shape) == 1:
points = points[None, :]
pt = points[:, index]
distances = np.sqrt(np.sum((points - pt[:,None])**2, axis=0))
distances.sort()
return distances
def test_slow_kth_nearest():
pts = np.array([1,2,4,5,7,8,9])
got = slow_kth_nearest(pts, 0)
np.testing.assert_array_equal(got, [0,1,3,4,6,7,8])
got = slow_kth_nearest(pts, 3)
np.testing.assert_array_equal(got, [0,1,2,3,3,4,4])
got = slow_kth_nearest(pts, 4)
np.testing.assert_array_equal(got, [0,1,2,2,3,5,6])
pts = np.array([[0,0],[1,1],[0,1],[1,0],[2,3]]).T
got = slow_kth_nearest(pts, 0)
np.testing.assert_allclose(got, [0,1,1,np.sqrt(2),np.sqrt(13)])
got = slow_kth_nearest(pts, 1)
np.testing.assert_allclose(got, [0,1,1,np.sqrt(2),np.sqrt(5)])
def test_1d_kth_nearest():
# In the 1D scale we don't need to rescale
pts = np.random.random(size=20) * 20 - 10
for k in [1,2,3,4,5]:
distances = [slow_kth_nearest(pts, i)[k] for i in range(len(pts))]
def expected_kernel(x):
value = 0
for i, p in enumerate(pts):
value += stats.norm(loc=p, scale=distances[i]).pdf(x)
return value / len(pts)
kernel = testmod.kth_nearest_neighbour_gaussian_kde(pts, k=k)
test_points = np.random.random(size=10) * 15
np.testing.assert_allclose( kernel(test_points), expected_kernel(test_points) )
def test_2d_kth_nearest():
for space_dim in range(2, 5):
pts = np.random.random(size=(space_dim, 20))
stds = np.std(pts, axis=1)
rescaled = np.empty_like(pts)
for i in range(space_dim):
rescaled[i] = pts[i] / stds[i]
for k in [1,2,3,4,5,6]:
distances = [slow_kth_nearest(rescaled, i)[k] for i in range(pts.shape[1])]
def expected_kernel(x):
value = 0
for i in range(pts.shape[1]):
prod = 1
for coord in range(space_dim):
p = pts[coord,i]
prod *= stats.norm(loc=p, scale=distances[i]*stds[coord]).pdf(x[coord])
value += prod
return value / pts.shape[1]
kernel = testmod.kth_nearest_neighbour_gaussian_kde(pts, k=k)
test_points = np.random.random(size=(space_dim, 10))
np.testing.assert_allclose( kernel(test_points), expected_kernel(test_points) )
def test_ReflectedKernel():
kernel = lambda pt : np.abs(pt)
testkernel = testmod.ReflectedKernel(kernel)
assert( testkernel(5) == 10 )
np.testing.assert_allclose(testkernel([1,2,3]), [2,4,6])
# 2 (or 3 etc.) dim kernel only
testkernel = testmod.ReflectedKernel(lambda pt : np.abs(pt[0]))
np.testing.assert_allclose(testkernel([[1,2,3],[4,5,6]]), [2,4,6])
testkernel = testmod.ReflectedKernel(lambda pt : pt[0] * (pt[0]>=0))
np.testing.assert_allclose(testkernel([[1,2,3],[4,5,6]]), [1,2,3])
testkernel = testmod.ReflectedKernel(lambda pt : pt[0] * (pt[0]>=0), reflected_axis=1)
np.testing.assert_allclose(testkernel([[1,2,3],[4,5,6]]), [2,4,6])
def test_ReflectedKernelEstimator():
estimator = mock.MagicMock()
kernel_mock = mock.MagicMock()
estimator.return_value = kernel_mock
test = testmod.ReflectedKernelEstimator(estimator)
kernel = test([1,2,3,4])
estimator.assert_called_with([1,2,3,4])
assert(kernel.reflected_axis == 0)
assert(kernel.delegate is kernel_mock)
test = testmod.ReflectedKernelEstimator(estimator, reflected_axis=2)
kernel = test([1,2,3,4])
assert(kernel.reflected_axis == 2)
def test_GaussianBase_not_point():
with pytest.raises(ValueError):
testmod.GaussianBase(5.2)
def test_GaussianBase_set_covariance():
gb = testmod.GaussianBase([1,2,3,4])
with pytest.raises(ValueError):
gb.covariance_matrix = [[1,2,3], [2,3,4]]
with pytest.raises(ValueError):
gb.covariance_matrix = [[1,2], [3,4]]
gb.covariance_matrix = 1
gb = testmod.GaussianBase([[1,2,3,4], [4,2,2,1]])
with pytest.raises(ValueError):
gb.covariance_matrix = [[1,2,3], [2,3,4]]
gb.covariance_matrix = [[2,2], [3,4]]
with pytest.raises(ValueError):
gb.covariance_matrix = [[1,2], [3,4]]
with pytest.raises(ValueError):
gb.covariance_matrix = 1
def test_GaussianBase_set_band():
gb = testmod.GaussianBase([1,2,3,4])
assert gb.bandwidth == pytest.approx(4 ** (-1/5))
gb.bandwidth = "scott"
assert gb.bandwidth == pytest.approx(4 ** (-1/5))
with pytest.raises(ValueError):
gb.bandwidth = "matt"
gb.bandwidth = "silverman"
assert gb.bandwidth == pytest.approx(3 ** (-1/5))
gb = testmod.GaussianBase([[1,2,3,4],[4,2,1,3]])
assert gb.bandwidth == pytest.approx(4 ** (-1/6))
gb.bandwidth = "scott"
assert gb.bandwidth == pytest.approx(4 ** (-1/6))
with pytest.raises(ValueError):
gb.bandwidth = "matt"
gb.bandwidth = "silverman"
assert gb.bandwidth == pytest.approx(4 ** (-1/6))
def test_GaussianBase_set_weights():
gb = testmod.GaussianBase([1,2,3,4])
assert gb.weights is None
gb.weights = [.2, 0, 5, 2]
with pytest.raises(ValueError):
gb.weights = [.2, 0, 5]
with pytest.raises(ValueError):
gb.weights = 2
with pytest.raises(ValueError):
gb.weights = [[1,2,3],[4,5,6]]
sqrt2pi = np.sqrt(2 * np.pi)
def test_GaussianBase_eval():
gb = testmod.GaussianBase([1,2,3,4])
assert gb.covariance_matrix[0,0] == pytest.approx(20/12)
gb.covariance_matrix = 1.0
gb.bandwidth = 1.0
x5 = np.sum(np.exp([-16/2, -9/2, -4/2, -1/2])) / 4 / sqrt2pi
assert gb(5) == pytest.approx(x5)
x2 = np.sum(np.exp([-1/2, 0, -1/2, -4/2])) / 4 / sqrt2pi
assert gb(2) == pytest.approx(x2)
x0 = np.sum(np.exp([-1/2, -4/2, -9/2, -16/2])) / 4 / sqrt2pi
assert gb(0) == pytest.approx(x0)
np.testing.assert_allclose(gb([0]), [x0])
np.testing.assert_allclose(gb([0,2,5,2,5,0]), [x0,x2,x5,x2,x5,x0])
def test_GaussianBase_eval_with_bandwidth():
gb = testmod.GaussianBase([1,2,3,4])
assert gb.covariance_matrix[0,0] == pytest.approx(20/12)
gb.covariance_matrix = 1.0
gb.bandwidth = 2.0
x5 = np.sum(np.exp([-16/8, -9/8, -4/8, -1/8])) / 8 / sqrt2pi
assert gb(5) == pytest.approx(x5)
x2 = np.sum(np.exp([-1/8, 0, -1/8, -4/8])) / 8 / sqrt2pi
assert gb(2) == pytest.approx(x2)
x0 = np.sum(np.exp([-1/8, -4/8, -9/8, -16/8])) / 8 / sqrt2pi
assert gb(0) == pytest.approx(x0)
np.testing.assert_allclose(gb([0]), [x0])
np.testing.assert_allclose(gb([0,2,5,2,5,0]), [x0,x2,x5,x2,x5,x0])
def test_GaussianBase_large_eval():
n = 1000000
pts = np.arange(n) / n
gb = testmod.GaussianBase(pts)
gb.covariance_matrix = 1.0
gb.bandwidth = 1.0
x5 = np.sum(np.exp(-(5 - pts)**2 / 2)) / n / sqrt2pi
assert gb(5) == pytest.approx(x5)
x3 = np.sum(np.exp(-(3 - pts)**2 / 2)) / n / sqrt2pi
assert gb(3) == pytest.approx(x3)
np.testing.assert_allclose(gb([5,3]), [x5,x3])
def test_GaussianBase_large_eval_3d():
n = 1000000
pts = np.random.random((3,n)) * 100
gb = testmod.GaussianBase(pts)
gb.covariance_matrix = np.eye(3)
gb.bandwidth = 1.0
pt = np.asarray([1,2,3])
x = np.sum(np.exp(-np.sum((pts - pt[:,None])**2,axis=0) / 2)) / n / (sqrt2pi**3)
assert gb([1,2,3]) == pytest.approx(x)
pt = np.asarray([4,2,1])
y = np.sum(np.exp(-np.sum((pts - pt[:,None])**2,axis=0) / 2)) / n / (sqrt2pi**3)
assert gb([4,2,1]) == pytest.approx(y)
np.testing.assert_allclose(gb([[1,4], [2,2], [3,1]]), [x,y])
def test_GaussianBase_eval_with_cov():
gb = testmod.GaussianBase([1,2,3,4])
assert gb.covariance_matrix[0,0] == pytest.approx(20/12)
gb.covariance_matrix = 0.5
gb.bandwidth = 1.0
x5 = np.sum(np.exp([-16, -9, -4, -1])) / 4 /
|
np.sqrt(0.5)
|
numpy.sqrt
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import itertools
import numpy as np
import pytest
from brainstorm.handlers import NumpyHandler
from brainstorm.optional import has_pycuda
non_default_handlers = []
handler_ids = []
if has_pycuda:
from brainstorm.handlers import PyCudaHandler
non_default_handlers.append(PyCudaHandler())
handler_ids.append("PyCudaHandler")
# np.random.seed(1234)
ref_dtype = np.float32
ref = NumpyHandler(ref_dtype)
some_2d_shapes = ((1, 1), (4, 1), (1, 4), (5, 5), (3, 4), (4, 3))
some_nd_shapes = ((1, 1, 4), (1, 1, 3, 3), (3, 4, 2, 1))
np.set_printoptions(linewidth=150)
def operation_check(handler, op_name, ref_args, ignored_args=(), atol=1e-8):
args = get_args_from_ref_args(handler, ref_args)
getattr(ref, op_name)(*ref_args)
getattr(handler, op_name)(*args)
check_list = []
for i, (ref_arg, arg) in enumerate(zip(ref_args, args)):
if i in ignored_args:
# print(i, "was ignored")
continue
if type(ref_arg) is ref.array_type:
arg_ref = handler.get_numpy_copy(arg)
check = np.allclose(ref_arg, arg_ref, atol=atol)
check_list.append(check)
if not check:
print("-" * 40)
print("\nCheck failed for argument number %d:" % i)
print("Reference (expected) array {}:\n{}".format(
ref_arg.shape, ref_arg))
print("\nObtained array {}:\n{}".format(arg_ref.shape,
arg_ref))
d = ref_arg.ravel() - arg_ref.ravel()
print("Frobenius Norm of differences: ", np.sum(d*d))
else:
check = (ref_arg == arg)
check_list.append(check)
if not check:
print("-" * 40)
print("Check failed for argument number %d:" % i)
print("\nReference (expected) value:\n", ref_arg)
print("\nObtained value:\n", arg)
d = np.asarray(ref_arg).ravel() - np.asarray(arg).ravel()
print("Frobenius Norm of differences: ", np.sum(d*d))
# print("Check was ", check)
if False in check_list:
return False
else:
return True
def get_args_from_ref_args(handler, ref_args):
args = []
for ref_arg in ref_args:
if type(ref_arg) is ref.array_type:
temp = handler.create_from_numpy(ref_arg)
args.append(temp)
else:
args.append(ref_arg)
return args
def get_random_arrays(shapes=some_2d_shapes, dtype=ref_dtype):
arrays = []
for shape in shapes:
arrays.append(np.random.randn(*shape).astype(dtype))
return arrays
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sum_t(handler):
list_a = get_random_arrays()
list_axis = [0, 1, None]
for a, axis in itertools.product(list_a, list_axis):
if axis == 0:
out = np.zeros((1, a.shape[1]), dtype=ref_dtype)
elif axis == 1:
out = np.zeros((a.shape[0]), dtype=ref_dtype)
else:
out = np.array([0.], dtype=ref_dtype).reshape(tuple())
ref_args = (a, axis, out)
assert operation_check(handler, 'sum_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_dot_mm(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b.T.copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros((a.shape[0], a.shape[0]), dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'dot_mm', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_dot_add_mm(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b.T.copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.random.randn(a.shape[0], a.shape[0]).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'dot_add_mm', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_add_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.random.randn(*a.shape).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_add_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(b, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_add_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.random.randn(*b.shape).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_add_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(b, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_subtract_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'subtract_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_subtract_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'subtract_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_broadcast_t(handler):
args_to_check = [
([1], 0, [3]),
([1], 0, [1]),
([1, 2], 0, [3, 2]),
([3, 1], 1, [3, 2]),
([1, 2, 5], 0, [3, 2, 5]),
([3, 1, 5], 1, [3, 2, 5]),
([3, 2, 1], 2, [3, 2, 5])
]
a_shapes, axes, out_shapes = list(zip(*args_to_check))
list_a = get_random_arrays(a_shapes)
list_out = get_random_arrays(out_shapes)
for ref_args in zip(list_a, axes, list_out):
assert operation_check(handler, 'broadcast_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_clip_t(handler):
list_a = get_random_arrays(some_nd_shapes)
list_clip_min = [-0.4, 0, 0.2]
list_clip_max = [-0.1, 0, 0.3]
for a, clip_min, clip_max in itertools.product(list_a, list_clip_min,
list_clip_max):
if clip_max >= clip_min:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, clip_min, clip_max, out)
assert operation_check(handler, 'clip_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_log_t(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
a += 10 # to remove negatives
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'log_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sqrt_t(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
a += 10 # to remove negatives
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'sqrt_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_abs_t(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'abs_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sign_t(handler):
list_a = get_random_arrays(some_nd_shapes)
list_a += [np.random.random_integers(-2, 2, (3, 3))]
for a in list_a:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'sign_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_divide_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'divide_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_divide_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'divide_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_mv(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
# print("==================================")
# print("Testing mult_mv() with row vectors")
# print("==================================")
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_mv', ref_args)
# print("=====================================")
# print("Testing mult_mv() with column vectors")
# print("=====================================")
list_b = get_random_arrays()
list_b = [b[:, 0].reshape((-1, 1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
# print('-' * 40)
# print("a:\n", a)
# print("b:\n", b)
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_binarize_v(handler):
v = np.random.random_integers(0, 4, (10, 1)).astype(ref_dtype)
out = np.random.random_sample((10, 5))
ref_args = (v, out)
assert operation_check(handler, 'binarize_v', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_index_m_by_v(handler):
m_list = get_random_arrays()
for m in m_list:
v = np.random.random_integers(0, m.shape[1] - 1, (m.shape[0], 1))
out = np.random.random_sample(v.shape)
ref_args = (m, v, out)
assert operation_check(handler, 'index_m_by_v', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sigmoid(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'sigmoid', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sigmoid_deriv(handler):
list_x = get_random_arrays(some_nd_shapes)
list_y = get_random_arrays(some_nd_shapes)
list_dy = get_random_arrays(some_nd_shapes)
for x, y, dy in zip(list_x, list_y, list_dy):
dx = np.zeros_like(x, dtype=ref_dtype)
ref_args = (x, y, dy, dx)
assert operation_check(handler, 'sigmoid_deriv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_tanh(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'tanh', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_tanh_deriv(handler):
list_x = get_random_arrays(some_nd_shapes)
list_y = get_random_arrays(some_nd_shapes)
list_dy = get_random_arrays(some_nd_shapes)
for x, y, dy in zip(list_x, list_y, list_dy):
dx = np.zeros_like(x, dtype=ref_dtype)
ref_args = (x, y, dy, dx)
assert operation_check(handler, 'tanh_deriv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_rel(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'rel', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_rel_deriv(handler):
list_x = get_random_arrays(some_nd_shapes)
list_y = get_random_arrays(some_nd_shapes)
list_dy = get_random_arrays(some_nd_shapes)
for x, y, dy in zip(list_x, list_y, list_dy):
dx = np.zeros_like(x, dtype=ref_dtype)
ref_args = (x, y, dy, dx)
assert operation_check(handler, 'rel_deriv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_el(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'el', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_el_deriv(handler):
list_x = get_random_arrays(some_nd_shapes)
list_y = get_random_arrays(some_nd_shapes)
list_dy = get_random_arrays(some_nd_shapes)
for x, y, dy in zip(list_x, list_y, list_dy):
dx = np.zeros_like(x, dtype=ref_dtype)
ref_args = (x, y, dy, dx)
assert operation_check(handler, 'el_deriv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_conv2d_forward(handler):
img_shapes = [(1, 3, 3, 1), (3, 8, 8, 1), (2, 6, 4, 3), (1, 3, 4, 2)]
w_shapes = [(1, 1, 1), (3, 3, 3), (6, 2, 2), (2, 1, 3)]
list_x = get_random_arrays(img_shapes)
stride = (1, 1)
padding = 1
for ws in w_shapes:
for x in list_x:
w_shape = (ws[0], ws[1], ws[2], x.shape[3])
w = np.random.uniform(size=w_shape).astype(ref_dtype)
b = np.random.uniform(size=(w.shape[0],)).astype(ref_dtype)
oh = (x.shape[1] + 2 * padding - w.shape[1]) // stride[0] + 1
ow = (x.shape[2] + 2 * padding - w.shape[2]) // stride[1] + 1
out = np.zeros((x.shape[0], oh, ow, w.shape[0]), dtype=ref_dtype)
ref_args = (x, w, b, out, padding, stride)
passed = operation_check(handler, 'conv2d_forward_batch', ref_args,
atol=1e-6)
if not passed:
print(x.shape, w.shape)
assert passed
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_conv2d_backward(handler):
img_shapes = [(1, 3, 3, 1), (4, 8, 8, 1), (3, 6, 4, 10), (1, 3, 4, 2)]
w_shapes = [(1, 1, 1), (3, 1, 1), (6, 2, 3), (2, 1, 3)]
list_x = get_random_arrays(img_shapes)
stride = (1, 1)
padding = 1
for ws in w_shapes:
for x in list_x:
w_shape = (ws[0], ws[1], ws[2], x.shape[3])
w = np.random.uniform(size=w_shape).astype(ref_dtype)
b = np.random.uniform(size=(w.shape[0],)).astype(ref_dtype)
oh = (x.shape[1] + 2 * padding - w.shape[1]) // stride[0] + 1
ow = (x.shape[2] + 2 * padding - w.shape[2]) // stride[1] + 1
out_shape = (x.shape[0], oh, ow, w.shape[0])
o_deltas = np.random.uniform(size=out_shape).astype(ref_dtype)
i_deltas = np.zeros_like(x, dtype=ref_dtype)
w_deltas = np.zeros_like(w, dtype=ref_dtype)
b_deltas = np.zeros_like(b, dtype=ref_dtype)
ref_args = (x, w, padding, stride, i_deltas,
o_deltas, w_deltas, b_deltas)
passed = operation_check(handler, 'conv2d_backward_batch',
ref_args, atol=1e-6)
if not passed:
print(x.shape, w.shape)
assert passed
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_maxpool2d_forward(handler):
img_shapes = [(1, 5, 5, 1), (1, 8, 8, 3), (3, 6, 4, 2), (1, 6, 9, 2)]
window_list = [(2, 2), (3, 3), (4, 4), (2, 1), (1, 2)]
strides_list = [(1, 1), (2, 2), (1, 2), (2, 1)]
list_x = get_random_arrays(img_shapes)
for x in list_x:
for padding in (0, 1, 2):
for strides in strides_list:
for window in window_list:
out_shape = (
x.shape[0],
(x.shape[1] + 2*padding - window[0]) // strides[0] + 1,
(x.shape[2] + 2*padding - window[1]) // strides[1] + 1,
x.shape[3]
)
outputs = np.zeros(out_shape, dtype=ref_dtype)
argmax =
|
np.zeros(out_shape, dtype=ref_dtype)
|
numpy.zeros
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of LIBSGM
#
# https://github.com/CNES/Pandora_libsgm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains tests for the sgm_python_parall code
"""
import unittest
import test.test_libsgm_python.common as common
import numpy as np
import libsgm_python.sgm_python_parall as sgm
class TestSgmPythonParall(unittest.TestCase):
""" "
Test Python version of LibSGM
"""
###############################################################
# Sgm
###############################################################
def test_sgm_middle_value_invalid(self):
""" "
Test SGM middle value invalid
"""
p1 = 8
p2 = 32
cv_in = np.array(
[
[[1, 15, 20], [14, 16, 6], [8, 19, 8]],
[[13, 11, 3], [np.nan, np.nan, np.nan], [16, 4, 12]],
[[18, 2, 17], [23, 7, 1], [5, 20, 14]],
]
)
p1_in = p1 * np.ones((3, 3, 8))
p2_in = p2 *
|
np.ones((3, 3, 8))
|
numpy.ones
|
from anndata import AnnData
import numpy as np
import pandas as pd
import warnings
from .. import logging as logg
from .distributed import materialize_as_ndarray
from .utils import _get_mean_var
def highly_variable_genes(
adata,
min_disp=None, max_disp=None,
min_mean=None, max_mean=None,
n_top_genes=None,
n_bins=20,
flavor='seurat',
subset=False,
inplace=False):
"""Annotate highly variable genes [Satija15]_ [Zheng17]_.
Expects logarithmized data.
Depending on `flavor`, this reproduces the R-implementations of Seurat
[Satija15]_ and Cell Ranger [Zheng17]_.
The normalized dispersion is obtained by scaling with the mean and standard
deviation of the dispersions for genes falling into a given bin for mean
expression of genes. This means that for each bin of mean expression, highly
variable genes are selected.
Parameters
----------
adata : :class:`~anndata.AnnData`
The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond
to cells and columns to genes.
min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
If `n_top_genes` is not `None`, these cutoffs for the means and the
normalized dispersions are ignored.
n_top_genes : `int` or `None` (default: `None`)
Number of highly-variable genes to keep.
n_bins : `int` (default: 20)
Number of bins for binning the mean gene expression. Normalization is
done with respect to each bin. If just a single gene falls into a bin,
the normalized dispersion is artificially set to 1. You'll be informed
about this if you set `settings.verbosity = 4`.
flavor : {'seurat', 'cell_ranger'}, optional (default: 'seurat')
Choose the flavor for computing normalized dispersion. In their default
workflows, Seurat passes the cutoffs whereas Cell Ranger passes
`n_top_genes`.
subset : `bool`, optional (default: `False`)
Inplace subset to highly-variable genes if `True` otherwise merely indicate
highly variable genes.
inplace : `bool`, optional (default: `False`)
Whether to place calculated metrics in `.var` or return them.
Returns
-------
Union[None, np.recarray]
Depending on `inplace` returns calculated metrics (`np.recarray`) or
updates `.var` with the following fields
* `highly_variable` - boolean indicator of highly-variable genes
* `means` - means per gene
* `dispersions` - dispersions per gene
* `dispersions_norm` - normalized dispersions per gene
Notes
-----
This function replaces :func:`~scanpy.api.pp.filter_genes_dispersion`.
"""
logg.msg('extracting highly variable genes', r=True, v=4)
if n_top_genes is not None and not all([
min_disp is None, max_disp is None, min_mean is None, max_mean is None]):
logg.info('If you pass `n_top_genes`, all cutoffs are ignored.')
if min_disp is None: min_disp = 0.5
if min_mean is None: min_mean = 0.0125
if max_mean is None: max_mean = 3
X = np.expm1(adata.X) if flavor == 'seurat' else adata.X
mean, var = materialize_as_ndarray(_get_mean_var(X))
# now actually compute the dispersion
mean[mean == 0] = 1e-12 # set entries equal to zero to small value
dispersion = var / mean
if flavor=='seurat': # logarithmized mean as in Seurat
dispersion[dispersion == 0] = np.nan
dispersion = np.log(dispersion)
mean = np.log1p(mean)
# all of the following quantities are "per-gene" here
df = pd.DataFrame()
df['mean'] = mean
df['dispersion'] = dispersion
if flavor == 'seurat':
df['mean_bin'] = pd.cut(df['mean'], bins=n_bins)
disp_grouped = df.groupby('mean_bin')['dispersion']
disp_mean_bin = disp_grouped.mean()
disp_std_bin = disp_grouped.std(ddof=1)
# retrieve those genes that have nan std, these are the ones where
# only a single gene fell in the bin and implicitly set them to have
# a normalized dispersion of 1
one_gene_per_bin = disp_std_bin.isnull()
gen_indices = np.where(one_gene_per_bin[df['mean_bin']])[0].tolist()
if len(gen_indices) > 0:
logg.msg(
'Gene indices {} fell into a single bin: their '
'normalized dispersion was set to 1.\n '
'Decreasing `n_bins` will likely avoid this effect.'
.format(gen_indices), v=4)
# Circumvent pandas 0.23 bug. Both sides of the assignment have dtype==float32,
# but there’s still a dtype error without “.value”.
disp_std_bin[one_gene_per_bin] = disp_mean_bin[one_gene_per_bin].values
disp_mean_bin[one_gene_per_bin] = 0
# actually do the normalization
df['dispersion_norm'] = (df['dispersion'].values # use values here as index differs
- disp_mean_bin[df['mean_bin']].values) \
/ disp_std_bin[df['mean_bin']].values
elif flavor == 'cell_ranger':
from statsmodels import robust
df['mean_bin'] = pd.cut(df['mean'], np.r_[-np.inf,
np.percentile(df['mean'], np.arange(10, 105, 5)), np.inf])
disp_grouped = df.groupby('mean_bin')['dispersion']
disp_median_bin = disp_grouped.median()
# the next line raises the warning: "Mean of empty slice"
with warnings.catch_warnings():
warnings.simplefilter('ignore')
disp_mad_bin = disp_grouped.apply(robust.mad)
df['dispersion_norm'] = np.abs((df['dispersion'].values
- disp_median_bin[df['mean_bin']].values)) \
/ disp_mad_bin[df['mean_bin']].values
else:
raise ValueError('`flavor` needs to be "seurat" or "cell_ranger"')
dispersion_norm = df['dispersion_norm'].values.astype('float32')
if n_top_genes is not None:
dispersion_norm = dispersion_norm[~np.isnan(dispersion_norm)]
dispersion_norm[::-1].sort() # interestingly, np.argpartition is slightly slower
disp_cut_off = dispersion_norm[n_top_genes-1]
gene_subset = df['dispersion_norm'].values >= disp_cut_off
logg.msg('the {} top genes correspond to a normalized dispersion cutoff of'
.format(n_top_genes, disp_cut_off), v=5)
else:
max_disp = np.inf if max_disp is None else max_disp
dispersion_norm[
|
np.isnan(dispersion_norm)
|
numpy.isnan
|
import numpy as np
from numpy.lib.function_base import place
ids = []
tiles = np.zeros((1,10,10), dtype=np.uint8) #stub
with open('input20.txt') as f:
tiledata = f.read().strip().split('\n\n')
for td in tiledata:
t = td.strip().split('\n')
ids.append(int(t[0][5:-1]))
tile = (np.array([[list(s) for s in t[1:]]]) == '#').astype(np.uint8)
tiles =
|
np.concatenate((tiles, tile))
|
numpy.concatenate
|
import itertools
from pathlib import Path
import pytest
import numpy as np
import h5py
from kontiki.sfm import View, Landmark
from kontiki.io import load_structure, save_structure
from kontiki.utils import safe_time_span
def project_camera_trajectory(X_world, t0, trajectory, camera):
def project(t):
# World -> Trajectory
X_traj = trajectory.from_world(X_world, t0 + t)
# Trajectory -> Camera
X_camera = camera.from_trajectory(X_traj)
if X_camera[2] <= 0:
raise ValueError("Behind camera")
return camera.project(X_camera)
def rootfunc(t):
u, v = project(t)
vt = v * camera.readout / camera.rows
err = t - vt
return err
from scipy.optimize import brentq
t = brentq(rootfunc, 0, camera.readout)
return project(t), t0 + t
def generate_landmark(views, camera, trajectory, view_probs=None, tries=1000):
for _ in range(tries):
i = np.random.choice(len(views), p=view_probs)
vi = views[i]
# Initial observation
u, v = np.random.uniform(0, camera.cols), np.random.uniform(0, camera.rows)
y0 = np.array([u, v])
z0 =
|
np.random.uniform(0.5, 100)
|
numpy.random.uniform
|
# import torch
import cv2 as cv
import numpy as np
import time as timer
from pathlib import Path
# from torch.autograd import Variable
class Event_simulator():
default_config = {
"contrast_threshold_pos": 0.15, # Contrast threshold (positive)
"contrast_threshold_neg": 0.15, # Contrast threshold (negative))
"contrast_threshold_sigma_pos": 0.021, # Standard deviation of contrast threshold (positive)
"contrast_threshold_sigma_neg": 0.021, # Standard deviation of contrast threshold (negative))
"refractory_period_ns": 0, # Refractory period (time during which a pixel cannot fire events just after it fired one), in nanoseconds
"use_log_image": True, # Whether to convert images to log images in the preprocessing step.
"log_eps": 0.1, # Epsilon value used to convert images to log: L = log(eps + I / 255.0).
"random_seed": 0, # Random seed used to generate the trajectories. If set to 0 the current time(0) is taken as seed.
"frame_rate": 1200, # Specifies the input video framerate (e.g. 1200 fps)
# "use_event_frame": False, #
# "events_per_frame": 10000, #
}
def __init__(self, img, time, **config):
self.config = self.default_config
# TODO: use utils.tools import dict_update
# self.config = dict_update(self.config, dict(config))
assert len(img.shape) == 2, 'Event Simulator takes only gray image'
if self.config["use_log_image"]:
img = cv.log(self.config["log_eps"] + img)
self.last_img = img.copy()
self.ref_values = img.copy()
self.last_event_timestamp = np.zeros_like(img)
self.current_time = time
self.H, self.W = img.shape
cp = self.config["contrast_threshold_pos"]
cm = self.config["contrast_threshold_neg"]
sigma_cp = self.config["contrast_threshold_sigma_pos"]
sigma_cm = self.config["contrast_threshold_sigma_neg"]
minimum_contrast_threshold = 0.01
stepsize_pos = np.full_like(img, cp) + np.random.normal(0, sigma_cp, [self.H, self.W])
stepsize_neg =
|
np.full_like(img, cm)
|
numpy.full_like
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import (
OpTest, convert_float_to_uint16, get_numeric_gradient)
from paddle.fluid.tests.unittests.testsuite import create_op
from paddle.fluid import Program, program_guard
def conv2d_forward_naive(input,
filter,
group,
conv_param,
padding_algorithm='EXPLICIT',
data_format='NCHW'):
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError("Unknown Attr(padding_algorithm): '%s'. "
"It can only be 'SAME' or 'VALID'." %
str(padding_algorithm))
if data_format not in ["NCHW", "NHWC"]:
raise ValueError("Unknown Attr(data_format): '%s' ."
"It can only be 'NCHW' or 'NHWC'." % str(data_format))
channel_last = (data_format == "NHWC")
if channel_last:
input = np.transpose(input, [0, 3, 1, 2])
in_n, in_c, in_h, in_w = input.shape
f_n, f_c, f_h, f_w = filter.shape
out_n = in_n
out_c = f_n
assert f_c * group == in_c
assert np.mod(out_c, group) == 0
sub_out_c = out_c // group
sub_f_n = f_n // group
stride, pad, dilation = conv_param['stride'], conv_param['pad'], conv_param[
'dilation']
# update pad and dilation
def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding = []
for input_size, filter_size, stride_size in zip(input_shape, pool_size,
pool_stride):
out_size = int((input_size + stride_size - 1) / stride_size)
pad_sum = np.max((
(out_size - 1) * stride_size + filter_size - input_size, 0))
pad_0 = int(pad_sum / 2)
pad_1 = int(pad_sum - pad_0)
padding.append(pad_0)
padding.append(pad_1)
return padding
ksize = filter.shape[2:4]
if padding_algorithm == "VALID":
pad = [0, 0, 0, 0]
elif padding_algorithm == "SAME":
dilation = [1, 1]
input_data_shape = input.shape[2:4]
pad = _get_padding_with_SAME(input_data_shape, ksize, stride)
pad_h_0, pad_h_1 = pad[0], pad[0]
pad_w_0, pad_w_1 = pad[1], pad[1]
if len(pad) == 4:
pad_h_0, pad_h_1 = pad[0], pad[1]
pad_w_0, pad_w_1 = pad[2], pad[3]
out_h = 1 + (in_h + pad_h_0 + pad_h_1 - (dilation[0] *
(f_h - 1) + 1)) // stride[0]
out_w = 1 + (in_w + pad_w_0 + pad_w_1 - (dilation[1] *
(f_w - 1) + 1)) // stride[1]
out = np.zeros((out_n, out_c, out_h, out_w))
d_bolck_h = (dilation[0] * (f_h - 1) + 1)
d_bolck_w = (dilation[1] * (f_w - 1) + 1)
input_pad = np.pad(input, ((0, 0), (0, 0), (pad_h_0, pad_h_1),
(pad_w_0, pad_w_1)),
mode='constant',
constant_values=0)
filter_dilation = np.zeros((f_n, f_c, d_bolck_h, d_bolck_w))
filter_dilation[:, :, 0:d_bolck_h:dilation[0], 0:d_bolck_w:dilation[
1]] = filter
for i in range(out_h):
for j in range(out_w):
for g in range(group):
input_pad_masked = \
input_pad[:, g * f_c:(g + 1) * f_c,
i * stride[0]:i * stride[0] + d_bolck_h,
j * stride[1]:j * stride[1] + d_bolck_w]
f_sub = filter_dilation[g * sub_f_n:(g + 1) * sub_f_n, :, :, :]
# sub_f_n == sub_out_c
for k in range(sub_out_c):
# Multiplication of Corresponding Elements, then sum all
out[:, g * sub_out_c + k, i, j] = \
np.sum(input_pad_masked * f_sub[k, :, :, :],
axis=(1, 2, 3))
if channel_last:
out = np.transpose(out, [0, 2, 3, 1])
return out, in_n, out_h, out_w, out_c
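# Illustrative sketch (added; shapes and parameters are arbitrary choices): call
# the naive reference implementation directly on a random NCHW input.
def _example_conv2d_forward_naive():
    x = np.random.random((2, 3, 8, 8)).astype(np.float64)   # NCHW input
    w = np.random.random((6, 3, 3, 3)).astype(np.float64)   # (out_c, in_c/groups, kh, kw)
    conv_param = {'stride': [1, 1], 'pad': [1, 1], 'dilation': [1, 1]}
    out, in_n, out_h, out_w, out_c = conv2d_forward_naive(x, w, 1, conv_param)
    return out.shape  # (2, 6, 8, 8) for stride 1 and pad 1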
def create_test_cudnn_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
cls_name = "{0}_{1}".format(parent.__name__, "CUDNN")
TestCUDNNCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNCase
def create_test_cudnn_fp16_class(parent, grad_check=True):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestConv2DCUDNNFp16(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
def test_check_grad_no_filter(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Input'], 'Output', no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Filter'], 'Output', no_grad_set=set(['Input']))
cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16")
TestConv2DCUDNNFp16.__name__ = cls_name
globals()[cls_name] = TestConv2DCUDNNFp16
def create_test_cudnn_bf16_class(parent):
@unittest.skipIf(
not core.is_compiled_with_cuda() or core.cudnn_version() < 8100,
"core is not compiled with CUDA and cudnn version need larger than 8.1.0"
)
class TestConv2DCUDNNBF16(parent):
def get_numeric_grad(self, place, check_name):
scope = core.Scope()
self._check_grad_helper()
op = create_op(scope, self.op_type, self.inputs, self.outputs,
self.attrs)
return get_numeric_gradient(place, scope, op, self.inputs_fp32,
check_name, ['Output'])
def init_kernel_type(self):
self.use_cudnn = True
self.no_need_check_grad = True
self.dtype = np.uint16
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-2)
def test_check_grad_no_filter(self):
place = core.CUDAPlace(0)
numeric_grads = self.get_numeric_grad(place, 'Input')
self.check_grad_with_place(
place, ['Input'],
'Output',
no_grad_set=set(['Filter']),
user_defined_grads=[numeric_grads])
def test_check_grad_no_input(self):
place = core.CUDAPlace(0)
numeric_grads = self.get_numeric_grad(place, 'Filter')
self.check_grad_with_place(
place, ['Filter'],
'Output',
no_grad_set=set(['Input']),
user_defined_grads=[numeric_grads])
cls_name = "{0}_{1}".format(parent.__name__, "CUDNNBF16")
TestConv2DCUDNNBF16.__name__ = cls_name
globals()[cls_name] = TestConv2DCUDNNBF16
def create_test_channel_last_class(parent):
class TestChannelLastCase(parent):
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "ChannelLast")
TestChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestChannelLastCase
def create_test_cudnn_channel_last_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCudnnChannelLastCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLast")
TestCudnnChannelLastCase.__name__ = cls_name
globals()[cls_name] = TestCudnnChannelLastCase
def create_test_cudnn_channel_last_fp16_class(parent, grad_check=True):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCudnnChannelLastFp16(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=2e-2)
def test_check_grad_no_filter(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Input'], 'Output', no_grad_set=set(['Filter']))
def test_check_grad_no_input(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place) and grad_check:
self.check_grad_with_place(
place, ['Filter'], 'Output', no_grad_set=set(['Input']))
def init_data_format(self):
self.data_format = "NHWC"
def init_test_case_2(self):
N, C, H, W = self.input_size
self.input_size = [N, H, W, C]
cls_name = "{0}_{1}".format(parent.__name__, "CudnnChannelLastFp16")
TestCudnnChannelLastFp16.__name__ = cls_name
globals()[cls_name] = TestCudnnChannelLastFp16
def create_test_padding_SAME_class(parent):
    class TestPaddingSAMECase(parent):
def init_paddings(self):
self.pad = [0, 0]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingSAMEOp")
    TestPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestPaddingSAMECase
def create_test_padding_VALID_class(parent):
class TestPaddingVALIDCase(parent):
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "PaddingVALIDOp")
TestPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestPaddingVALIDCase
def create_test_cudnn_padding_SAME_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
    class TestCUDNNPaddingSAMECase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "SAME"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingSAMEOp")
    TestCUDNNPaddingSAMECase.__name__ = cls_name
    globals()[cls_name] = TestCUDNNPaddingSAMECase
def create_test_cudnn_padding_VALID_class(parent):
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestCUDNNPaddingVALIDCase(parent):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float32 if core.is_compiled_with_rocm(
) else np.float64
def init_paddings(self):
self.pad = [1, 1]
self.padding_algorithm = "VALID"
cls_name = "{0}_{1}".format(parent.__name__, "CudnnPaddingVALIDOp")
TestCUDNNPaddingVALIDCase.__name__ = cls_name
globals()[cls_name] = TestCUDNNPaddingVALIDCase
class TestConv2DOp(OpTest):
def setUp(self):
self.op_type = "conv2d"
self.use_cudnn = False
self.exhaustive_search = False
self.use_cuda = False
self.use_mkldnn = False
self.fuse_relu_before_depthwise_conv = False
self.data_format = "AnyLayout"
self.dtype = np.float64
self.init_kernel_type()
self.init_group()
self.init_dilation()
self.init_test_case()
conv2d_param = {
'stride': self.stride,
'pad': self.pad,
'dilation': self.dilations
}
if self.is_bfloat16_op():
input = np.random.random(self.input_size).astype(np.float32)
filter = np.random.uniform(-1, 1,
self.filter_size).astype(np.float32)
else:
input = np.random.random(self.input_size).astype(self.dtype)
filter = np.random.uniform(-1, 1,
self.filter_size).astype(self.dtype)
if not self.has_cuda():
self.fuse_relu_before_depthwise_conv = False
if self.fuse_relu_before_depthwise_conv:
input = input - 0.5
input -= (input < 0) * 0.1
input += (input >= 0) * 0.1
input2 = np.maximum(input, 0.0)
else:
input2 = input
output, _, _, _, _ = conv2d_forward_naive(input2, filter, self.groups,
conv2d_param)
if self.is_bfloat16_op():
output = output.astype(np.float32)
self.inputs = {
'Input': convert_float_to_uint16(input),
'Filter': convert_float_to_uint16(filter)
}
self.inputs_fp32 = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
else:
output = output.astype(self.dtype)
self.inputs = {
'Input': OpTest.np_dtype_to_fluid_dtype(input),
'Filter': OpTest.np_dtype_to_fluid_dtype(filter)
}
self.attrs = {
'strides': self.stride,
'paddings': self.pad,
'groups': self.groups,
'dilations': self.dilations,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn,
'data_format': self.data_format,
'fuse_relu_before_depthwise_conv':
self.fuse_relu_before_depthwise_conv,
'exhaustive_search': self.exhaustive_search
}
self.outputs = {'Output': output}
def has_cuda(self):
return core.is_compiled_with_cuda() and (self.use_cudnn or
self.use_cuda)
def test_check_output(self):
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
def test_check_grad(self):
if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
self.no_need_check_grad == True):
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, {'Input', 'Filter'},
'Output',
max_relative_error=0.02,
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_filter(self):
if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
self.no_need_check_grad == True):
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, ['Input'],
'Output',
max_relative_error=0.02,
no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False))
def test_check_grad_no_input(self):
if self.dtype == np.float16 or (hasattr(self, "no_need_check_grad") and
self.no_need_check_grad == True):
return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad_with_place(
place, ['Filter'],
'Output',
no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False))
def init_test_case(self):
self.pad = [0, 0]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert np.mod(self.input_size[1], self.groups) == 0
f_c = self.input_size[1] // self.groups
self.filter_size = [6, f_c, 3, 3]
def init_test_case_2(self):
pass
def init_dilation(self):
self.dilations = [1, 1]
def init_group(self):
self.groups = 1
def init_kernel_type(self):
pass
class TestWithPad(TestConv2DOp):
def init_test_case(self):
self.pad = [1, 1]
self.stride = [1, 1]
self.input_size = [2, 3, 5, 5] # NCHW
assert
|
np.mod(self.input_size[1], self.groups)
|
numpy.mod
|
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from skcv.video.optical_flow.reliability import *
def test_forward_backward_reliability():
N = 99
M = 99
fflow = np.zeros((N, M, 2))
bflow = np.zeros((N, M, 2))
    fflow[N // 3:2 * N // 3, M // 3:2 * M // 3, 0] = 1
    fflow[N // 3:2 * N // 3, M // 3:2 * M // 3, 1] = 1
    bflow[1 + N // 3:2 * N // 3, 1 + M // 3:2 * M // 3, 0] = -1
    bflow[1 + N // 3:2 * N // 3, 1 + M // 3:2 * M // 3, 1] = -1
frel = occlusion_reliability(fflow, bflow)
brel = occlusion_reliability(bflow, fflow)
low_rel = (np.array([33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65,
65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65]),
np.array([65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65]))
assert_equal(np.where(frel < 1), low_rel)
low_rel = (np.array([33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 34,
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65]),
np.array([33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33]))
assert_equal(np.where(brel < 1), low_rel)
def test_variation_reliability():
N = 99
M = 99
fflow = np.zeros((N, M, 2))
    fflow[N // 3:2 * N // 3, M // 3:2 * M // 3, 0] = 1
    fflow[N // 3:2 * N // 3, M // 3:2 * M // 3, 1] = 1
rel = variation_reliability(fflow)
low_rel = (np.array([33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 34,
34, 35, 35, 36, 36, 37, 37, 38, 38, 39, 39, 40, 40, 41, 41, 42, 42,
43, 43, 44, 44, 45, 45, 46, 46, 47, 47, 48, 48, 49, 49, 50, 50, 51,
51, 52, 52, 53, 53, 54, 54, 55, 55, 56, 56, 57, 57, 58, 58, 59, 59,
60, 60, 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, 65, 65, 65, 65, 65,
65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
65, 65, 65, 65, 65, 65, 65, 65, 65]),
np.array([33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 33,
65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65,
33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33,
65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 65,
33, 65, 33, 65, 33, 65, 33, 65, 33, 65, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
57, 58, 59, 60, 61, 62, 63, 64, 65]))
assert_equal(np.where(rel < 1), low_rel)
def test_structure_reliability():
N = 100
M = 100
img = np.zeros((N, M, 3))
    img[N // 2, M // 2, :] = [100, 100, 100]
rel = structure_reliability(img)
assert_almost_equal(np.max(rel), 1)
assert_equal(np.argmax(rel), 4848)
def test_reliability():
N = 9
M = 9
img = np.zeros((N, M, 3))
fflow = np.zeros((N, M, 2))
bflow = np.zeros((N, M, 2))
    fflow[N // 3:2 * N // 3, M // 3:2 * M // 3, 0] = 1
    fflow[N // 3:2 * N // 3, M // 3:2 * M // 3, 1] = 1
    bflow[1 + N // 3:2 * N // 3, 1 + M // 3:2 * M // 3, 0] = -1
    bflow[1 + N // 3:2 * N // 3, 1 + M // 3:2 * M // 3, 1] = -1
    img[N // 3:2 * N // 3, M // 3:2 * M // 3, :] = 100
rel = flow_reliability(img, forward_flow=fflow, backward_flow=bflow,
use_structure=True)
expected_rel = np.array([[3.18997461e-03, 2.27885585e-02, 5.77428933e-02,
6.76206854e-02, 5.99810903e-02, 6.76206854e-02,
5.77428933e-02, 2.27885585e-02, 3.18997461e-03],
[2.27885585e-02, 1.83698319e-01, 4.55458765e-01,
5.45682429e-01, 5.21522853e-01, 5.45682429e-01,
4.55458765e-01, 1.83698319e-01, 2.27885585e-02],
[5.77428933e-02, 4.55458765e-01, 8.82046385e-01,
9.59261552e-01, 9.61780019e-01, 9.59261552e-01,
8.82046385e-01, 4.55458765e-01, 5.77428933e-02],
[6.76206854e-02, 5.45682429e-01, 9.59261552e-01,
1.81694479e-20, 1.34794095e-10, 1.81694479e-20,
9.59261552e-01, 5.45682429e-01, 6.76206854e-02],
[5.99810903e-02, 5.21522853e-01, 9.61780019e-01,
1.34794095e-10, 9.98152559e-01, 1.34794095e-10,
9.61780019e-01, 5.21522853e-01, 5.99810903e-02],
[6.76206854e-02, 5.45682429e-01, 9.59261552e-01,
1.81694479e-20, 1.34794095e-10, 1.81694479e-20,
9.59261552e-01, 5.45682429e-01, 6.76206854e-02],
[5.77428933e-02, 4.55458765e-01, 8.82046385e-01,
9.59261552e-01, 9.61780019e-01, 9.59261552e-01,
8.82046385e-01, 4.55458765e-01, 5.77428933e-02],
[2.27885585e-02, 1.83698319e-01, 4.55458765e-01,
5.45682429e-01, 5.21522853e-01, 5.45682429e-01,
4.55458765e-01, 1.83698319e-01, 2.27885585e-02],
[3.18997461e-03, 2.27885585e-02, 5.77428933e-02,
6.76206854e-02, 5.99810903e-02, 6.76206854e-02,
5.77428933e-02, 2.27885585e-02, 3.18997461e-03]])
assert_almost_equal(rel, expected_rel)
rel = flow_reliability(img, forward_flow=fflow, backward_flow=bflow,
use_structure=False)
low_rel = (np.array([3, 3, 3, 4, 4, 5, 5, 5]),
np.array([3, 4, 5, 3, 5, 3, 4, 5]))
assert_equal(
|
np.where(rel < 1)
|
numpy.where
|
"""
drivese_omdao.py
Created by <NAME>, <NAME> and <NAME> 2014.
Copyright (c) NREL. All rights reserved.
Functions nacelle_example_5MW_baseline_[34]pt() did not define blade_mass
We've added prob['blade_mass'] = 17740.0 (copied from hubse_omdao.py)
GNS 2019 05 13
GNS 2019 06 05: nacelle_example_*() now return prob
Classes with declarations like
class ObjName_OM(ExplicitComponent)
are OpenMDAO wrappers for pure-python objects that define the parts of a wind turbine drivetrain.
These objects are defined in drivese_components.py (which contains NO OpenMDAO code).
"""
import numpy as np
import sys
import wisdem.drivetrainse.drivese_components as dc
from wisdem.drivetrainse.hubse_omdao import HubSE, Hub_CM_Adder_OM
from wisdem.commonse.csystem import DirectionVector
from wisdem.commonse import gravity
from wisdem.commonse.utilities import hstack, vstack
from openmdao.api import Group, ExplicitComponent, IndepVarComp, Problem, view_connections
#-------------------------------------------------------------------------
# Components
#--------------------------------------------
class ForceMomentDemux(ExplicitComponent):
def setup(self):
self.add_input('Fxyz', val=np.zeros(3), units='N', desc='Force components applied at the hub center')
self.add_input('Mxyz', val=np.zeros(3), units='N*m', desc='Moment components generated by the rotor')
self.add_output('rotor_bending_moment_x', val=0.0, units='N*m', desc='The bending moment about the x axis')
self.add_output('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_output('rotor_bending_moment_z', val=0.0, units='N*m', desc='The bending moment about the z axis')
self.add_output('rotor_thrust', val=0.0, units='N', desc='The force along the x axis applied at hub center')
self.add_output('rotor_force_y', val=0.0, units='N', desc='The force along the y axis applied at hub center')
self.add_output('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
def compute(self, inputs, outputs):
outputs['rotor_thrust'], outputs['rotor_force_y'], outputs['rotor_force_z'] = inputs['Fxyz'][0], inputs['Fxyz'][1], inputs['Fxyz'][2]
outputs['rotor_bending_moment_x'], outputs['rotor_bending_moment_y'], outputs['rotor_bending_moment_z'] = inputs['Mxyz'][0], inputs['Mxyz'][1], inputs['Mxyz'][2]
#-------------------------------------------------------------------------
class LowSpeedShaft4pt_OM(ExplicitComponent):
''' LowSpeedShaft class
The LowSpeedShaft class is used to represent the low speed shaft component of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def initialize(self):
self.options.declare('debug', default=False)
def setup(self):
# variables
self.add_input('rotor_bending_moment_x', val=0.0, units='N*m', desc='The bending moment about the x axis')
self.add_input('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_input('rotor_bending_moment_z', val=0.0, units='N*m', desc='The bending moment about the z axis')
self.add_input('rotor_thrust', val=0.0, units='N', desc='The force along the x axis applied at hub center')
self.add_input('rotor_force_y', val=0.0, units='N', desc='The force along the y axis applied at hub center')
self.add_input('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
self.add_input('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('machine_rating', val=0.0, units='kW', desc='machine_rating machine rating of the turbine')
self.add_input('gearbox_mass', val=0.0, units='kg', desc='Gearbox mass')
self.add_input('carrier_mass', val=0.0, units='kg', desc='Carrier mass')
self.add_input('overhang', val=0.0, units='m', desc='Overhang distance')
self.add_input('distance_hub2mb', val=0.0, units='m', desc='distance between hub center and upwind main bearing')
        self.add_input('drivetrain_efficiency', val=0.0, desc='overall drivetrain efficiency')
# parameters
self.add_input('shrink_disc_mass', val=0.0, units='kg', desc='Mass of the shrink disc')
self.add_input('gearbox_cm', val=np.zeros(3), units='m', desc='center of mass of gearbox')
self.add_input('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_input('flange_length', val=0.0, units='m', desc='flange length')
self.add_input('shaft_angle', val=0.0, units='rad', desc='Angle of the LSS inclination with respect to the horizontal')
self.add_input('shaft_ratio', val=0.0, desc='Ratio of inner diameter to outer diameter. Leave zero for solid LSS')
self.add_input('hub_flange_thickness', val=0.0, desc='Shell thickness for spherical hub')
self.add_discrete_input('mb1Type', val='CARB', desc='main bearing #1 type- valid options are CRB/SRB/RB/CARB/TRB1/TRB2')
self.add_discrete_input('mb2Type', val='SRB', desc='main bearing #2 type- valid options are CRB/SRB/RB/CARB/TRB1/TRB2')
self.add_discrete_input('IEC_Class', val='B', desc='IEC turbulence class (A/B/C)')
# outputs
self.add_output('lss_design_torque', val=0.0, units='N*m', desc='lss design torque')
self.add_output('lss_design_bending_load', val=0.0, units='N', desc='lss design bending load')
self.add_output('lss_length', val=0.0, units='m', desc='lss length')
self.add_output('lss_diameter1', val=0.0, units='m', desc='lss outer diameter at main bearing')
self.add_output('lss_diameter2', val=0.0, units='m', desc='lss outer diameter at second bearing')
self.add_output('lss_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('lss_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('lss_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('lss_mb1_facewidth', val=0.0, units='m', desc='facewidth of upwind main bearing')
self.add_output('lss_mb2_facewidth', val=0.0, units='m', desc='facewidth of main bearing')
self.add_output('lss_mb1_mass', val=0.0, units='kg', desc='main bearing mass')
self.add_output('lss_mb2_mass', val=0.0, units='kg', desc='second bearing mass')
self.add_output('lss_mb1_cm', val=np.zeros(3), units='m', desc='main bearing 1 center of mass')
self.add_output('lss_mb2_cm', val=np.zeros(3), units='m', desc='main bearing 2 center of mass')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
lss4pt = dc.LowSpeedShaft4pt(discrete_inputs['mb1Type'], discrete_inputs['mb2Type'], discrete_inputs['IEC_Class'], debug=self.options['debug'])
(outputs['lss_design_torque'], outputs['lss_design_bending_load'], outputs['lss_length'], outputs['lss_diameter1'], outputs['lss_diameter2'], outputs['lss_mass'], outputs['lss_cm'], outputs['lss_I'], \
outputs['lss_mb1_facewidth'], outputs['lss_mb2_facewidth'], outputs['lss_mb1_mass'], outputs['lss_mb2_mass'], outputs['lss_mb1_cm'], outputs['lss_mb2_cm']) \
= lss4pt.compute(inputs['rotor_diameter'], inputs['rotor_mass'], inputs['rotor_thrust'], inputs['rotor_force_y'], inputs['rotor_force_z'], \
inputs['rotor_bending_moment_x'], inputs['rotor_bending_moment_y'], inputs['rotor_bending_moment_z'], \
inputs['overhang'], inputs['machine_rating'], inputs['drivetrain_efficiency'], \
inputs['gearbox_mass'], inputs['carrier_mass'], inputs['gearbox_cm'], inputs['gearbox_length'], \
inputs['shrink_disc_mass'], inputs['flange_length'], inputs['distance_hub2mb'], inputs['shaft_angle'], inputs['shaft_ratio'], \
inputs['hub_flange_thickness'])
#-------------------------------------------------------------------------
class LowSpeedShaft3pt_OM(ExplicitComponent):
''' LowSpeedShaft class
The LowSpeedShaft class is used to represent the low speed shaft component of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def initialize(self):
self.options.declare('debug', default=False)
def setup(self):
# variables
self.add_input('rotor_bending_moment_x', val=0.0, units='N*m', desc='The bending moment about the x axis')
self.add_input('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_input('rotor_bending_moment_z', val=0.0, units='N*m', desc='The bending moment about the z axis')
self.add_input('rotor_thrust', val=0.0, units='N', desc='The force along the x axis applied at hub center')
self.add_input('rotor_force_y', val=0.0, units='N', desc='The force along the y axis applied at hub center')
self.add_input('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
self.add_input('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('machine_rating', val=0.0, units='kW', desc='machine_rating machine rating of the turbine')
self.add_input('gearbox_mass', val=0.0, units='kg', desc='Gearbox mass')
self.add_input('carrier_mass', val=0.0, units='kg', desc='Carrier mass')
self.add_input('overhang', val=0.0, units='m', desc='Overhang distance')
self.add_input('distance_hub2mb', val=0.0, units='m', desc='distance between hub center and upwind main bearing')
        self.add_input('drivetrain_efficiency', val=0.0, desc='overall drivetrain efficiency')
# parameters
self.add_input('shrink_disc_mass', val=0.0, units='kg', desc='Mass of the shrink disc')
self.add_input('gearbox_cm', val=np.zeros(3), units='m', desc='center of mass of gearbox')
self.add_input('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_input('flange_length', val=0.0, units='m', desc='flange length')
self.add_input('shaft_angle', val=0.0, units='rad', desc='Angle of the LSS inclination with respect to the horizontal')
self.add_input('shaft_ratio', val=0.0, desc='Ratio of inner diameter to outer diameter. Leave zero for solid LSS')
self.add_input('hub_flange_thickness', val=0.0, desc='Shell thickness for spherical hub')
self.add_discrete_input('mb1Type', val='CARB', desc='main bearing #1 type- valid options are CRB/SRB/RB/CARB/TRB1/TRB2')
self.add_discrete_input('IEC_Class', val='B', desc='IEC turbulence class (A/B/C)')
# outputs
self.add_output('lss_design_torque', val=0.0, units='N*m', desc='lss design torque')
self.add_output('lss_design_bending_load', val=0.0, units='N', desc='lss design bending load')
self.add_output('lss_length', val=0.0, units='m', desc='lss length')
self.add_output('lss_diameter1', val=0.0, units='m', desc='lss outer diameter at main bearing')
self.add_output('lss_diameter2', val=0.0, units='m', desc='lss outer diameter at second bearing')
self.add_output('lss_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('lss_cm', val=np.zeros(3), desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('lss_I', val=np.zeros(3), desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('lss_mb1_facewidth', val=0.0, units='m', desc='facewidth of upwind main bearing')
self.add_output('lss_mb2_facewidth', val=0.0, units='m', desc='facewidth of main bearing')
self.add_output('lss_mb1_mass', val=0.0, units='kg', desc='main bearing mass')
self.add_output('lss_mb2_mass', val=0.0, units='kg', desc='second bearing mass')
self.add_output('lss_mb1_cm', val=np.zeros(3), units='m', desc='main bearing 1 center of mass')
self.add_output('lss_mb2_cm', val=np.zeros(3), units='m', desc='main bearing 2 center of mass')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
lss3pt = dc.LowSpeedShaft3pt(discrete_inputs['mb1Type'], discrete_inputs['IEC_Class'], debug=self.options['debug'])
(outputs['lss_design_torque'], outputs['lss_design_bending_load'], outputs['lss_length'], outputs['lss_diameter1'], outputs['lss_diameter2'], outputs['lss_mass'], outputs['lss_cm'], outputs['lss_I'], \
outputs['lss_mb1_facewidth'], outputs['lss_mb2_facewidth'], outputs['lss_mb1_mass'], outputs['lss_mb2_mass'], outputs['lss_mb1_cm'], outputs['lss_mb2_cm']) \
= lss3pt.compute(inputs['rotor_diameter'], inputs['rotor_mass'], inputs['rotor_thrust'], inputs['rotor_force_y'], inputs['rotor_force_z'], \
inputs['rotor_bending_moment_x'], inputs['rotor_bending_moment_y'], inputs['rotor_bending_moment_z'], \
inputs['overhang'], inputs['machine_rating'], inputs['drivetrain_efficiency'], \
inputs['gearbox_mass'], inputs['carrier_mass'], inputs['gearbox_cm'], inputs['gearbox_length'], \
inputs['shrink_disc_mass'], inputs['flange_length'], inputs['distance_hub2mb'], inputs['shaft_angle'], inputs['shaft_ratio'],
inputs['hub_flange_thickness'])
#-------------------------------------------------------------------------
class MainBearing_OM(ExplicitComponent):
''' MainBearings class
The MainBearings class is used to represent the main bearing components of a wind turbine drivetrain. It contains two subcomponents (main bearing and second bearing) which also inherit from the SubComponent class.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def initialize(self):
self.options.declare('bearing_position', default='main')
def setup(self):
# variables
self.add_input('bearing_mass', val=0.0, units='kg', desc='bearing mass from LSS model')
self.add_input('lss_diameter', val=0.0, units='m', desc='lss outer diameter at main bearing')
self.add_input('lss_design_torque', val=0.0, units='N*m', desc='lss design torque')
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('lss_mb_cm', val=np.array([0., 0., 0.]), units='m', desc='x,y,z location from shaft model')
# returns
self.add_output('mb_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('mb_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('mb_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
def compute(self, inputs, outputs):
mb = dc.MainBearing(self.options['bearing_position'])
(outputs['mb_mass'], outputs['mb_cm'], outputs['mb_I']) \
= mb.compute(inputs['bearing_mass'], inputs['lss_diameter'], inputs['lss_design_torque'], inputs['rotor_diameter'], inputs['lss_mb_cm'])
#-------------------------------------------------------------------------
class Gearbox_OM(ExplicitComponent):
''' Gearbox class
The Gearbox class is used to represent the gearbox component of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def initialize(self):
self.options.declare('debug', default=False)
def setup(self):
# variables
self.add_input('gear_ratio', val=0.0, desc='overall gearbox speedup ratio')
self.add_discrete_input('planet_numbers', val=np.array([0, 0, 0,]), desc='number of planets in each stage')
self.add_input('rotor_rpm', val=0.0, units='rpm', desc='rotor rpm at rated power')
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('rotor_torque', val=0.0, units='N*m', desc='rotor torque at rated power')
self.add_input('gearbox_input_xcm', val=0.00, units='m', desc='gearbox position along x-axis')
self.add_discrete_input('gear_configuration', val='eep', desc='string that represents the configuration of the gearbox (stage number and types)')
self.add_discrete_input('shaft_factor', val='normal', desc='normal or short shaft length')
# outputs
self.add_output('stage_masses', val=np.zeros(3), units='kg', desc='individual gearbox stage gearbox_masses')
self.add_output('gearbox_mass', val=0.0, units='kg', desc='overall component gearbox_mass')
self.add_output('gearbox_cm', val=np.zeros(3), units='m', desc='center of gearbox_mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('gearbox_I', val=np.zeros(3), units='kg*m**2', desc=' moments of gearbox_Inertia for the component [gearbox_Ixx, gearbox_Iyy, gearbox_Izz] around its center of gearbox_mass')
self.add_output('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_output('gearbox_height', val=0.0, units='m', desc='gearbox height')
self.add_output('gearbox_diameter', val=0.0, units='m', desc='gearbox diameter')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
mygearbox = dc.Gearbox(discrete_inputs['gear_configuration'], discrete_inputs['shaft_factor'], debug=self.options['debug'])
(outputs['stage_masses'], outputs['gearbox_mass'], outputs['gearbox_cm'], outputs['gearbox_I'], outputs['gearbox_length'], outputs['gearbox_height'], outputs['gearbox_diameter']) \
= mygearbox.compute(inputs['gear_ratio'], discrete_inputs['planet_numbers'], inputs['rotor_rpm'], inputs['rotor_diameter'], inputs['rotor_torque'], inputs['gearbox_input_xcm'])
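# Hypothetical usage sketch (not part of the original file): a single
# drivetrain component such as Gearbox_OM can be exercised on its own through
# an OpenMDAO Problem. The numeric values below are illustrative 5MW-class
# inputs, not validated data.
def _example_run_gearbox():
    prob = Problem()
    prob.model.add_subsystem('gearbox', Gearbox_OM(debug=False), promotes=['*'])
    prob.setup()
    prob['gear_ratio'] = 96.76
    prob['planet_numbers'] = np.array([3, 3, 1])
    prob['rotor_rpm'] = 12.1
    prob['rotor_diameter'] = 126.0
    prob['rotor_torque'] = 4.37e6
    prob['gearbox_input_xcm'] = 0.1
    prob.run_model()
    return prob['gearbox_mass']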
#-------------------------------------------------------------------
class HighSpeedSide_OM(ExplicitComponent):
'''
HighSpeedShaft class
The HighSpeedShaft class is used to represent the high speed shaft and mechanical brake components of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def setup(self):
# variables
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('rotor_torque', val=0.0, units='N*m', desc='rotor torque at rated power')
self.add_input('gear_ratio', val=0.0, desc='overall gearbox ratio')
self.add_input('lss_diameter', val=0.0, units='m', desc='low speed shaft outer diameter')
self.add_input('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_input('gearbox_height', val=0.0, units='m', desc='gearbox height')
self.add_input('gearbox_cm', val=np.zeros(3), units='m', desc='gearbox cm [x,y,z]')
self.add_input('hss_input_length', val=0.0, units='m', desc='high speed shaft length determined by user. Default 0.5m')
# returns
self.add_output('hss_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('hss_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('hss_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('hss_length', val=0.0, units='m', desc='length of high speed shaft')
self.hss = dc.HighSpeedSide()
def compute(self, inputs, outputs):
(outputs['hss_mass'], outputs['hss_cm'], outputs['hss_I'], outputs['hss_length']) \
= self.hss.compute(inputs['rotor_diameter'], inputs['rotor_torque'], inputs['gear_ratio'], inputs['lss_diameter'], inputs['gearbox_length'], inputs['gearbox_height'], inputs['gearbox_cm'], inputs['hss_input_length'])
#----------------------------------------------------------------------------------------------
class Generator_OM(ExplicitComponent):
'''Generator class
The Generator class is used to represent the generator of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def setup(self):
# variables
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('machine_rating', val=0.0, units='kW', desc='machine rating of generator')
self.add_input('gear_ratio', val=0.0, desc='overall gearbox ratio')
self.add_input('hss_length', val=0.0, units='m', desc='length of high speed shaft and brake')
self.add_input('hss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='cm of high speed shaft and brake')
self.add_input('rotor_rpm', val=0.0, units='rpm', desc='Speed of rotor at rated power')
self.add_discrete_input('drivetrain_design', val='geared', desc='geared or single_stage or multi_drive or pm_direct_drive')
#returns
self.add_output('generator_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('generator_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('generator_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
mygen = dc.Generator(discrete_inputs['drivetrain_design'])
(outputs['generator_mass'], outputs['generator_cm'], outputs['generator_I']) \
= mygen.compute(inputs['rotor_diameter'], inputs['machine_rating'], inputs['gear_ratio'], inputs['hss_length'], inputs['hss_cm'], inputs['rotor_rpm'])
#-------------------------------------------------------------------------------
class Transformer_OM(ExplicitComponent):
''' Transformer class
The transformer class is used to represent the transformer of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component if it is in fact uptower'''
def setup(self):
# inputs
self.add_input('machine_rating', val=0.0, units='kW', desc='machine rating of the turbine')
        self.add_input('tower_top_diameter', val=0.0, units='m', desc='tower top diameter for comparison of nacelle CM')
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter of turbine')
self.add_input('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_input('lss_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mb1_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mb2_mass', val=0.0, units='kg', desc='component mass')
self.add_input('gearbox_mass', val=0.0, units='kg', desc='component mass')
self.add_input('hss_mass', val=0.0, units='kg', desc='component mass')
self.add_input('generator_mass', val=0.0, units='kg', desc='component mass')
self.add_input('lss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('mb1_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('mb2_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('gearbox_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('hss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('generator_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('overhang', val=0.0, units='m', desc='nacelle overhang')
self.add_discrete_input('uptower_transformer', val=True)
# outputs
self.add_output('transformer_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('transformer_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('transformer_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
rnaadder = dc.RNASystemAdder()
rna_mass, rna_cm = rnaadder.compute(inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'], inputs['gearbox_mass'],
inputs['hss_mass'], inputs['generator_mass'], inputs['lss_cm'], inputs['mb1_cm'],
inputs['mb2_cm'], inputs['gearbox_cm'], inputs['hss_cm'], inputs['generator_cm'],
inputs['overhang'], inputs['rotor_mass'], inputs['machine_rating'])
transformer = dc.Transformer(discrete_inputs['uptower_transformer'])
(outputs['transformer_mass'], outputs['transformer_cm'], outputs['transformer_I']) = transformer.compute(inputs['machine_rating'], inputs['tower_top_diameter'],
inputs['rotor_mass'], inputs['generator_cm'],
inputs['rotor_diameter'], rna_mass, rna_cm)
#-------------------------------------------------------------------------
class Bedplate_OM(ExplicitComponent):
''' Bedplate class
The Bedplate class is used to represent the bedplate of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def initialize(self):
self.options.declare('debug', default=False)
def setup(self):
# variables
self.add_input('gearbox_length', val=0.0, units='m', desc='gearbox length')
self.add_input('gearbox_location', val=0.0, units='m', desc='gearbox CM location')
self.add_input('gearbox_mass', val=0.0, units='kg', desc='gearbox mass')
self.add_input('hss_location', val=0.0, units='m', desc='HSS CM location')
self.add_input('hss_mass', val=0.0, units='kg', desc='HSS mass')
self.add_input('generator_location', val=0.0, units='m', desc='generator CM location')
self.add_input('generator_mass', val=0.0, units='kg', desc='generator mass')
self.add_input('lss_location', val=0.0, units='m', desc='LSS CM location')
self.add_input('lss_mass', val=0.0, units='kg', desc='LSS mass')
self.add_input('lss_length', val=0.0, units='m', desc='LSS length')
self.add_input('lss_mb1_facewidth', val=0.0, units='m', desc='Upwind main bearing facewidth')
self.add_input('mb1_cm', val=np.zeros(3), units='m', desc='Upwind main bearing CM location')
self.add_input('mb1_mass', val=0.0, units='kg', desc='Upwind main bearing mass')
self.add_input('mb2_cm', val=np.zeros(3), units='m', desc='Downwind main bearing CM location')
self.add_input('mb2_mass', val=0.0, units='kg', desc='Downwind main bearing mass')
self.add_input('transformer_mass', val=0.0, units='kg', desc='Transformer mass')
self.add_input('transformer_cm', val=np.zeros(3), units='m', desc='transformer CM location')
self.add_input('tower_top_diameter', val=0.0, units='m', desc='diameter of the top tower section at the yaw gear')
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('machine_rating', val=0.0, units='kW', desc='machine_rating machine rating of the turbine')
self.add_input('rotor_mass', val=0.0, units='kg', desc='rotor mass')
self.add_input('rotor_bending_moment_y', val=0.0, units='N*m', desc='The bending moment about the y axis')
self.add_input('rotor_force_z', val=0.0, units='N', desc='The force along the z axis applied at hub center')
self.add_input('flange_length', val=0.0, units='m', desc='flange length')
self.add_input('distance_hub2mb', val=0.0, units='m', desc='length between rotor center and upwind main bearing')
self.add_discrete_input('uptower_transformer', val=True)
# outputs
self.add_output('bedplate_mass', val=0.0, units='kg', desc='overall component bedplate_mass')
self.add_output('bedplate_cm', val=np.zeros(3), units='m', desc='center of bedplate_mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('bedplate_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of bedplate_mass')
self.add_output('bedplate_length', val=0.0, units='m', desc='length of bedplate')
self.add_output('bedplate_height', val=0.0, units='m', desc='max height of bedplate')
self.add_output('bedplate_width', val=0.0, units='m', desc='width of bedplate')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
bpl = dc.Bedplate(discrete_inputs['uptower_transformer'], debug=self.options['debug'])
(outputs['bedplate_mass'], outputs['bedplate_cm'], outputs['bedplate_I'], outputs['bedplate_length'], outputs['bedplate_height'], outputs['bedplate_width']) \
= bpl.compute(inputs['gearbox_length'], inputs['gearbox_location'], inputs['gearbox_mass'], inputs['hss_location'], inputs['hss_mass'], inputs['generator_location'], inputs['generator_mass'], \
inputs['lss_location'], inputs['lss_mass'], inputs['lss_length'], inputs['mb1_cm'], inputs['lss_mb1_facewidth'], inputs['mb1_mass'], inputs['mb2_cm'], inputs['mb2_mass'], \
inputs['transformer_mass'], inputs['transformer_cm'], \
inputs['tower_top_diameter'], inputs['rotor_diameter'], inputs['machine_rating'], inputs['rotor_mass'], inputs['rotor_bending_moment_y'], inputs['rotor_force_z'], \
inputs['flange_length'], inputs['distance_hub2mb'])
#-------------------------------------------------------------------------------
class AboveYawMassAdder_OM(ExplicitComponent):
''' AboveYawMassAdder_OM class
The AboveYawMassAdder_OM class is used to represent the masses of all parts of a wind turbine drivetrain that
are above the yaw system.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def initialize(self):
self.options.declare('debug', default=False)
def setup(self):
# variables
self.add_input('machine_rating', val=0.0, units='kW', desc='machine rating')
self.add_input('lss_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mb1_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mb2_mass', val=0.0, units='kg', desc='component mass')
self.add_input('gearbox_mass', val=0.0, units='kg', desc='component mass')
self.add_input('hss_mass', val=0.0, units='kg', desc='component mass')
self.add_input('generator_mass', val=0.0, units='kg', desc='component mass')
self.add_input('bedplate_mass', val=0.0, units='kg', desc='component mass')
self.add_input('bedplate_length', val=0.0, units='m', desc='component length')
self.add_input('bedplate_width', val=0.0, units='m', desc='component width')
self.add_input('transformer_mass', val=0.0, units='kg', desc='component mass')
self.add_discrete_input('crane', val=True, desc='onboard crane present')
# returns
self.add_output('electrical_mass', val=0.0, units='kg', desc='component mass')
self.add_output('vs_electronics_mass', val=0.0, units='kg', desc='component mass')
self.add_output('hvac_mass', val=0.0, units='kg', desc='component mass')
self.add_output('controls_mass', val=0.0, units='kg', desc='component mass')
self.add_output('platforms_mass', val=0.0, units='kg', desc='component mass')
self.add_output('crane_mass', val=0.0, units='kg', desc='component mass')
self.add_output('mainframe_mass', val=0.0, units='kg', desc='component mass')
self.add_output('cover_mass', val=0.0, units='kg', desc='component mass')
self.add_output('above_yaw_mass', val=0.0, units='kg', desc='total mass above yaw system')
self.add_output('nacelle_length', val=0.0, units='m', desc='component length')
self.add_output('nacelle_width', val=0.0, units='m', desc='component width')
self.add_output('nacelle_height', val=0.0, units='m', desc='component height')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
aboveyawmass = dc.AboveYawMassAdder(discrete_inputs['crane'])
(outputs['electrical_mass'], outputs['vs_electronics_mass'], outputs['hvac_mass'], outputs['controls_mass'],
outputs['platforms_mass'], outputs['crane_mass'], outputs['mainframe_mass'], outputs['cover_mass'],
outputs['above_yaw_mass'], outputs['nacelle_length'], outputs['nacelle_width'], outputs['nacelle_height']) \
= aboveyawmass.compute(inputs['machine_rating'], inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'],
inputs['gearbox_mass'], inputs['hss_mass'], inputs['generator_mass'], inputs['bedplate_mass'],
inputs['bedplate_length'], inputs['bedplate_width'], inputs['transformer_mass'])
if self.options['debug']:
print('AYMA IN: {:.1f} kW BPl {:.1f} m BPw {:.1f} m'.format(
inputs['machine_rating'],inputs['bedplate_length'], inputs['bedplate_width']))
print('AYMA IN masses (kg): LSS {:.1f} MB1 {:.1f} MB2 {:.1f} GBOX {:.1f} HSS {:.1f} GEN {:.1f} BP {:.1f} TFRM {:.1f}'.format(
inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'], inputs['gearbox_mass'],
inputs['hss_mass'], inputs['generator_mass'], inputs['bedplate_mass'], inputs['transformer_mass']))
print('AYMA OUT masses (kg) : E {:.1f} VSE {:.1f} HVAC {:.1f} CNTL {:.1f} PTFM {:.1f} CRN {:.1f} MNFRM {:.1f} CVR {:.1f} AYM {:.1f}'.format(
outputs['electrical_mass'], outputs['vs_electronics_mass'], outputs['hvac_mass'], outputs['controls_mass'],
outputs['platforms_mass'], outputs['crane_mass'], outputs['mainframe_mass'], outputs['cover_mass'],
outputs['above_yaw_mass']))
print('AYMA OUT nacelle (m): L {:.2f} W {:.2f} H {:.2f}'.format(
outputs['nacelle_length'], outputs['nacelle_width'], outputs['nacelle_height']))
#---------------------------------------------------------------------------------------------------------------
class YawSystem_OM(ExplicitComponent):
''' YawSystem class
The YawSystem class is used to represent the yaw system of a wind turbine drivetrain.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def setup(self):
# variables
self.add_input('rotor_diameter', val=0.0, units='m', desc='rotor diameter')
self.add_input('rotor_thrust', val=0.0, units='N', desc='maximum rotor thrust')
self.add_input('tower_top_diameter', val=0.0, units='m', desc='tower top diameter')
self.add_input('above_yaw_mass', val=0.0, units='kg', desc='above yaw mass')
self.add_input('bedplate_height', val=0.0, units='m', desc='bedplate height')
self.add_discrete_input('yaw_motors_number', val=0, desc='default value - will be internally calculated')
# outputs
self.add_output('yaw_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('yaw_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('yaw_I', val=np.zeros(3), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
myyaw = dc.YawSystem(discrete_inputs['yaw_motors_number'])
(outputs['yaw_mass'], outputs['yaw_cm'], outputs['yaw_I']) \
= myyaw.compute(inputs['rotor_diameter'], inputs['rotor_thrust'], inputs['tower_top_diameter'], inputs['above_yaw_mass'], inputs['bedplate_height'])
#--------------------------------------------
class NacelleSystemAdder_OM(ExplicitComponent): #added to drive to include transformer
''' NacelleSystem class
The Nacelle class is used to represent the overall nacelle of a wind turbine.
It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.
It contains an update method to determine the mass, mass properties, and dimensions of the component.
'''
def setup(self):
# variables
self.add_input('above_yaw_mass', val=0.0, units='kg', desc='mass above yaw system')
self.add_input('yaw_mass', val=0.0, units='kg', desc='mass of yaw system')
self.add_input('lss_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mb1_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mb2_mass', val=0.0, units='kg', desc='component mass')
self.add_input('gearbox_mass', val=0.0, units='kg', desc='component mass')
self.add_input('hss_mass', val=0.0, units='kg', desc='component mass')
self.add_input('generator_mass', val=0.0, units='kg', desc='component mass')
self.add_input('bedplate_mass', val=0.0, units='kg', desc='component mass')
self.add_input('mainframe_mass', val=0.0, units='kg', desc='component mass')
self.add_input('lss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('mb1_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('mb2_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('gearbox_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('hss_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('generator_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('bedplate_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('lss_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('mb1_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('mb2_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('gearbox_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('hss_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('generator_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('bedplate_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
self.add_input('transformer_mass', val=0.0, units='kg', desc='component mass')
self.add_input('transformer_cm', val=np.array([0.0,0.0,0.0]), units='m', desc='component CM')
self.add_input('transformer_I', val=np.array([0.0,0.0,0.0]), units='kg*m**2', desc='component I')
# returns
self.add_output('nacelle_mass', val=0.0, units='kg', desc='overall component mass')
self.add_output('nacelle_cm', val=np.zeros(3), units='m', desc='center of mass of the component in [x,y,z] for an arbitrary coordinate system')
self.add_output('nacelle_I', val=np.zeros(6), units='kg*m**2', desc=' moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
def compute(self, inputs, outputs):
nacelleadder = dc.NacelleSystemAdder()
(outputs['nacelle_mass'], outputs['nacelle_cm'], outputs['nacelle_I']) \
= nacelleadder.compute(inputs['above_yaw_mass'], inputs['yaw_mass'], inputs['lss_mass'], inputs['mb1_mass'], inputs['mb2_mass'], inputs['gearbox_mass'], \
inputs['hss_mass'], inputs['generator_mass'], inputs['bedplate_mass'], inputs['mainframe_mass'], \
inputs['lss_cm'], inputs['mb1_cm'], inputs['mb2_cm'], inputs['gearbox_cm'], inputs['hss_cm'], inputs['generator_cm'], inputs['bedplate_cm'], \
inputs['lss_I'], inputs['mb1_I'], inputs['mb2_I'], inputs['gearbox_I'], inputs['hss_I'], inputs['generator_I'], inputs['bedplate_I'], \
inputs['transformer_mass'], inputs['transformer_cm'], inputs['transformer_I'])
#--------------------------------------------
class RNAMass(ExplicitComponent):
def setup(self):
# variables
self.add_input('rotor_mass', 0.0, units='kg', desc='mass of the rotor')
self.add_input('nacelle_mass', 0.0, units='kg', desc='mass of nacelle')
self.add_input('hub_system_cm', np.zeros(3), units='m', desc='location of hub center of mass relative to tower top in yaw-aligned c.s.')
self.add_input('nacelle_cm', np.zeros(3), units='m', desc='location of nacelle center of mass relative to tower top in yaw-aligned c.s.')
        # order for all moments of inertia is (xx, yy, zz, xy, xz, yz) in the yaw-aligned coordinate system
self.add_input('blades_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of all blades about hub center')
self.add_input('hub_system_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of hub about its center of mass')
self.add_input('nacelle_I', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of nacelle about its center of mass')
# outputs
self.add_output('rna_mass', 0.0, units='kg', desc='total mass of RNA')
self.add_output('rna_cm', np.zeros(3), units='m', desc='location of RNA center of mass relative to tower top in yaw-aligned c.s.')
self.add_output('rna_I_TT', np.zeros(6), units='kg*m**2', desc='mass moments of inertia of RNA about tower top in yaw-aligned coordinate system')
self.declare_partials('*','*')
def _assembleI(self, I):
Ixx, Iyy, Izz, Ixy, Ixz, Iyz = I[0], I[1], I[2], I[3], I[4], I[5]
return
|
np.array([[Ixx, Ixy, Ixz], [Ixy, Iyy, Iyz], [Ixz, Iyz, Izz]])
|
numpy.array
|
from pytorch_pretrained_bert import BertModel
from pytorch_pretrained_bert.modeling import BertConfig
from .transformer import TransformerDecoder
import torch.nn as nn
import torch
import numpy as np
import onmt
import copy
MAX_SIZE = 512
def clone_or_share_layer(layer1, layer2, share=False):
if share:
layer1.weight, layer1.bias = layer2.weight, layer2.bias
else:
layer1.weight, layer1.bias = \
nn.Parameter(
layer2.weight.clone()), nn.Parameter(layer2.bias.clone())
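# Illustration (hypothetical helper, not part of the original file): share=True
# ties the two layers to the very same Parameter objects, while share=False
# gives layer1 an independent copy of layer2's weights.
def _example_clone_or_share():
    a, b, c = nn.Linear(4, 4), nn.Linear(4, 4), nn.Linear(4, 4)
    clone_or_share_layer(a, b, share=True)
    assert a.weight is b.weight and a.bias is b.bias  # tied parameters
    clone_or_share_layer(c, b, share=False)
    assert c.weight is not b.weight                   # separate storage
    assert torch.equal(c.weight, b.weight)            # but equal values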
class MyBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, bert_embeddings, token_type='A'):
super(MyBertEmbeddings, self).__init__()
self.word_lut = bert_embeddings.word_embeddings
self.position_embeddings = bert_embeddings.position_embeddings
self.token_type_embeddings = bert_embeddings.token_type_embeddings
self.LayerNorm = bert_embeddings.LayerNorm
self.dropout = bert_embeddings.dropout
self.token_type = token_type
def forward(self, input_ids, token_type_ids=None, step=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(
seq_length,
dtype=torch.long,
device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
            if self.token_type == 'A':
token_type_ids = torch.zeros_like(input_ids)
else: # 'B'
token_type_ids = torch.ones_like(input_ids)
if step is not None:
position_ids.fill_(step)
words_embeddings = self.word_lut(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = \
words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
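# Usage sketch (assumed, not taken from the original file): the wrapper above
# lets the decoder side reuse a pretrained BERT model's word/position/token-type
# embedding tables, here with token type 'B'.
def _example_wrap_bert_embeddings(tgt_ids):
    bert = BertModel.from_pretrained('bert-base-uncased')
    dec_embeddings = MyBertEmbeddings(bert.embeddings, token_type='B')
    return dec_embeddings(tgt_ids)  # [batch, tgt_len, hidden_size]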
class BERTDecoderLayer(nn.Module):
"""
Args:
d_model (int): the dimension of keys/values/queries in
MultiHeadedAttention, also the input size of
the first-layer of the PositionwiseFeedForward.
heads (int): the number of heads for MultiHeadedAttention.
d_ff (int): the second-layer of the PositionwiseFeedForward.
dropout (float): dropout probability(0-1.0).
self_attn_type (string): type of self-attention scaled-dot, average
"""
def __init__(self, bert_layer, init_context=False):
super(BERTDecoderLayer, self).__init__()
num_heads = \
bert_layer.attention.self.num_attention_heads
hidden_size = \
bert_layer.attention.self.query.weight.size(0)
self.init_context = init_context
self.dropout = bert_layer.attention.self.dropout.p
# Create self-attention layer
self.self_attn = onmt.modules.MultiHeadedAttention(
num_heads, hidden_size, dropout=self.dropout)
self.self_attn_drop = \
bert_layer.attention.output.dropout
self.self_attn_norm = \
copy.deepcopy(bert_layer.attention.output.LayerNorm)
        # Initialize self-attention layers with BERT weights
self.self_attn.linear_keys = bert_layer.attention.self.key
self.self_attn.linear_values = bert_layer.attention.self.value
self.self_attn.linear_query = bert_layer.attention.self.query
self.self_attn.final_linear = bert_layer.attention.output.dense
# Create context-attention layer 1
self.context_attn = onmt.modules.MultiHeadedAttention(
num_heads, hidden_size, dropout=self.dropout)
self.context_attn_drop = \
bert_layer.attention.output.dropout
self.context_attn_norm = \
copy.deepcopy(bert_layer.attention.output.LayerNorm)
if init_context:
            # Initialize context-attention layers with BERT weights
clone_or_share_layer(
self.context_attn.linear_keys,
bert_layer.attention.self.key,
share=False
)
clone_or_share_layer(
self.context_attn.linear_values,
bert_layer.attention.self.value,
share=False
)
clone_or_share_layer(
self.context_attn.linear_query,
bert_layer.attention.self.query,
share=False
)
clone_or_share_layer(
self.context_attn.final_linear,
bert_layer.attention.output.dense,
share=False
)
self.intermediate = bert_layer.intermediate
self.output = bert_layer.output
mask = self._get_attn_subsequent_mask(MAX_SIZE)
# Register self.mask as a buffer in BERTDecoderLayer, so
# it gets BERTDecoderLayer's cuda behavior automatically.
self.register_buffer('mask', mask)
def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
layer_cache=None, step=None):
"""
Args:
inputs (`FloatTensor`): `[batch_size x 1 x model_dim]`
memory_bank (`FloatTensor`): `[batch_size x src_len x model_dim]`
src_pad_mask (`LongTensor`): `[batch_size x 1 x src_len]`
tgt_pad_mask (`LongTensor`): `[batch_size x 1 x 1]`
Returns:
(`FloatTensor`, `FloatTensor`):
* output `[batch_size x 1 x model_dim]`
* attn `[batch_size x 1 x src_len]`
"""
dec_mask = None
if step is None:
dec_mask = torch.gt(tgt_pad_mask +
self.mask[:, :tgt_pad_mask.size(-1),
:tgt_pad_mask.size(-1)], 0)
query, attn = self.self_attn(inputs, inputs, inputs,
mask=dec_mask,
layer_cache=layer_cache,
type="self")
query_norm = self.self_attn_norm(self.self_attn_drop(query) + inputs)
mid, attn = self.context_attn(memory_bank, memory_bank,
query_norm,
mask=src_pad_mask,
layer_cache=layer_cache,
type="context")
mid_norm = self.context_attn_norm(
self.context_attn_drop(mid) + query_norm)
intermediate_output = self.intermediate(mid_norm)
output = self.output(intermediate_output, mid_norm)
return output, attn
def _get_attn_subsequent_mask(self, size):
"""
Get an attention mask to avoid using the subsequent info.
Args:
size: int
Returns:
(`LongTensor`):
* subsequent_mask `[1 x size x size]`
"""
attn_shape = (1, size, size)
subsequent_mask = np.triu(
|
np.ones(attn_shape)
|
numpy.ones
|
"""
The retain_* methods implement higher-order derivatives. They are kept separate from the plain
backward methods for performance, since higher-order derivatives are rarely needed.
Note that higher-order derivatives are not implemented for every operator; some operators lack them.
"""
from typing import *
import numpy as np
from numpy import ndarray
from . import _tensor
from .base import ComputeNode, ComputeNodeDetached
from . import functional
from .dtypes import float32
if TYPE_CHECKING:
from ._tensor import Tensor
def from_numpy(data: ndarray, requires_grad=False, is_leaf=False):
return _tensor.Tensor(data, requires_grad=requires_grad, is_leaf=is_leaf)
def full_like(a: "Tensor", full_val: Union[int, float], dtype=None, requires_grad=False) -> "Tensor":
data = np.full_like(a.data, full_val, dtype or a.dtype)
return _tensor.Tensor(data, requires_grad=requires_grad)
def full(shape, fill_val, dtype=float32, requires_grad=False) -> "Tensor":
data = np.full(shape, fill_val, dtype)
return _tensor.Tensor(data, requires_grad=requires_grad)
def zeros(shape, dtype=float32, requires_grad=False) -> "Tensor":
return _tensor.Tensor(np.zeros(shape, dtype), requires_grad=requires_grad)
def zeros_like(a: 'Tensor', dtype=None, requires_grad=False) -> "Tensor":
return _tensor.Tensor(np.zeros_like(a.data, dtype or a.dtype), requires_grad=requires_grad)
def ones(shape, dtype=float32, requires_grad=False):
return _tensor.Tensor(np.ones(shape, dtype), requires_grad=requires_grad)
def ones_like(a: "Tensor", dtype=None, requires_grad=False):
return _tensor.Tensor(np.ones_like(a.data, dtype or a.dtype), requires_grad=requires_grad)
def apply_broadcast_a(sa, sb, grad_a):
if sa == sb:
return grad_a
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if db != da == 1:
grad_a = np.expand_dims(grad_a.sum(axis=i), i)
return grad_a.reshape(sa)
def apply_broadcast_b(sa, sb, grad_b):
if sa == sb:
return grad_b
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if da != db == 1:
grad_b = np.expand_dims(grad_b.sum(axis=i), i)
return grad_b.reshape(sb)
def apply_broadcast(sa, sb, grad_a, grad_b):
if sa == sb:
return grad_a, grad_b
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if db != da == 1:
            # We sum here because broadcasting created n paths to this node; by the
            # distributive law, factoring out the common part leaves the sum of those n paths,
            # e.g. 1 x 2 x 3 x c1 + 1 x 2 x 3 x c2 + 1 x 2 x 3 x c3 ==> 1 x 2 x 3 x (c1 + c2 + c3)
grad_a = np.expand_dims(grad_a.sum(i), i)
if da != db == 1:
grad_b = np.expand_dims(grad_b.sum(i), i)
return grad_a.reshape(sa), grad_b.reshape(sb)
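# --- Illustrative sketch (not part of the library code) ---------------------
# Pure-numpy sanity check for the reduction above: when a of shape (3,) is
# broadcast against b of shape (2, 3), the upstream gradient dy has shape
# (2, 3) and must be summed over the broadcast axis to recover a's shape.
def _broadcast_grad_sketch():
    sa, sb = (3,), (2, 3)
    dy = np.ones(sb)                         # upstream gradient of a + b
    grad_a = apply_broadcast_a(sa, sb, dy)
    grad_b = apply_broadcast_b(sa, sb, dy)
    assert grad_a.shape == sa and np.all(grad_a == 2.0)    # two paths summed
    assert grad_b.shape == sb
    return grad_a, grad_b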
def apply_broadcast_a_tensor(sa, sb, grad_a: "Tensor"):
if sa == sb:
return grad_a
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if db != da == 1:
# grad_a = np.expand_dims(grad_a.sum(axis=i), i)
grad_a = UnSqueeze(grad_a.sum(dim=i), i)
return grad_a.reshape(sa)
def apply_broadcast_b_tensor(sa, sb, grad_b: "Tensor"):
if sa == sb:
return grad_b
max_dim = max(len(sa), len(sb))
new_sa = (1,) * (max_dim - len(sa)) + sa
new_sb = (1,) * (max_dim - len(sb)) + sb
for i, (da, db) in enumerate(zip(new_sa, new_sb)):
if da != db == 1:
# grad_b = np.expand_dims(grad_b.sum(axis=i), i)
grad_b = ExpandDims(grad_b.sum(dim=i), i)
return grad_b.reshape(sb)
class Add(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a + b
def backward_a(self, dy):
a, b = self.inputs_data
res = apply_broadcast_a(a.shape, b.shape, dy)
return res
def backward_b(self, dy):
a, b = self.inputs_data
res = apply_broadcast_b(a.shape, b.shape, dy)
return res
def retain_backward_a(self, dy):
a, b = self.inputs
res = apply_broadcast_a_tensor(a.shape, b.shape, dy)
return res
def retain_backward_b(self, dy):
a, b = self.inputs
res = apply_broadcast_b_tensor(a.shape, b.shape, dy)
return res
@staticmethod
def at(a: "Tensor", indices, b: "Tensor"):
if not isinstance(b, (int, float)):
b = (b,) * len(indices)
if not isinstance(b, _tensor.Tensor):
b = _tensor.Tensor(b)
if isinstance(indices, list):
for i, idx in enumerate(indices):
a = a * 1
a[idx] += b[i]
elif isinstance(indices, tuple):
a[indices] = b
return a
class Sub(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a - b
def backward_a(self, dy):
a, b = self.inputs_data
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = -dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = -dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Mul(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a * b
def backward_a(self, dy):
a, b = self.inputs_data
dy = b * dy
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = a * dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
dy = b * dy
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = a * dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Div(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return a / b
def backward_a(self, dy):
a, b = self.inputs_data
dy = (1 / b) * dy
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = -a * (b ** (-2)) * dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
dy = (1 / b) * dy
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = -a * (b ** (-2)) * dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Pow(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
return np.power(a, b)
def backward_a(self, dy):
a, b = self.inputs_data
dy = b * (a ** (b - 1)) * dy
return apply_broadcast_a(a.shape, b.shape, dy)
def backward_b(self, dy):
a, b = self.inputs_data
dy = np.power(a, b) * np.log(a) * dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
dy = b * (a ** (b - 1)) * dy
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = np.power(a, b) * np.log(a) * dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
class Exp(ComputeNode):
def forward(self):
a, = self.inputs_data
res = np.exp(a)
self.kwargs["res"] = res
return res
def backward(self, dy):
res = self.kwargs['res']
return res * dy
def retain_backward(self, dy):
res = _tensor.Tensor(self.kwargs['res'])
return res * dy
class T(ComputeNode):
def forward(self) -> ndarray:
a, = self.inputs_data
return a.T
def backward(self, dy):
return dy.T
retain_backward = backward
class CopySlices(ComputeNode):
def __init__(self, a: "Tensor", idx, val):
super().__init__(a.copy())
self.out = a
self.idx = idx
self.val = val
def forward(self) -> ndarray:
self.out.data[self.idx] = self.val
self.out.grad_fn = self
self.out.grad = None
return self.out.data
def backward(self, dy: Union[int, "ndarray"] = None) -> Union[Tuple["ndarray", ...], "ndarray"]:
if self.out.is_leaf:
raise RuntimeError(
"变量已经移动到计算图内部了,移动到计算图内部的变量不能在之后进行修改,只能在移动之前修改.")
dy[self.idx] = 0 # ComputeGraph是一次性计算完成的,没有分离,所以可以直接修改dy
return dy
retain_backward = backward
class AsType(ComputeNode):
def __init__(self, a: "Tensor", dtype):
super().__init__(a)
self.new_dtype = dtype
self.old_dtype = a.dtype
def forward(self) -> np.ndarray:
a, = self.inputs_data
return a.astype(self.new_dtype)
def backward(self, dy: Union[int, "ndarray"] = None) -> Union[Tuple["ndarray", ...], "ndarray"]:
return dy.astype(self.old_dtype)
def retain_backward(self, dy: "Tensor") -> "Tensor":
return dy.astype(self.old_dtype)
class Swapaxes(ComputeNodeDetached):
def __init__(self, a: "Tensor", axis1, axis2):
super().__init__(a)
self.axis1 = axis1
self.axis2 = axis2
def forward(self) -> ndarray:
a, = self.inputs_data
return np.swapaxes(a, self.axis1, self.axis2)
def backward(self, dy: "ndarray") -> "ndarray":
return np.swapaxes(dy, self.axis2, self.axis1)
def retain_backward(self, dy: "Tensor") -> "Tensor":
da = Swapaxes(dy, self.axis2, self.axis1)
return da
class Dot(ComputeNodeDetached):
"""
    Matrix multiplication involves no broadcasting, so the gradient rule is uniform and easy to handle.
    A convolution can be converted into a matrix multiplication via im2col.
"""
def forward(self) -> ndarray:
a, b = self.inputs_data
res = np.dot(a, b)
return res
def backward_a(self, dy):
_, b = self.inputs_data
if b.ndim >= 2:
b = np.swapaxes(b, -1, -2)
res = np.dot(dy, b)
for i in range(len(res.shape) - 2):
res = res.sum(1)
return res
def backward_b(self, dy):
a, _ = self.inputs_data
if a.ndim >= 2:
a = np.swapaxes(a, -1, -2)
res = np.dot(a, dy)
for i in range(len(res.shape) - 2):
res = res.sum(1)
return res
def retain_backward_a(self, dy):
_, b = self.inputs
if b.ndim >= 2:
b = Swapaxes(b, -1, -2)
res = Dot(dy, b)
for i in range(len(res.shape) - 2):
res = res.sum(1)
return res
def retain_backward_b(self, dy):
a, _ = self.inputs
if a.ndim >= 2:
a = Swapaxes(a, -1, -2)
res = Dot(a, dy)
for i in range(len(res.shape) - 2):
res = res.sum(1)
return res
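# --- Illustrative sketch (not part of the library code) ---------------------
# The backward passes above implement the usual matrix-multiplication rule
# dA = dY @ B^T and dB = A^T @ dY. A quick numerical check of that rule with
# plain numpy, independent of the Tensor class:
def _dot_grad_rule_sketch():
    rng = np.random.default_rng(0)
    A, B = rng.normal(size=(4, 3)), rng.normal(size=(3, 5))
    dY = np.ones((4, 5))                           # upstream gradient of A @ B
    dA, dB = dY @ B.T, A.T @ dY                    # analytic gradients
    eps, i, j = 1e-6, 1, 2
    A_eps = A.copy(); A_eps[i, j] += eps
    fd = ((A_eps @ B).sum() - (A @ B).sum()) / eps  # finite difference
    assert abs(fd - dA[i, j]) < 1e-4
    return dA, dB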
class Abs(ComputeNode):
def forward(self) -> ndarray:
a, = self.inputs_data
return np.abs(a)
def backward(self, dy):
a, = self.inputs_data
mask = np.ones_like(dy)
mask[a < 0] = -1
return mask * dy
def retain_backward(self, dy: "Tensor") -> "Tensor":
a, = self.inputs
mask = ones_like(dy)
mask[a < 0] = -1
return mask * dy
class Log(ComputeNode):
def forward(self) -> ndarray:
x, = self.inputs_data
return np.log(x)
def backward(self, dy: ndarray):
x, = self.inputs_data
grad = (1 / x)
return grad * dy
def retain_backward(self, dy: ndarray):
x, = self.inputs
grad = (1 / x)
return grad * dy
class LogN(ComputeNodeDetached):
def forward(self) -> ndarray:
a, b = self.inputs_data
        return np.emath.logn(a, b)  # log base a of b, matching the gradients below
def backward_a(self, dy):
a, b = self.inputs_data
dy = (-1 / (((np.emath.logn(b, a)) ** 2) * a * np.log(b))) * dy
return apply_broadcast_a(a.shape, b.shape, dy.astype(float32))
def backward_b(self, dy):
a, b = self.inputs_data
dy = (1 / (b * np.log(a))) * dy
return apply_broadcast_b(a.shape, b.shape, dy)
def retain_backward_a(self, dy):
a, b = self.inputs
dy = (-1 / (((LogN(b, a)) ** 2) * a * Log(b))) * dy
return apply_broadcast_a_tensor(a.shape, b.shape, dy)
def retain_backward_b(self, dy):
a, b = self.inputs
dy = (1 / (b * Log(a))) * dy
return apply_broadcast_b_tensor(a.shape, b.shape, dy)
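# --- Illustrative sketch (not part of the library code) ---------------------
# Finite-difference check of the log_a(b) partial derivatives used in
# backward_a / backward_b above, done directly in numpy so it does not depend
# on the Tensor machinery:
def _logn_grad_sketch(a=2.0, b=8.0, eps=1e-6):
    f = lambda a_, b_: np.log(b_) / np.log(a_)             # log_a(b)
    da_analytic = -np.log(b) / (a * np.log(a) ** 2)
    db_analytic = 1.0 / (b * np.log(a))
    da_fd = (f(a + eps, b) - f(a, b)) / eps
    db_fd = (f(a, b + eps) - f(a, b)) / eps
    assert abs(da_analytic - da_fd) < 1e-4 and abs(db_analytic - db_fd) < 1e-4
    return da_analytic, db_analytic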
class Sin(ComputeNode):
def forward(self) -> ndarray:
a, = self.inputs_data
return np.sin(a)
def backward(self, dy: ndarray):
a, = self.inputs_data
grad_a = np.cos(a)
return grad_a * dy
def retain_backward(self, dy: ndarray):
a, = self.inputs
grad_a = Cos(a)
return grad_a * dy
class Cos(ComputeNode):
def forward(self) -> ndarray:
a, = self.inputs_data
return np.cos(a)
def backward(self, dy: ndarray):
a, = self.inputs_data
grad_a = -np.sin(a)
return grad_a * dy
def retain_backward(self, dy: ndarray):
a, = self.inputs
grad_a = -Sin(a)
return grad_a * dy
class Tan(ComputeNode):
def forward(self) -> ndarray:
a, = self.inputs_data
return np.tan(a)
def backward(self, dy: ndarray):
a, = self.inputs_data
grad_a = 1 / (np.cos(a) ** 2)
return grad_a * dy
def retain_backward(self, dy: ndarray):
a, = self.inputs
grad_a = 1 / (Cos(a) ** 2)
return grad_a * dy
class ArcSin(ComputeNodeDetached):
def __init__(self, a):
super().__init__(a)
def forward(self) -> ndarray:
a, = self.inputs_data
return np.arcsin(a)
def backward(self, dy: ndarray) -> ndarray:
a = self.inputs_data[0]
grad = 1 / np.sqrt(1 - a ** 2)
return grad * dy
def retain_backward(self, dy: ndarray) -> ndarray:
a = self.inputs[0]
grad = 1 / Sqrt(1 - a ** 2)
return grad * dy
class ArcCos(ComputeNodeDetached):
def __init__(self, a):
super().__init__(a)
def forward(self) -> ndarray:
a, = self.inputs_data
return np.arccos(a)
def backward(self, dy: ndarray) -> ndarray:
a, = self.inputs_data
grad = -1 / np.sqrt(1 - a ** 2)
return grad * dy
def retain_backward(self, dy: ndarray) -> ndarray:
a, = self.inputs
grad = -1 / Sqrt(1 - a ** 2)
return grad * dy
class ArcTan(ComputeNodeDetached):
def __init__(self, a):
super().__init__(a)
def forward(self) -> ndarray:
a, = self.inputs_data
return np.arctan(a)
def backward(self, dy: ndarray) -> ndarray:
a, = self.inputs_data
grad = 1 / (1 + a ** 2)
return grad * dy
def retain_backward(self, dy: ndarray) -> ndarray:
a, = self.inputs
grad = 1 / (1 + a ** 2)
return grad * dy
class Transpose(ComputeNode):
def __init__(self, a, axes):
super().__init__(a)
self.axes = axes
def forward(self) -> ndarray:
a = self.inputs_data[0]
return np.transpose(a, self.axes)
def backward(self, dy: Union[int, ndarray] = None) -> Union[Tuple[ndarray, ...], ndarray]:
axes = list(range(len(self.axes)))
for i, old_ax in enumerate(self.axes):
            axes[old_ax] = i  # output axis i came from old axis old_ax, so put i back at slot old_ax (e.g. output axis 1 from old axis 3 -> place 1 at position 3)
return np.transpose(dy, axes)
def retain_backward(self, dy: Union[int, "Tensor"] = None) -> Union[Tuple["Tensor", ...], "Tensor"]:
axes = list(range(len(self.axes)))
for i, old_ax in enumerate(self.axes):
            axes[old_ax] = i  # output axis i came from old axis old_ax, so put i back at slot old_ax (e.g. output axis 1 from old axis 3 -> place 1 at position 3)
return Transpose(dy, axes)
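# --- Illustrative sketch (not part of the library code) ---------------------
# The backward pass above transposes dy with the inverse permutation of
# self.axes. A small numpy check that this inverse permutation really restores
# the original axis order:
def _transpose_inverse_perm_sketch():
    x = np.arange(24).reshape(2, 3, 4)
    axes = (2, 0, 1)
    inverse = [0] * len(axes)
    for new_pos, old_ax in enumerate(axes):
        inverse[old_ax] = new_pos                 # same rule as backward()
    y = np.transpose(x, axes)                     # shape (4, 2, 3)
    assert np.array_equal(np.transpose(y, inverse), x)
    return inverse                                # [1, 2, 0]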
class Index(ComputeNode):
def __init__(self, a: "Tensor", idx: Union[ndarray, "Tensor"]):
super().__init__(a)
if isinstance(idx, _tensor.Tensor):
idx = idx.data
self.idx = idx
def forward(self) -> ndarray:
return self.inputs_data[0][self.idx]
def backward(self, dy: Union[int, ndarray] = None) -> Union[Tuple[ndarray, ...], ndarray]:
a, = self.inputs_data
grad = np.zeros_like(a)
grad[self.idx] = dy
return grad
def retain_backward(self, dy: Union[int, "Tensor"] = None) -> Union[Tuple["Tensor", ...], "Tensor"]:
a, = self.inputs
grad = zeros_like(a)
grad[self.idx] = dy
return grad
class Mean(ComputeNode):
def __init__(self, a, dim=None, keepdim=False):
super().__init__(a)
self.dim = dim
self.keepdim = keepdim
def forward(self) -> ndarray:
a, = self.inputs_data
return np.mean(a, axis=self.dim, keepdims=self.keepdim)
def backward(self, dy: Union[int, ndarray] = None) -> Union[Tuple[ndarray, ...], ndarray]:
a, = self.inputs_data
if self.dim is None:
val = 1 / int(np.prod(a.shape))
else:
if isinstance(self.dim, int):
val = 1 / a.shape[self.dim]
dims = (self.dim,)
else:
dims = self.dim
val = 1
for dim in dims:
val *= 1 / a.shape[dim]
if not self.keepdim:
for dim in sorted(dims):
dy = np.expand_dims(dy, dim)
return np.full(a.shape, val, float32) * dy
def retain_backward(self, dy: Union[int, "Tensor"] = None) -> Union[Tuple["Tensor", ...], "Tensor"]:
a, = self.inputs
if self.dim is None:
val = 1 / int(
|
np.prod(a.shape)
|
numpy.prod
|
"""
@author: Timo
Description:
    Simulation of a network consisting of 3 regions.
    Plots follow below.
Python version:
    3.5.1
"""
import numpy as np
import matplotlib.pyplot as plt
from programs import RK4 as RK4
#from programs import Euler as RK1
from programs import hemodynamicModel as HM
#from programs import bilinearModel as BM
#-----------------------------------------------------------------------------------------------------------------
# Parameters for example 1
T = 100.                        # end time
t0 = 0.                         # start time
dt = 0.1                        # time-step length
t = np.arange(t0,T+dt,dt)       # time array
A = np.array([[-1.,0.,0. ],
              [0.3,-1,0.2],
              [0.6,0.,-1.]])    # coupling
B1 = np.zeros((3,3))            # coupling change induced by the stimuli
B2 = np.array([[0  , 0, 0  ],
               [0  , 0, 0.8],
               [0.1, 0, 0  ]])
B = np.array([B1, B2])          # collect the induced coupling changes in one array
C = np.array([[1, 0],
              [0, 0],
              [0, 0]])          # external influence on brain activity
D1 = np.zeros((3,3))            # neuronally induced coupling change
D2 = np.array([[0  , 0, 0  ],
               [0  , 0, 0.8],
               [0.1, 0, 0  ]])
D3 = np.zeros((3,3))
D = np.array([D1, D2])          # collect the neuronal coupling changes in one array
# external stimulus
u = np.zeros((len(B), len(t)))
u[0,101:-99:200] = 10.          # stimulus u1
u[1,451:550] = 2.               # stimulus u2
u[1,251:350] = 5.               # stimulus u2
u[1, 691:910] = 2.              # stimulus u2
# initial conditions
x_0 = np.ones(15)
x_0[0:6] = 0.
# collect the parameters for the "hemodynamicModel"
theta = list([A,B,C,D])
#-----------------------------------------------------------------------------------------------------------------
# Simulation
#z_0 = np.array([0,0,0])
#z = RK4.RK4(BM.bilinearModel,theta,u,z_0,t0,T,dt)
x = RK4.RK4(HM.stateEquations,theta,u,x_0,t0,T,dt)      # solve with the RK4 scheme
#x = RK1.Euler(HM.stateEquations,theta,u,x_0,t0,T,dt)   # solve with the explicit Euler scheme
y = HM.BOLDsignal(x)            # compute the BOLD signal
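# --- Illustrative sketch (not part of the original script) ------------------
# programs.RK4 is an external helper whose internals are not shown here; the
# classical fourth-order Runge-Kutta step it presumably implements looks like
# the generic sketch below for an ODE dx/dt = f(t, x). This only documents the
# scheme; the exact signature used by programs.RK4 may differ.
def _rk4_step_sketch(f, t_n, x_n, h):
    k1 = f(t_n, x_n)
    k2 = f(t_n + h / 2.0, x_n + h / 2.0 * k1)
    k3 = f(t_n + h / 2.0, x_n + h / 2.0 * k2)
    k4 = f(t_n + h, x_n + h * k3)
    return x_n + h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)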
plt.rcParams['figure.figsize'] = (15.0, 10.0)   # adjust the figure size
#-----------------------------------------------------------------------------------------------------------------
# Plot the bilinear model
#-------------------------- BOLD ------------------------------------
f1 = plt.figure(1)
f1.suptitle('Bilinear model', fontsize = 20)
# Stimulus
ax1 = plt.subplot(311)
ax1.tick_params(width = 1)
plt.plot(t,u[0,:])
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel('$u_1(t)$', fontsize = 16.)
plt.title('Stimuli')
ax2 = plt.subplot(312,sharex = ax1, sharey =ax1)
ax2.tick_params(width = 1)
plt.plot(t,u[1,:])
ax2.set_ylim([0,np.max(u)+1])
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylabel('$u_2(t)$', fontsize = 16.)
# Plot the BOLD signal
ax3 = plt.subplot(313,sharex = ax1)
plt.setp(ax3.get_xticklabels(), fontsize = 14.)
plt.xticks(np.arange(10,110,10))
ax3.tick_params(width = 1)
# Region 1:
plt.plot(t,y[0,:],'r',label='Region 1')   # BOLD signal
# Region 2:
plt.plot(t,y[1,:],'g',label='Region 2')   # BOLD signal
# Region 3:
plt.plot(t,y[2,:],'b',label='Region 3')   # BOLD signal
ax3.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),
fancybox=True, shadow=True, ncol=5)
plt.xlabel('Time t', fontsize = 14.)
plt.ylabel('$y(t)$', fontsize = 16.)
plt.title('BOLD signal by region')
f1.savefig('hemodynamicExample-1_bilinear_BOLD.eps')
#-------------------------- Brain activity ------------------------------------
f2 = plt.figure(2)
f2.suptitle('Bilinear model', fontsize = 20)
# Plot the stimuli
ax4 = plt.subplot(311)
ax4.tick_params(width = 1)
plt.plot(t,u[0,:])
plt.setp(ax4.get_xticklabels(), visible=False)
plt.ylabel('$u_1(t)$', fontsize = 16.)
plt.title('Stimuli')
ax5 = plt.subplot(312,sharex = ax4, sharey =ax4)
ax5.tick_params(width = 1)
plt.plot(t,u[1,:])
ax5.set_ylim([0,np.max(u)+1])
plt.setp(ax5.get_xticklabels(), visible=False)
plt.ylabel('$u_2(t)$', fontsize = 16.)
# Plot the brain activity
ax6 = plt.subplot(313,sharex = ax4)
plt.setp(ax6.get_xticklabels(), fontsize = 14.)
plt.xticks(np.arange(10,110,10))
ax6.tick_params(width = 1)
# Region 1:
plt.plot(t,x[0,:],'r',label='Region 1')    # brain activity
#plt.plot(t,x[3,:],'r',label='Region 1')   # vasodilatory signal
#plt.plot(t,x[6,:],'r',label='Region 1')   # blood flow
#plt.plot(t,x[9,:],'r',label='Region 1')   # blood volume
#plt.plot(t,x[12,:],'r',label='Region 1')  # deoxyhemoglobin content
# Region 2:
plt.plot(t,x[1,:],'g',label='Region 2')    # brain activity
#plt.plot(t,x[4,:],'g',label='Region 2')   # vasodilatory signal
#plt.plot(t,x[7,:],'g',label='Region 2')   # blood flow
#plt.plot(t,x[10,:],'g',label='Region 2')  # blood volume
#plt.plot(t,x[13,:],'g',label='Region 2')  # deoxyhemoglobin content
# Region 3:
plt.plot(t,x[2,:],'b',label='Region 3')    # brain activity
#plt.plot(t,x[5,:],'b',label='Region 3')   # vasodilatory signal
#plt.plot(t,x[8,:],'b',label='Region 3')   # blood flow
#plt.plot(t,x[11,:],'b',label='Region 3')  # blood volume
#plt.plot(t,x[14,:],'b',label='Region 3')  # deoxyhemoglobin content
ax6.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),
fancybox=True, shadow=True, ncol=5)
plt.xlabel('Time t', fontsize = 14.)
plt.ylabel('$z(t)$', fontsize = 16.)
plt.title('Brain activity by region')
#plt.show()
f2.savefig('hemodynamicExample-1_bilinear_Aktivitaet.eps')
#--------------------------------------------------------------------- Linear model for comparison -----------------------------------------------
# Change the initial parameters to B=0 and D=0
Blin = np.array([
|
np.zeros((3,3))
|
numpy.zeros
|
from os import read
import cv2
import os.path as osp
from abc import ABC, abstractmethod
import numpy as np
import os
import pdb
class Dataset(ABC):
def __init__(self) -> None:
pass
@abstractmethod
def get_next_image(self) -> np.ndarray:
pass
class Parking_dataset(Dataset):
def __init__(self, img_ptr=0) -> None:
super(Parking_dataset, self).__init__()
self.__dataset_root_path = "../data/parking/"
self.__img_dir_path = osp.join(self.__dataset_root_path, "images")
# load K matrix
K_path = osp.join(self.__dataset_root_path, 'K.txt')
with open(K_path) as K_file:
K_str = K_file.read()
self.__K =
|
np.fromstring(K_str, sep=',')
|
numpy.fromstring
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 14:45:32 2017
subscript 1 = R-band (has more entries)
subscript 2 = g-band
@author: stephaniekwan
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from matplotlib import gridspec
from matplotlib.ticker import AutoMinorLocator
from astropy.table import Table
# Use LaTeX font
plt.rc('font', **{'weight': 'normal', 'size': 15})
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
#plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
plt.rc('text', usetex = True)
table1 = np.genfromtxt('../optical/PTF_fulltable_phot1.txt', comments = '|',
skip_header = 43, skip_footer = 2)
table2 = np.genfromtxt('../optical/PTF_fulltable_phot2.txt', comments = '|',
skip_header = 43, skip_footer = 2)
flux1, flux2 = table1[:, 7], table2[:, 7]
sigflux1, sigflux2 = table1[:, 8], table2[:, 8]
mjd1, mjd2 = table1[:, 15], table2[:, 15]
snr1, snr2 = table1[:, 9], table2[:, 9]
zp1, zp2 = table1[:, 5], table2[:, 5]
snt = 3
snu = 5
flux_ref1, sigflux_ref1 = 2771.08, 205.304 #from bottom of table
flux_ref2, sigflux_ref2 = 587.622, 46.0016
mag1, mag1sig, mag1date = np.array([]), np.array([]), np.array([])
upperlim1, upperlim1date = np.array([]), np.array([])
for i in range(len(flux1)):
# Section 9: define new sigflux DC
if (sigflux1[i] > sigflux_ref1):
sigflux_DC = np.sqrt(sigflux1[i] ** 2 - sigflux_ref1 ** 2)
else:
sigflux_DC = np.sqrt(sigflux1[i] ** 2 + sigflux_ref1 ** 2)
# Section 9: redefine SNR
newSnr = (flux1[i] + flux_ref1) / sigflux_DC
if (newSnr > snt): # we have a confident detection
mag1 = np.append(mag1,
zp1[i] - 2.5 * np.log10(flux1[i] + flux_ref1))
mag1sig = np.append(mag1sig,
1.0875 * sigflux1[i] / (flux1[i] + flux_ref1))
mag1date = np.append(mag1date, mjd1[i])
else:
# compute upper flux limit and plot as arrow or triangle
upperlim1 = np.append(upperlim1,
zp1[i] - 2.5 * np.log10(snu * sigflux1[i]))
upperlim1date = np.append(upperlim1date, mjd1[i])
toosmall = []
for i in range(0, len(mag1)):
if mag1[i] < 10.0:
toosmall.append(i)
for i in toosmall[::-1]:
mag1 = np.delete(mag1, i)
mag1date = np.delete(mag1date, i)
mag1sig = np.delete(mag1sig, i)
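# --- Illustrative sketch (not part of the original analysis) ----------------
# A compact, vectorized restatement of the per-point logic above: add the
# reference flux, recompute the SNR with the adjusted uncertainty, and convert
# confident detections to magnitudes (m = zp - 2.5*log10(flux + flux_ref)).
# It mirrors the R-band loop and is shown only for clarity.
def _vectorized_mag_sketch(flux, sigflux, zp, mjd, flux_ref, sigflux_ref, snt=3):
    sigflux_dc = np.where(sigflux > sigflux_ref,
                          np.sqrt(sigflux ** 2 - sigflux_ref ** 2),
                          np.sqrt(sigflux ** 2 + sigflux_ref ** 2))
    detected = (flux + flux_ref) / sigflux_dc > snt
    mag = zp[detected] - 2.5 * np.log10(flux[detected] + flux_ref)
    magsig = 1.0875 * sigflux[detected] / (flux[detected] + flux_ref)
    return mag, magsig, mjd[detected]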
gs = gridspec.GridSpec(2, 2, width_ratios = [5, 1])
ax1 = plt.subplot(gs[0])
ax1 = plt.gca()
ax1.text(-0.07, 1.1, '\\textbf{(a)}', transform=ax1.transAxes,
fontsize = 15, fontweight = 'bold', va = 'top', ha = 'right')
plt.scatter(mag1date, mag1, marker = 'o', s = 2, color = 'black', zorder = 3)
#plt.scatter(upperlim1date, upperlim1, color = 'grey', marker = 'v',
# facecolors = 'grey', s = 15, zorder = 4)
for i in range(0, len(upperlim1)):
ax1.arrow(upperlim1date[i], upperlim1[i],
0.0, 0.3, head_width = 20, head_length = 0.15,
fc = 'grey', ec = 'grey', linestyle = '-')
plt.errorbar(mag1date, mag1, yerr = mag1sig, linestyle = 'None',
color = 'grey', linewidth = 1, zorder = 2)
plt.axhline(y = np.median(mag1), color = 'k', ls = ':')
xlowerlim1, xupperlim1, ylowerlim1, yupperlim1 = 56400, 57800, 17.0, 20.0
ax1.set_xlim([xlowerlim1, xupperlim1])
ax1.set_ylim([ylowerlim1, yupperlim1])
ax1.invert_yaxis()
plt.xlabel('Date (MJD)', fontsize = 14)
plt.ylabel('R Magnitude', fontsize = 14)
minorLocator = AutoMinorLocator()
minorLocator2= AutoMinorLocator()
ax1.xaxis.set_minor_locator(minorLocator)
ax1.yaxis.set_minor_locator(minorLocator2)
# Shaded area to denote uncertainty of median (average of mag1sig)
ax1.add_patch(
patches.Rectangle(
(xlowerlim1, np.median(mag1) - 5 * np.average(mag1sig)), # (x, y)
xupperlim1 - xlowerlim1, # width
10 * np.average(mag1sig), # height
0.0, # angle
facecolor = 'lightgrey',
edgecolor = 'none',
zorder = 1
))
# Add inset
i1, i2 = 4, 12
x1, x2 = mag1date[i1], mag1date[i2]
axins = plt.subplot(gs[1])
axins.set_xlim(x1 + 7, x2 + 10)
axins.set_xticks(np.arange(56480, 56514, 10))
axins.set_ylim(18.35, 18.85)
plt.scatter(mag1date[i1:i2], mag1[i1:i2], color = 'black', marker = 'o',
s = 4, zorder = 3)
plt.errorbar(mag1date[i1:i2], mag1[i1:i2], yerr = mag1sig[i1:i2],
linestyle = 'None', color = 'black', zorder = 2)
plt.axhline(y = np.median(mag1), color = 'k', ls = ':')
axins.invert_yaxis()
minorLocator3 = AutoMinorLocator()
minorLocator4 = AutoMinorLocator()
axins.xaxis.set_minor_locator(minorLocator3)
axins.yaxis.set_minor_locator(minorLocator4)
# Inset: Shaded area to denote uncertainty of median (average of mag1sig)
axins.add_patch(
patches.Rectangle(
(x1 - 10, np.median(mag1) - 5 * np.average(mag1sig)), # (x, y)
xupperlim1 - xlowerlim1, # width
10 * np.average(mag1sig), # height
0.0, # angle
facecolor = 'lightgrey',
edgecolor = 'none',
zorder = 1
))
###########################
## g-band data
###########################
mag2, mag2sig, mag2date = np.array([]), np.array([]), np.array([])
upperlim2, upperlim2date = np.array([]), np.array([])
for i in range(len(flux2)):
# Section 9: define new sigflux DC
if (sigflux2[i] > sigflux_ref2):
sigflux_DC = np.sqrt(sigflux2[i] ** 2 - sigflux_ref2 ** 2)
else:
sigflux_DC = np.sqrt(sigflux2[i] ** 2 + sigflux_ref2 ** 2)
# Section 9: redefine SNR
newSnr = (flux2[i] + flux_ref2) / sigflux_DC
if (newSnr > snt): # we have a confident detection
mag2 = np.append(mag2,
zp2[i] - 2.5 * np.log10(flux2[i] + flux_ref2))
mag2sig = np.append(mag2sig,
1.0875 * sigflux2[i] / (flux2[i] + flux_ref2))
mag2date = np.append(mag2date, mjd2[i])
else:
# compute upper flux limit and plot as arrow or triangle
upperlim2 = np.append(upperlim2,
zp2[i] - 2.5 * np.log10(snu * sigflux2[i]))
        upperlim2date = np.append(upperlim2date, mjd2[i])
toosmall2 = []
for i in range(0, len(mag2)):
if mag2[i] < 10.0:
toosmall2.append(i)
for i in toosmall2[::-1]:
mag2 = np.delete(mag2, i)
mag2date = np.delete(mag2date, i)
mag2sig = np.delete(mag2sig, i)
ax2 = plt.subplot(gs[2])
ax2.text(-0.07, 1.15, '\\textbf{(b)}', transform = ax2.transAxes,
fontsize = 15, fontweight = 'bold', va = 'top', ha = 'right')
xlowerlim2, xupperlim2, ylowerlim2, yupperlim2 = 55000, 58000, 18.5, 21.0
ax2.set_xlim([xlowerlim2, xupperlim2])
ax2.set_ylim([ylowerlim2, yupperlim2])
plt.axhline(y = np.median(mag2), color = 'k', ls = ':')
plt.scatter(mag2date, mag2, marker = 'o', s = 5, color = 'black', zorder = 3)
plt.errorbar(mag2date, mag2, yerr = mag2sig, linestyle = 'None',
color = 'grey', zorder = 2)
ax2.invert_yaxis()
# g-band: Shaded area to denote uncertainty of median
ax2.add_patch(
patches.Rectangle(
(xlowerlim2, np.median(mag2) - 5 * np.average(mag2sig)), # (x, y)
xupperlim2 - xlowerlim2, # width
10 *
|
np.average(mag2sig)
|
numpy.average
|
import math
import hydrostats as hs
import hydrostats.data as hd
import numpy as np
import pandas as pd
from scipy import interpolate
import warnings
__all__ = ['correct_historical', 'correct_forecast', 'statistics_tables']
def correct_historical(simulated_data: pd.DataFrame, observed_data: pd.DataFrame) -> pd.DataFrame:
"""
Accepts a historically simulated flow timeseries and observed flow timeseries and attempts to correct biases in the
simulation on a monthly basis.
Args:
simulated_data: A dataframe with a datetime index and a single column of streamflow values
observed_data: A dataframe with a datetime index and a single column of streamflow values
Returns:
pandas DataFrame with a datetime index and a single column of streamflow values
"""
# list of the unique months in the historical simulation. should always be 1->12 but just in case...
unique_simulation_months = sorted(set(simulated_data.index.strftime('%m')))
dates = []
values = []
for month in unique_simulation_months:
# filter historic data to only be current month
monthly_simulated = simulated_data[simulated_data.index.month == int(month)].dropna()
to_prob = _flow_and_probability_mapper(monthly_simulated, to_probability=True)
# filter the observations to current month
monthly_observed = observed_data[observed_data.index.month == int(month)].dropna()
to_flow = _flow_and_probability_mapper(monthly_observed, to_flow=True)
dates += monthly_simulated.index.to_list()
value = to_flow(to_prob(monthly_simulated.values))
values += value.tolist()
corrected = pd.DataFrame(data=values, index=dates, columns=['Corrected Simulated Streamflow'])
corrected.sort_index(inplace=True)
return corrected
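# --- Illustrative usage sketch (synthetic data, not part of the module) -----
# A minimal example of calling correct_historical with two synthetic daily
# series; the column names and magnitudes below are made up purely to show the
# expected shape of the inputs (datetime index, one streamflow column each).
def _correct_historical_example():
    dates = pd.date_range('2000-01-01', periods=3 * 365, freq='D')
    rng = np.random.default_rng(42)
    sim = pd.DataFrame({'Simulated': rng.gamma(2.0, 50.0, len(dates))}, index=dates)
    obs = pd.DataFrame({'Observed': rng.gamma(2.0, 40.0, len(dates))}, index=dates)
    corrected = correct_historical(sim, obs)
    return corrected.head()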
def correct_forecast(forecast_data: pd.DataFrame, simulated_data: pd.DataFrame,
observed_data: pd.DataFrame, use_month: int = 0) -> pd.DataFrame:
"""
Accepts a short term forecast of streamflow, simulated historical flow, and observed flow timeseries and attempts
to correct biases in the forecasted data
Args:
forecast_data: A dataframe with a datetime index and any number of columns of forecasted flow. Compatible with
forecast_stats, forecast_ensembles, forecast_records
simulated_data: A dataframe with a datetime index and a single column of streamflow values
observed_data: A dataframe with a datetime index and a single column of streamflow values
use_month: Optional: either 0 for correct the forecast based on the first month of the forecast data or -1 if
you want to correct based on the ending month of the forecast data
Returns:
pandas DataFrame with a copy of forecasted data with values updated in each column
"""
# make a copy of the forecasts which we update and return so the original data is not changed
forecast_copy = forecast_data.copy()
# make the flow and probability interpolation functions
monthly_simulated = simulated_data[simulated_data.index.month == forecast_copy.index[use_month].month].dropna()
monthly_observed = observed_data[observed_data.index.month == forecast_copy.index[use_month].month].dropna()
to_prob = _flow_and_probability_mapper(monthly_simulated, to_probability=True, extrapolate=True)
to_flow = _flow_and_probability_mapper(monthly_observed, to_flow=True, extrapolate=True)
# for each column of forecast data, make the interpolation function and update the dataframe
for column in forecast_copy.columns:
tmp = forecast_copy[column].dropna()
forecast_copy.update(pd.DataFrame(to_flow(to_prob(tmp.values)), index=tmp.index, columns=[column]))
return forecast_copy
def statistics_tables(corrected: pd.DataFrame, simulated: pd.DataFrame, observed: pd.DataFrame,
merged_sim_obs: pd.DataFrame = False, merged_cor_obs: pd.DataFrame = False,
metrics: list = None) -> str:
"""
Makes an html table of various statistical metrics for corrected vs observed data alongside the same metrics for
the simulated vs observed data as a way to see the improvement made by the bias correction. This function uses
hydrostats.data.merge_data on the 3 inputs. If you have already computed these because you are doing a full
comparison of bias correction, you can provide them to save time
Args:
corrected: A dataframe with a datetime index and a single column of streamflow values
simulated: A dataframe with a datetime index and a single column of streamflow values
observed: A dataframe with a datetime index and a single column of streamflow values
merged_sim_obs: (optional) if you have already computed it, hydrostats.data.merge_data(simulated, observed)
merged_cor_obs: (optional) if you have already computed it, hydrostats.data.merge_data(corrected, observed)
metrics: A list of abbreviated statistic names. See the documentation for HydroErr
"""
    if corrected is False and simulated is False and observed is False:
        if merged_sim_obs is not False and merged_cor_obs is not False:
            pass  # if you provided the merged dataframes already, we use those
    else:
        # merge the datasets together
        merged_sim_obs = hd.merge_data(sim_df=simulated, obs_df=observed)
        merged_cor_obs = hd.merge_data(sim_df=corrected, obs_df=observed)
if metrics is None:
metrics = ['ME', 'RMSE', 'NRMSE (Mean)', 'MAPE', 'NSE', 'KGE (2009)', 'KGE (2012)']
# Merge Data
table1 = hs.make_table(merged_dataframe=merged_sim_obs, metrics=metrics)
table2 = hs.make_table(merged_dataframe=merged_cor_obs, metrics=metrics)
table2 = table2.rename(index={'Full Time Series': 'Corrected Full Time Series'})
table1 = table1.rename(index={'Full Time Series': 'Original Full Time Series'})
table1 = table1.transpose()
table2 = table2.transpose()
table_final = pd.merge(table1, table2, right_index=True, left_index=True)
return table_final.to_html()
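# --- Illustrative sketch (concept only, not part of the module API) ---------
# The helper below builds interpolators between flow and cumulative probability
# from a monthly histogram. The underlying idea (empirical CDF of the
# simulation mapped back through the inverse empirical CDF of the observations)
# can be sketched in a few lines; this is a simplification, not the exact
# binning used by _flow_and_probability_mapper.
def _quantile_mapping_sketch(simulated_values, observed_values, new_values):
    sim_sorted = np.sort(simulated_values)
    obs_sorted = np.sort(observed_values)
    probs_sim = np.linspace(0.0, 1.0, len(sim_sorted))
    probs_obs = np.linspace(0.0, 1.0, len(obs_sorted))
    to_prob = interpolate.interp1d(sim_sorted, probs_sim, fill_value='extrapolate')
    to_flow = interpolate.interp1d(probs_obs, obs_sorted, fill_value='extrapolate')
    return to_flow(to_prob(new_values))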
def _flow_and_probability_mapper(monthly_data: pd.DataFrame, to_probability: bool = False,
to_flow: bool = False, extrapolate: bool = False) -> interpolate.interp1d:
if not to_flow and not to_probability:
raise ValueError('You need to specify either to_probability or to_flow as True')
# get maximum value to bound histogram
max_val = math.ceil(np.max(monthly_data.max()))
min_val = math.floor(np.min(monthly_data.min()))
if max_val == min_val:
warnings.warn('The observational data has the same max and min value. You may get unanticipated results.')
max_val += .1
# determine number of histograms bins needed
number_of_points = len(monthly_data.values)
number_of_classes = math.ceil(1 + (3.322 * math.log10(number_of_points)))
# specify the bin width for histogram (in m3/s)
step_width = (max_val - min_val) / number_of_classes
# specify histogram bins
bins = np.arange(-np.min(step_width), max_val + 2 * np.min(step_width), np.min(step_width))
if bins[0] == 0:
bins = np.concatenate((-bins[1], bins))
elif bins[0] > 0:
bins = np.concatenate((-bins[0], bins))
# make the histogram
counts, bin_edges =
|
np.histogram(monthly_data, bins=bins)
|
numpy.histogram
|
import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
# Import matplotlib stuff for plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.core.pylabtools import figsize
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Logo-generating module and utils
import anylogo
import NB_sortseq_utils as utils
# set plotting format options
utils.set_plotting_style_emat()
#===============================================================================
# Set output directory
#===============================================================================
output = 'output_figs/'
#===============================================================================
# directory where emat csv files are contained
#===============================================================================
# lac
datadir_lac = '../sortseq/2011_lacZ/'
# mar
datadir_marA = '../sortseq/20150820_marRmut2only/'
datadir_mar_RNAP = '../sortseq/20150513_marRmut1only_marRdeltaRAB_marRdeltaR/'
# rel
datadir_rel = '../sortseq/20150312_relB/'
#===============================================================================
# plot energy matrices with logos on top.
#===============================================================================
# Set color scale - I want the colorbar to be symmetric and will pick values
# that seem appropriate for all matrices.
emat_min=-0.4
emat_max=0.4
mid_val=0.0
# Create background dict
gc = .508
background_array =pd.DataFrame( [[(1-gc)/2,gc/2,gc/2,(1-gc)/2]])
#------------------------------------------------------------------------------#
# lacZ: LacI
#------------------------------------------------------------------------------#
energy_df = pd.read_csv(datadir_lac + '2011_lacZ_MG1655_M9glucose_na_mut1_4bins_LacI_O1_emat_mean.csv')
energy_df = energy_df[['A','C','G','T']]
# create background nucleotide frequencies dataframe
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
background_df = pd.DataFrame(np.tile(background_array,
(len(energy_df_scaled), 1)), columns=['A','C','G','T'])
seq = 'AATTGTGAGCGGATAACAATT'
plt.figure(figsize=utils.cm2inch((0.18*len(seq) + 0.2,2.5)))
ax = plt.gca()
relative_scale=1.5
relative_spacing=.65
emat_ymin = -2 * (relative_scale + relative_spacing)
emat_ymax = -2 * relative_spacing
yticks = np.linspace(emat_ymin, emat_ymax, 9)[[1, 3, 5, 7]]
yticklabels = list('TGCA')
anylogo.draw(ax, effect_df=energy_df_scaled, logo_type='information',
background = background_df,
use_transparency=False)
L = len(seq)
ax.set_xticks([])
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='none',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max),
extent=(-.5, L - .5, emat_ymin, emat_ymax),
zorder=100,
aspect='auto')
ax.set_ylim([emat_ymin, 2])
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=5, horizontalalignment='center')
ax.set_ylabel('')
ax.yaxis.set_tick_params(length=0)
# # create an axes on the right side of ax. The width of cax will be 3%
# # of ax and the padding between cax and ax will be fixed at 0.05 inch.
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="3%", pad=0.05)
#
# cbar = plt.colorbar(im, cax=cax, ticks=[-0.5, 0, 0.5])
# cbar.ax.set_yticklabels(['-0.5', '0', '0.5'], fontsize=6, fontname='Arial')
# cbar.outline.set_visible(False)
# cbar.ax.tick_params(axis=u'both', which=u'both',length=0)
y = .5*emat_ymax
for i in range(L):
ax.text(i, y, seq[i], horizontalalignment='center', verticalalignment='center',
fontsize=6)
ax.tick_params(axis='y', pad=7)
plt.tight_layout()
plt.savefig(output + 'fig2_lacZ_emat_logo_lacI.pdf')
# save energy matrix using nearest interpolation
plt.figure()
ax = plt.gca()
L = len(seq)
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='nearest',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max))
ax.axis('off')
plt.savefig(output + 'fig2_lacZ_emat_logo_lacI_ematonly.pdf')
#------------------------------------------------------------------------------#
# logos for CRP
#------------------------------------------------------------------------------#
energy_df = pd.read_csv(datadir_lac + '2011_lacZ_MG1655_M9glucose_na_mut1_4bins_CRP_emat_mean.csv')
energy_df = energy_df[['A','C','G','T']]
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
energy_df_scaled = energy_df_scaled[['A','C','G','T']]
# create background nucleotide frequencies dataframe
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
background_df = pd.DataFrame(np.tile(background_array,
(len(energy_df_scaled), 1)), columns=['A','C','G','T'])
seq = 'ATTAATGTGAGTTAGCTCACTCATTA'
plt.figure(figsize=utils.cm2inch((0.18*len(seq) + 0.2,2.5)))
ax = plt.gca()
relative_scale=1.5
relative_spacing=.65
emat_ymin = -2 * (relative_scale + relative_spacing)
emat_ymax = -2 * relative_spacing
yticks = np.linspace(emat_ymin, emat_ymax, 9)[[1, 3, 5, 7]]
yticklabels = list('TGCA')
anylogo.draw(ax, effect_df=energy_df_scaled, logo_type='information',
background = background_df,
use_transparency=False)
L = len(seq)
ax.set_xticks([])
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='none',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max),
extent=(-.5, L - .5, emat_ymin, emat_ymax),
zorder=100,
aspect='auto')
ax.set_ylim([emat_ymin, 2])
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=5, horizontalalignment='center')
ax.set_ylabel('')
ax.yaxis.set_tick_params(length=0)
# # create an axes on the right side of ax. The width of cax will be 3%
# # of ax and the padding between cax and ax will be fixed at 0.05 inch.
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="3%", pad=0.05)
#
# cbar = plt.colorbar(im, cax=cax, ticks=[-0.5, 0, 0.5])
# cbar.ax.set_yticklabels(['-0.5', '0', '0.5'], fontsize=6, fontname='Arial')
# cbar.outline.set_visible(False)
# cbar.ax.tick_params(axis=u'both', which=u'both',length=0)
y = .5*emat_ymax
for i in range(L):
ax.text(i, y, seq[i], horizontalalignment='center', verticalalignment='center',
fontsize=6)
ax.tick_params(axis='y', pad=7)
plt.tight_layout()
plt.savefig(output + 'fig2_lacZ_emat_logo_CRP.pdf')
# save energy matrix using nearest interpolation
plt.figure()
ax = plt.gca()
L = len(seq)
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='nearest',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max))
ax.axis('off')
plt.savefig(output + 'fig2_lacZ_emat_logo_CRP_ematonly.pdf')
#------------------------------------------------------------------------------#
# lacZ: RNAP
#------------------------------------------------------------------------------#
energy_df = pd.read_csv(datadir_lac + '2011_lacZ_MG1655_M9glucose_na_mut1_4bins_RNAP_emat_mean.csv')
# energy_df['position'] = energy_df['position'] - 63
energy_df = energy_df[energy_df.position != energy_df.position.min()]
energy_df = energy_df[energy_df.position != energy_df.position.max()]
energy_df.reset_index(inplace=True)
energy_df = energy_df[['A','C','G','T']]
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
energy_df_scaled.reset_index(inplace=True)
energy_df_scaled = energy_df_scaled[['A','C','G','T']]
# create background nucleotide frequencies dataframe
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
background_df = pd.DataFrame(np.tile(background_array,
(len(energy_df_scaled), 1)), columns=['A','C','G','T'])
seq = 'TTTACACTTTATGCTTCCGGCTCGTATGTT'
plt.figure(figsize=utils.cm2inch((0.18*len(seq) + 0.2,2.5)))
ax = plt.gca()
relative_scale=1.5
relative_spacing=.65
emat_ymin = -2 * (relative_scale + relative_spacing)
emat_ymax = -2 * relative_spacing
yticks = np.linspace(emat_ymin, emat_ymax, 9)[[1, 3, 5, 7]]
yticklabels = list('TGCA')
anylogo.draw(ax, effect_df=energy_df_scaled, logo_type='information',
background = background_df,
use_transparency=False)
L = len(seq)
ax.set_xticks([])
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='none',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max),
extent=(-.5, L - .5, emat_ymin, emat_ymax),
zorder=100,
aspect='auto')
ax.set_ylim([emat_ymin, 2])
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=5, horizontalalignment='center')
ax.set_ylabel('')
ax.yaxis.set_tick_params(length=0)
# # create an axes on the right side of ax. The width of cax will be 3%
# # of ax and the padding between cax and ax will be fixed at 0.05 inch.
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="3%", pad=0.05)
#
# cbar = plt.colorbar(im, cax=cax, ticks=[-0.5, 0, 0.5])
# cbar.ax.set_yticklabels(['-0.5', '0', '0.5'], fontsize=6, fontname='Arial')
# cbar.outline.set_visible(False)
# cbar.ax.tick_params(axis=u'both', which=u'both',length=0)
y = .5*emat_ymax
for i in range(L):
ax.text(i, y, seq[i], horizontalalignment='center', verticalalignment='center',
fontsize=6)
ax.tick_params(axis='y', pad=7)
plt.tight_layout()
plt.savefig(output + 'fig2_lacZ_emat_logo_RNAP.pdf')
# save energy matrix using nearest interpolation
plt.figure()
ax = plt.gca()
L = len(seq)
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='nearest',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max))
ax.axis('off')
plt.savefig(output + 'fig2_lacZ_emat_logo_RNAP_ematonly.pdf')
#------------------------------------------------------------------------------#
# marRAB: MarA
#------------------------------------------------------------------------------#
energy_df = pd.read_csv(datadir_marA + '20150820_marR_MG1655_LB_na_mut2_4bins_MarA_emat_mean.csv')
# energy_df = energy_df[energy_df.position != energy_df.position.max()]
energy_df = energy_df[['A','C','G','T']]
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
energy_df_scaled = energy_df_scaled[['A','C','G','T']]
# create background nucleotide frequencies dataframe
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
background_df = pd.DataFrame(np.tile(background_array,
(len(energy_df_scaled), 1)), columns=['A','C','G','T'])
seq = 'ATTTAGCAAAACGTGGCATC'
plt.figure(figsize=utils.cm2inch((0.18*len(seq) + 0.2,2.5)))
ax = plt.gca()
relative_scale=1.5
relative_spacing=.65
emat_ymin = -2 * (relative_scale + relative_spacing)
emat_ymax = -2 * relative_spacing
yticks = np.linspace(emat_ymin, emat_ymax, 9)[[1, 3, 5, 7]]
yticklabels = list('TGCA')
anylogo.draw(ax, effect_df=energy_df_scaled, logo_type='information',
background = background_df,
use_transparency=False)
L = len(seq)
ax.set_xticks([])
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='none',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max),
extent=(-.5, L - .5, emat_ymin, emat_ymax),
zorder=100,
aspect='auto')
ax.set_ylim([emat_ymin, 2])
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=5, horizontalalignment='center')
ax.set_ylabel('')
ax.yaxis.set_tick_params(length=0)
# # create an axes on the right side of ax. The width of cax will be 3%
# # of ax and the padding between cax and ax will be fixed at 0.05 inch.
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="3%", pad=0.05)
#
# cbar = plt.colorbar(im, cax=cax, ticks=[-0.5, 0, 0.5])
# cbar.ax.set_yticklabels(['-0.5', '0', '0.5'], fontsize=6, fontname='Arial')
# cbar.outline.set_visible(False)
# cbar.ax.tick_params(axis=u'both', which=u'both',length=0)
y = .5*emat_ymax
for i in range(L):
ax.text(i, y, seq[i], horizontalalignment='center', verticalalignment='center',
fontsize=6)
ax.tick_params(axis='y', pad=7)
plt.tight_layout()
plt.savefig(output + 'fig2_marRAB_emat_logo_marA.pdf')
# save energy matrix using nearest interpolation
plt.figure()
ax = plt.gca()
L = len(seq)
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='nearest',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max))
ax.axis('off')
plt.savefig(output + 'fig2_marRAB_emat_logo_marA_ematonly.pdf')
#------------------------------------------------------------------------------#
# marRAB: RNAP
#------------------------------------------------------------------------------#
energy_df = pd.read_csv(datadir_mar_RNAP + '20150513_marR_MG1655_LB_na_mut1_4bins_RNAP_emat_mean.csv')
energy_df = energy_df[energy_df.position != energy_df.position.max()]
energy_df = energy_df[energy_df.position != energy_df.position.max()]
energy_df.reset_index(inplace=True)
energy_df = energy_df[['A','C','G','T']]
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
energy_df_scaled.reset_index(inplace=True)
energy_df_scaled = energy_df_scaled[['A','C','G','T']]
# create background nucleotide frequencies dataframe
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
background_df = pd.DataFrame(np.tile(background_array,
(len(energy_df_scaled), 1)), columns=['A','C','G','T'])
seq = 'TTGACTTATACTTGCCTGGGCAATATTAT'
plt.figure(figsize=utils.cm2inch((0.18*len(seq) + 0.2,2.5)))
ax = plt.gca()
relative_scale=1.5
relative_spacing=.65
emat_ymin = -2 * (relative_scale + relative_spacing)
emat_ymax = -2 * relative_spacing
yticks = np.linspace(emat_ymin, emat_ymax, 9)[[1, 3, 5, 7]]
yticklabels = list('TGCA')
anylogo.draw(ax, effect_df=energy_df_scaled, logo_type='information',
background = background_df,
use_transparency=False)
L = len(seq)
ax.set_xticks([])
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='none',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max),
extent=(-.5, L - .5, emat_ymin, emat_ymax),
zorder=100,
aspect='auto')
ax.set_ylim([emat_ymin, 2])
ax.set_yticks(yticks)
ax.set_yticklabels(yticklabels, fontsize=5, horizontalalignment='center')
ax.set_ylabel('')
ax.yaxis.set_tick_params(length=0)
# # create an axes on the right side of ax. The width of cax will be 3%
# # of ax and the padding between cax and ax will be fixed at 0.05 inch.
# divider = make_axes_locatable(ax)
# cax = divider.append_axes("right", size="3%", pad=0.05)
#
# cbar = plt.colorbar(im, cax=cax, ticks=[-0.5, 0, 0.5])
# cbar.ax.set_yticklabels(['-0.5', '0', '0.5'], fontsize=6, fontname='Arial')
# cbar.outline.set_visible(False)
# cbar.ax.tick_params(axis=u'both', which=u'both',length=0)
y = .5*emat_ymax
for i in range(L):
ax.text(i, y, seq[i], horizontalalignment='center', verticalalignment='center',
fontsize=6)
ax.tick_params(axis='y', pad=7)
plt.tight_layout()
plt.savefig(output + 'fig2_marRAB_emat_logo_RNAP.pdf')
# save energy matrix using nearest interpolation
plt.figure()
ax = plt.gca()
L = len(seq)
im = ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
interpolation='nearest',
cmap='RdBu_r',
clim=(emat_min, emat_max),
norm = utils.MidpointNormalize(midpoint = mid_val,
vmin = emat_min, vmax = emat_max))
ax.axis('off')
plt.savefig(output + 'fig2_marRAB_emat_logo_RNAP_ematonly.pdf')
#------------------------------------------------------------------------------#
# relB promoter: RelBE
#------------------------------------------------------------------------------#
energy_df = pd.read_csv(datadir_rel + '20150513_relB_MG1655_M9glucose_na_mut1_4bins_RelBE_emat_mean.csv')
energy_df = energy_df[['A','C','G','T']]
energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df))*energy_df.copy()
energy_df_scaled = energy_df_scaled[['A','C','G','T']]
# create background nucleotide frequencies dataframe
energy_df_scaled = utils.estimate_scalefactor(
|
np.array(energy_df)
|
numpy.array
|
import sys
import os
import copy
import threading
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.stats
import json
# NOTE:
# Assumes:
# * No nugget,
# * Universal kriging
# * only pre-programmed formulas
# * only Matern5_2 covariance
# * no bias correction
accel_trisolve_opencl_code=r"""
__kernel void perform_multiply(__global float *Ttranspose_inverse,__global float *new_positions_covariance,__global float *product,unsigned ncols)
{
unsigned rownum = get_global_id(0);
unsigned colnum = get_global_id(1);
product[rownum*ncols + colnum] = Ttranspose_inverse[rownum*ncols + colnum]*new_positions_covariance[colnum];
}
__kernel void perform_add(__global float *product,__global float *solution,unsigned ncols)
{
unsigned rownum = get_global_id(0);
unsigned rowoffs;
unsigned colnum;
float accum=0.0;
rowoffs = rownum*ncols;
for (colnum=0;colnum < ncols;colnum++) {
accum += product[rowoffs + colnum];
}
solution[rownum]=accum;
}
"""
def covMatern5_2(x1,x2,param,scaling_factor,var):
ecart = np.abs(x1[:,np.newaxis,:] - x2[np.newaxis,:,:]) / (param[np.newaxis,np.newaxis,:] / scaling_factor)
toadd = ecart - np.log(1.0+ecart+ecart**2.0/3.0)
s=np.sum(toadd,axis=2)
return(np.exp(-s) * var);
def covMatern5_2_deriv_x2_k(x1,x2,params,scaling_factor,var,k):
# x1 and x2 are arrays for which the second (index 1) axis
# lengths match. k is an index into that axis
k_index = {"mu_norm": 0, "log_msqrtR_norm": 1}[k]
# matern 5/2 covariance is a product
# of factors corresponding to the different indices into the
# second axis of x1 and x2
# The covariance is expressed as:
# cov = exp(-ecart[k])*exp(log(1+ecart[k]+(ecart[k]**2)/3)) * ...
# or equivalently
# cov = exp(-ecart[k]) * (1 + ecart[k] + (ecart[k]**2)/3) * ...
# where
# ecart[k] = |x1[k]-x2[k]|/(param[k]/sqrt(5))
# where sqrt(5) is the scaling_factor.
#
# There is also a pre-multiplier "var" representing the variance
#
# Evaluation of the derivative :
# dcov/decart[k] = -exp(-ecart[k])* (1 + ecart[k] + (ecart[k]**2)/3) * ...
# + exp(-ecart[k]) * (1 + 2*ecart[k]/3) * ... (... represents factors for indices other than k, and also the var factor)
#
# In each of the terms we can substitute cov back in:
# dcov/decart[k] = -cov + cov*(1 + 2*ecart[k]/3)/(1 + ecart[k] + (ecart[k]**2)/3)
# Find a common denominator for the two terms:
# dcov/decart[k] = -cov*(1 + ecart[k] + (ecart[k]**2)/3)/(1 + ecart[k] + (ecart[k]**2)/3) + cov*(1 + 2*ecart[k]/3)/(1 + ecart[k] + (ecart[k]**2)/3)
# Now add the terms
# dcov/decart[k] = - cov*(1*ecart[k]/3 + (ecart[k]**2)/3)/(1 + ecart[k] + (ecart[k]**2)/3)
# To check, the derivative should be zero at ecart[k] = 0
# ... which it is!
# Also need
# decart[k]/dx2[k] = -sign(x1[k]-x2[k])/(param[k]/scalefactor)
#
# Chain rule to put these together
# dcov/dx2[k] = dcov/decart[k] * decart[k]/dx2[k]
# dcov/dx2[k] = cov*(1*ecart[k]/3 + (ecart[k]**2)/3)/(1 + ecart[k] + (ecart[k]**2)/3) * sign(x1[k]-x2[k])/(param[k]/scalefactor)
ecart_k = np.abs(x1[:,np.newaxis,k_index] - x2[np.newaxis,:,k_index]) / (params[k_index] / scaling_factor)
sign_k = np.sign(x1[:,np.newaxis,k_index] - x2[np.newaxis,:,k_index])
return (covMatern5_2(x1,x2,params,scaling_factor,var)*(ecart_k/3.0 + (ecart_k**2.0)/3.0)/(1.0 + ecart_k + (ecart_k**2.0)/3.0))*sign_k/(params[k_index]/scaling_factor)
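# --- Illustrative sketch (not part of the surrogate model) ------------------
# Finite-difference check of covMatern5_2_deriv_x2_k against covMatern5_2 for
# the "mu_norm" coordinate, using small made-up inputs:
def _matern_deriv_check_sketch():
    rng = np.random.default_rng(1)
    x1 = rng.normal(size=(3, 2))
    x2 = rng.normal(size=(4, 2))
    params = np.array([1.5, 2.0])
    scaling_factor = np.sqrt(5.0)
    var = 0.7
    eps = 1e-6
    x2_eps = x2.copy()
    x2_eps[:, 0] += eps                      # perturb the mu_norm column
    fd = (covMatern5_2(x1, x2_eps, params, scaling_factor, var)
          - covMatern5_2(x1, x2, params, scaling_factor, var)) / eps
    analytic = covMatern5_2_deriv_x2_k(x1, x2, params, scaling_factor, var, "mu_norm")
    assert np.allclose(fd, analytic, atol=1e-4)
    return analytic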
# centralize contexts/queues by thread so we don't create so many and overflow the low fixed limit of NVIDIA devices
clcontext_queues_bythread={} # dictionary indexed by threading.current_thread().ident of dictionary indexed by device name of (context,queue)
class surrogate_model_shear(object):
# Class representing raw DiceKriging surrogate model
closure_lowest_avg_load_used = None # Lowest load, in Pa, with data that went into closure model; in some cases may be a bound on the validity of the model/training data
# DiceKriging outputs
X = None
y = None
T = None
Ttranspose_inverse = None # cached for accel_trisolve option
#clcontext_queues=None # dictionary by tuple of device pointer ints of (context,queue)
clbuffers=None # dictionary by id(context) of (Ttranspose_inverse,new_positions_covariance_buf,product_buf,solution_buf)
#clprg = None # dictionary by id(context) of opencl program
clkern = None # dictionary by id(context) of (multply opencl kernel,add opencl kernel)
z = None
M = None
beta = None
trend_formula = None
covariance_class = None
covariance_name = None
covariance_paramset_n = None
covariance_sd2 = None
covariance_param = None
# Precalculated T_M
T_M = None
def __init__(self,**kwargs):
for argname in kwargs:
if hasattr(self,argname):
setattr(self,argname,kwargs[argname])
pass
else:
raise ValueError("Unknown attribute: %s" % (argname))
pass
# Precalculate T_M
self.T_M = scipy.linalg.cholesky(np.dot(self.M.T,self.M),lower=False)
pass
def eval_formula_values(self,new_positions):
# Can only handle pre-programmed formulas...
if self.trend_formula==[u'~x + I(x^2)']: # Simple linear + quadratic
formula_values=np.array((np.ones(new_positions.shape[0],dtype='d'),
new_positions["x"],
new_positions["x"]**2.0),dtype='d').T
pass
elif self.trend_formula==['~mu_norm + log_msqrtR_norm + log_crack_model_shear_factor + I(mu_norm^2) + ',
' I(log_msqrtR_norm^2) + I(log_crack_model_shear_factor^2) + ',
' I(mu_norm * log_msqrtR_norm) + I(mu_norm * log_crack_model_shear_factor) + ',
' I(log_msqrtR_norm * log_crack_model_shear_factor)']:
# Full linear and quadratic in mu and log_msqrtR, all normalized
formula_values=np.array((
|
np.ones(new_positions.shape[0],dtype='d')
|
numpy.ones
|
import numpy as np
from matplotlib import pyplot as plt
n=5000
#number of generated points
UnifR=np.random.uniform(size=(1,n))
#uniform array
DistR=np.zeros((1,n))
#output array radial
UnifT=2*np.pi*np.random.uniform(size=(1,n))
# output array angular
x=np.arange(0,10,0.1)
y=np.ones([100,1])/10
#x-y arrays currently unused
def ExpCDFinv (x, L):
return -np.log(1-x)*L
def BellCDFinv (x, s, m):
return (
|
np.log((1/x)-1)
|
numpy.log
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
def one_hot_encode_along_channel_axis(sequence, onehot_axis=1):
to_return = np.zeros((len(sequence),4), dtype=np.int8)
seq_to_one_hot_fill_in_array(zeros_array=to_return,
sequence=sequence, one_hot_axis=onehot_axis)
return to_return
def seq_to_one_hot_fill_in_array(zeros_array, sequence, one_hot_axis):
assert one_hot_axis==0 or one_hot_axis==1
if (one_hot_axis==0):
assert zeros_array.shape[1] == len(sequence)
elif (one_hot_axis==1):
assert zeros_array.shape[0] == len(sequence)
#will mutate zeros_array
for (i,char) in enumerate(sequence):
if (char=="A" or char=="a"):
char_idx = 0
elif (char=="C" or char=="c"):
char_idx = 1
elif (char=="G" or char=="g"):
char_idx = 2
elif (char=="T" or char=="t"):
char_idx = 3
elif (char=="N" or char=="n"):
continue #leave that pos as all 0's
else:
raise RuntimeError("Unsupported character: "+str(char))
if (one_hot_axis==0):
zeros_array[char_idx,i] = 1
elif (one_hot_axis==1):
zeros_array[i,char_idx] = 1
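
# Illustrative usage sketch (editor's addition, not part of the original file):
# encode a short DNA string and confirm the A/C/G/T column layout; an 'N' row stays all zeros.
# Uses only the one_hot_encode_along_channel_axis helper defined above.
if __name__ == "__main__":
    onehot = one_hot_encode_along_channel_axis("ACGTN")
    assert onehot.shape == (5, 4)                 # one row per base, 4 channels
    assert onehot[0].tolist() == [1, 0, 0, 0]     # A
    assert onehot[3].tolist() == [0, 0, 0, 1]     # T
    assert onehot[4].tolist() == [0, 0, 0, 0]     # N -> left as zeros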
def enum(**enums):
class Enum(object):
pass
to_return = Enum
for key, val in enums.items():
if hasattr(val, '__call__'):
setattr(to_return, key, staticmethod(val))
else:
setattr(to_return, key, val)
to_return.vals = [x for x in enums.values()]
to_return.the_dict = enums
return to_return
def seq_from_onehot(onehot_data):
sequences = []
if len(onehot_data.shape) != 3:
onehot_data = onehot_data[np.newaxis, :]
for i in range(onehot_data.shape[0]):
onehot_seq = onehot_data[i, :, :]
sequence = ''
if onehot_seq.shape[0] < onehot_seq.shape[1]:
onehot_seq = np.swapaxes(onehot_seq, 0, 1)
for j in range(onehot_seq.shape[0]):
if(onehot_seq[j, 0]==1):
sequence = sequence + "A"
elif (onehot_seq[j, 1]==1):
sequence = sequence + "C"
elif (onehot_seq[j, 2]==1):
sequence = sequence + "G"
elif (onehot_seq[j, 3]==1):
sequence = sequence + "T"
sequences.append(sequence)
return sequences
def reverse_complement_seq(seq):
table = str.maketrans("ACTG", "TGAC")
return seq.translate(table)[::-1]
def reverse_complement_onehot(onehot, window_size):
dim = onehot.shape
axis_nt = dim.index(4)
axis_base = dim.index(window_size)
onehot_rc = np.flip(onehot, axis=axis_nt)
onehot_rc = np.flip(onehot_rc, axis=axis_base)
return onehot_rc
def shift_onehot(onehot_data, shift_amount, pad_value=0.0):
"""Shift a sequence left or right by shift_amount.
Args:
seq: a [batch_size, sequence_length, sequence_depth] sequence to shift
shift_amount: the signed amount to shift (tf.int32 or int)
pad_value: value to fill the padding (primitive or scalar tf.Tensor)
"""
flag_swap = False
if len(onehot_data.shape) != 3:
onehot_data = onehot_data[np.newaxis, :]
if onehot_data.shape[2] > onehot_data.shape[1]:
onehot_data = np.swapaxes(onehot_data,1,2)
flag_swap = True
input_shape = onehot_data.shape
pad = pad_value * np.ones(onehot_data[:, 0:np.abs(shift_amount), :].shape)
def _shift_right(_onehot_data):
sliced_onehot_data = _onehot_data[:, :-shift_amount:, :]
return np.concatenate((pad, sliced_onehot_data), axis=1)
def _shift_left(_onehot_data):
sliced_onehot_data = _onehot_data[:, -shift_amount:, :]
return np.concatenate((sliced_onehot_data, pad), axis=1)
if shift_amount > 0:
output = _shift_right(onehot_data)
else:
output = _shift_left(onehot_data)
output = np.reshape(output, input_shape)
if flag_swap:
output = np.swapaxes(output,1,2)
return output
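
# Illustrative usage sketch (editor's addition): shifting a [batch, length, depth] array right
# by 2 pads the first two positions with pad_value and drops the last two. Uses only the
# shift_onehot helper defined above.
if __name__ == "__main__":
    _batch = np.arange(2 * 6 * 4, dtype=float).reshape(2, 6, 4)
    _shifted = shift_onehot(_batch, shift_amount=2, pad_value=0.0)
    assert _shifted.shape == _batch.shape
    assert np.all(_shifted[:, :2, :] == 0.0)                       # padded positions
    assert np.array_equal(_shifted[:, 2:, :], _batch[:, :-2, :])   # remainder shifted right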
def metric_pearson(obs, pred):
correlations = []
for i in range(len(pred)):
correlations.append(np.corrcoef(pred[i, :], obs[i, :])[0, 1])
return correlations
def metric_mse(obs, pred):
from sklearn.metrics import mean_squared_error
mses = []
for i in range(len(pred)):
mses.append(mean_squared_error(obs[i, :], pred[i, :]))
return mses
def metric_r2_score(obs, pred):
r2 = []
for i in range(len(pred)):
ssres = np.sum(np.square(obs[i, :] - pred[i, :]))
sstot = np.sum(np.square(obs[i, :] - np.mean(obs[i, :])))
r2.append(1 - ssres / sstot)
return r2
def compute_loss(obs, pred, combine_weighting):
correlations = metric_pearson(obs, pred)
mses = metric_mse(obs, pred)
metric_loss = 1 - np.stack(correlations) + combine_weighting*np.stack(mses)
return metric_loss
def minmax_norm(var_in):
max_val = np.amax(var_in)
min_val = np.amin(var_in)
subtracted = var_in - min_val
var_out = subtracted / (max_val - min_val)
return var_out
def minmax_scale(pred, labels):
subtracted = pred - np.min(pred, axis=-1)
max_pred = np.max(subtracted, axis=-1)
min_pred = np.min(subtracted, axis=-1)
max_true = np.max(labels, axis=-1)
min_true = np.min(labels, axis=-1)
scaled = subtracted / (max_pred - min_pred) * (max_true - min_true) + min_true
return scaled
def rounding_generator(data, y, name, batch_size):
import copy
l = len(data)
num_sample = batch_size - l % batch_size
data_out = copy.deepcopy(data)
data_out = np.concatenate((data_out, data_out[0:num_sample,:,:]), axis=0)
y_out = copy.deepcopy(y)
y_out = np.concatenate((y_out, y_out[0:num_sample,:]), axis=0)
name_out = copy.deepcopy(name)
name_out = np.concatenate((name_out, name_out[0:num_sample]), axis=0)
return data_out, y_out, name_out
# Matching the two datasets by concatenating the first samples from the same dataset
def upsample_generator(data1, data2):
l1 = len(data1)
l2 = len(data2)
    #extending data 1 to the same size as data 2
    sampleRelation = l2 // l1 #l2 must be the larger of the two
if l2 % l1 > 0:
sampleRelation += 1
index_in = list(range(l1))
index_out = np.concatenate([index_in] * sampleRelation, axis=0)
index_out = index_out[:l2]
return index_out
# Use the genome relationship, assuming the ordering of the two datasets corresponds
def interpolating_generator(data1, data2):
from scipy.interpolate import interp1d
index_in = np.linspace(0, len(data1), num=len(data1), endpoint=True)
f = interp1d(index_in, index_in)
index_new = np.linspace(0, len(data1), num=len(data2), endpoint=True)
index_out = f(index_new)
index_out = np.rint(index_out).astype(int)
index_out[index_out>=len(data1)] = len(data1) - 1
return index_out
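
# Illustrative sketch (editor's addition): both generators return an index into the smaller
# dataset with one entry per sample of the larger dataset. The toy arrays below are made up;
# only the upsample_generator / interpolating_generator helpers defined above are used.
if __name__ == "__main__":
    _small = np.zeros((3, 1))
    _large = np.zeros((7, 1))
    _idx_tile = upsample_generator(_small, _large)         # tiles [0, 1, 2] and truncates to 7
    _idx_interp = interpolating_generator(_small, _large)  # spreads indices 0..2 over 7 slots
    assert len(_idx_tile) == len(_large) and len(_idx_interp) == len(_large)
    assert _idx_tile.max() < len(_small) and _idx_interp.max() < len(_small)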
def list_seq_to_fasta(index_seq, seq, motif_name, flag_unique, output_directory, single_filter_txt):
if flag_unique:
output_filename = motif_name + '_seq_unique' + single_filter_txt + '.fasta'
index_seq = list(set(index_seq))
else:
output_filename = motif_name + '_seq' + single_filter_txt + '.fasta'
list_seq = np.asarray(seq)[index_seq]
print(len(index_seq))
with open(output_directory + output_filename, 'w') as f:
for i in range(len(index_seq)):
f.write('>' + str(i) + '\n')
f.write(list_seq[i] + '\n')
def list_seqlet_to_fasta(index_seq, index_start, seq, motif_name, output_directory, single_filter_txt):
output_filename = motif_name + '_seqlet' + single_filter_txt + '.fasta'
list_seq = np.asarray(seq)[index_seq]
print(len(index_seq))
with open(output_directory + output_filename, 'w') as f:
for i in range(len(index_seq)):
            start = max(0, index_start[i])
            seqlet = list_seq[i][start:min(250, index_start[i]+19)]
f.write('>' + str(i) + '\n')
f.write(seqlet + '\n')
def mut_seq(mut_dict, onehot_data, loc_txt):
output_onehot_data = copy.deepcopy(onehot_data)
for k in mut_dict.keys():
seq = mut_dict[k]['seq']
if loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
else:
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
if output_onehot_data.shape[-1] > output_onehot_data.shape[-2]:
output_onehot_data[seq, :, mut_start:mut_end+1] = 0 # Not activation
else:
output_onehot_data[seq, mut_start:mut_end+1, :] = 0
return output_onehot_data
def mut_seq_perbase_pernucleotide(mut_dict, onehot_data, loc_txt):
len_mutation = mut_dict[0]['mut_end'] - mut_dict[0]['mut_start'] #19
output_onehot_data = np.zeros((251, 4, len_mutation, 4))
for k in mut_dict.keys():
seq = mut_dict[k]['seq']
if loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
else:
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
for i in range(mut_start, mut_end):
for j in range(4):
tmp = copy.deepcopy(onehot_data[seq, :, :]) # 19 by 4
if tmp.shape[-1] == 4:
tmp[i, :] = 0
tmp[i, j] = 1
else: # what happens when both=4?
tmp[:, i] = 0
tmp[j, i] = 1
output_onehot_data[:,:,i-mut_start,j] = tmp
return output_onehot_data
def mut_seq_perbase_opposite(mut_dict, onehot_data, loc_txt, flag_order):
for k in mut_dict.keys():
seq = mut_dict[k]['seq']
if loc_txt == 'ocr':
mut_start = mut_dict[k]['start']
mut_end = mut_dict[k]['end']
elif loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
elif loc_txt == 'resp':
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
output_onehot_data = np.zeros((251, 4, mut_end - mut_start))
for i in range(mut_start, mut_end):
tmp = copy.deepcopy(onehot_data[seq, :, :])
if tmp.shape[-1] == 4:
if flag_order == 'ATGC':
tmp = tmp[:, [0, 3, 2, 1]]
tmp_original = np.copy(tmp[i, :])
if tmp_original[0] or tmp_original[3]: # AT->C;
tmp[i, :] = 0
tmp[i, 1] = 1
elif tmp_original[1] or tmp_original[2]: # CG->A
tmp[i, :] = 0
tmp[i, 0] = 1
else: # what happens when both=4?
if flag_order == 'ATGC':
tmp = tmp[[0, 3, 2, 1], :]
tmp_original = np.copy(tmp[:, i])
if tmp_original[0] or tmp_original[3]: # AT->C;
tmp[:, i] = 0
tmp[1, i] = 1
elif tmp_original[1] or tmp_original[2]: # CG->A
tmp[:, i] = 0
tmp[0, i] = 1
output_onehot_data[:, :, i] = tmp
return output_onehot_data
def mut_seq_perbase_opposite_hyp(mut_dict, onehot_data, hyp_score, loc_txt, flag_order):
for k in mut_dict.keys():
hyp_score_k = np.stack(hyp_score[str(k)])
seq = mut_dict[k]['seq']
if loc_txt == 'ocr':
mut_start = mut_dict[k]['start']
mut_end = mut_dict[k]['end']
elif loc_txt == 'mut':
mut_start = mut_dict[k][loc_txt+'_start']
mut_end = mut_dict[k][loc_txt+'_end']
elif loc_txt == 'resp':
mut_start = mut_dict[k][loc_txt+'_start'][0]
mut_end = mut_dict[k][loc_txt+'_end'][0]
output_onehot_data = np.zeros((251, 4, mut_end - mut_start))
for i in range(mut_start, mut_end):
tmp = copy.deepcopy(onehot_data[seq, :, :])
tmp_hyp = copy.deepcopy(hyp_score_k[seq, :, :])
if tmp.shape[-1] == 4:
if flag_order == 'ATGC':
tmp = tmp[:, [0, 3, 2, 1]]
tmp[i, :] = 0
tmp[i, np.argmin(tmp_hyp[i, :])] = 1
            else: # TODO: handle the edge case where both dimensions equal 4
if flag_order == 'ATGC':
tmp = tmp[[0, 3, 2, 1], :]
tmp[:, i] = 0
tmp[np.argmin(tmp_hyp[i, :]), i] = 1
output_onehot_data[:, :, i] = tmp
return output_onehot_data
# Parse meme file into array
def read_meme(file_path=None):
with open(file_path) as fp:
line = fp.readline()
motifs=[]
motif_names=[]
while line:
#determine length of next motif
if line.split(" ")[0]=='MOTIF':
#add motif number to separate array
motif_names.append(line.split(" ")[1])
#get length of motif
line2=fp.readline().split(" ")
motif_length = int(float(line2[5]))
#read in motif
current_motif=
|
np.zeros((19, 4))
|
numpy.zeros
|
#!/usr/bin/python3
# cvinfo.org
# Batch script for making colored state and county images based on covid-level2 data
#
from covid import cfe, load_json_file, save_json_file, load_state_names
import time
import os
import numpy as np
import xml.dom.minidom
import sys
import seaborn as sns
from cairosvg import svg2png
from PIL import Image, ImageFont, ImageDraw
import glob
import cv2
#################################################################################################
# GLOBAL VARS
dir_path = os.path.dirname(os.path.realpath(__file__))
if('/var/www/projects/' in dir_path):
from conf_vince import *
else:
from conf import *
def check_vals_files():
cl2 = load_json_file("json/covid-19-level2-states.json")
flat_stats = []
for state_obj in cl2:
state_code = state_obj['summary_info']['state_code']
js = load_json_file("json/" + state_code + ".json")
len_dates = len(js['js_vals']['dates'])
dates = js['js_vals']['dates']
fields = ['cases', 'deaths','cpm','dpm','cg_med', 'dg_med','mortality', 'new_cases', 'new_deaths']
for ff in fields:
wild = "anim/frames/" + state_code + "/" + state_code + "-" + ff + "*.svg"
gfiles = glob.glob(wild)
print(state_code, len_dates, len(gfiles))
for gfile in gfiles:
el = gfile.split("-")
fd = el[2]
fd = fd.replace(".svg", "")
if fd not in dates:
print(fd, "NOT FOUND!")
cmd = "rm " + gfile
print(cmd)
os.system(cmd)
def make_fb_prev_images():
#outfile = outfile.replace("frames", "png")
#svg2png(bytestring=svg_code,write_to=outfile, parent_width=ow*1.5,parent_height=oh*1.5)
js = load_json_file("json/covid-19-level2-states.json")
for data in js:
state_code = data['summary_info']['state_code']
#print("frames/" + state_code + "/" + state_code + "-" + "cpm*.svg")
files = sorted(glob.glob("anim/frames/" + state_code + "/" + state_code + "-" + "cpm*.svg"))
best_file = files[-1]
#print("BEST", best_file)
# use to delete last file if data is missing.
print("rm " + best_file)
def make_movie_from_frames(state, field):
if field != "ALL":
wild = "anim/marked/" + state + "/" + state + "-" + field
else:
wild = "anim/marked/" + state + "/" + state
files = sorted(glob.glob(wild))
frames = []
for file in files:
print(file)
# frame = cv2.imread(file)
# frames.append(frame)
outdir = "anim/mov/"
if cfe(outdir,1) == 0:
os.makedirs(outdir)
outfile = outdir + state + "-" + field + ".mp4"
cmd = """/usr/bin/ffmpeg -y -framerate 3 -pattern_type glob -i '""" + wild + """*.png' \
-c:v libx264 -r 25 -pix_fmt yuv420p """ + outfile
print(cmd)
os.system(cmd)
def preview(state_code, field,data_only=0):
print("PREVIEW", state_code,field)
mark_dir = "anim/marked/" + state_code
if cfe(mark_dir,1) == 0:
os.makedirs(mark_dir)
if state_code == "USA":
make_usa_vals_from_county()
print("STATE CODE:", state_code)
data = load_json_file("json/" + state_code + ".json")
field_desc = {
'cpm' : "Cases Per Million",
'dpm' : "Deaths Per Million",
'cases' : "Cases",
'deaths' : "Deaths",
'cg_med' : "Case Growth Percent",
'dg_med' : "Death Growth Percent",
'mortality' : "Mortality Percent",
'new_deaths' : "New Deaths Per Day",
'new_cases' : "New Cases Per Day"
}
field_alias = {
'cpm' : "3cpm",
'dpm' : "3dpm",
'cases' : "2cases",
'deaths' : "2deaths",
'cg_med' : "4cg_med",
'dg_med' : "5dg_med",
'mortality' : "6mortality",
'new_cases' : "1new_cases",
'new_deaths' : "1new_deaths"
}
dates = []
vals = []
state_names, state_codes = load_state_names()
if state_code != 'USA':
state_name = state_names[state_code]
else:
state_name = "USA"
js = load_json_file("json/" + state_code + ".json")
if state_code != 'USA':
ss = js['state_stats']
else:
ss = js['day_stats']
fields = []
if field == "ALL":
for ff in field_desc :
fields.append(ff)
else:
fields.append(field)
if "js_vals" not in data:
data['js_vals'] = {}
data['js_vals']['dates'] = []
if state_code != "USA":
cstats = js['county_stats']
cdays = {}
for county in cstats:
for row in cstats[county]['county_stats']:
day = row['day'].replace("-", "")
cdays[day] = 1
print("CDAYS", cdays)
else:
cdays = {}
if state_code != "USA":
for field in fields:
for dd in ss:
if dd['date'] in cdays and dd['cases'] > 0:
dates.append(dd['date'])
vals.append(dd[field])
if field + "_vals" not in data['js_vals']:
data['js_vals'][field + "_vals"] = []
if int(dd['cases']) > 0 and dd['date'] in cdays:
data['js_vals'][field + "_vals"].append(dd[field])
data['js_vals']['dates'].append(dd['date'])
print("JS VALS:", dd['date'], field, dd[field])
else:
print("cases < 0")
print("JS VALS:", data['js_vals'])
save_json_file("json/" + state_code + ".json", data)
else:
dates = js['js_vals']['dates']
vals = js['js_vals'][field]
print("VALS:", len(vals))
js_vals = str(vals)
if data_only == 1:
print("DONE PREV DATA:")
return()
palette = sns.color_palette("Reds", n_colors=11)
sns.palplot(palette)
frame_wild = "anim/png/" + state_code + "/" + state_code + "*-" + field + "*.png"
print("FRAME WILD:", frame_wild)
files = glob.glob(frame_wild)
if len(files) == 0:
print("NO FILES FOUND!:", frame_wild)
exit()
else:
print("FILES:", files)
imc = cv2.imread(files[0])
ih,iw = imc.shape[:2]
ih = int(ih * 1.5)
iw = int(iw * 1.5)
make_cpm_legend(palette, state_code,field,int(ih))
leg = "anim/legends/legend-" + state_code + "-" + field + ".png"
limg = cv2.imread(leg)
lh,lw = limg.shape[:2]
tw = lw + iw + 50
th = lh + 50
cc = 0
started = 0
print(files[:-1])
#for file in sorted(files[:-1]):
for file in sorted(files):
print("FILE:", file)
fn = file.split("/")[-1]
custom_frame = np.zeros((th,tw,3),dtype=np.uint8)
imc = cv2.imread(file)
cv2.imshow('pepe2', imc)
cv2.waitKey(100)
ims = cv2.resize(imc, (int(iw),int(ih)))
x1 = 10
x2 = 10 + lw
y1 = 25
y2 = 25 + lh
custom_frame[y1:y2,x1:x2] = limg.copy()
x1 = 10 + lw
x2 = 10 + lw + iw
y1 = 25
y2 = 25 + ih
custom_frame[y1:y2,x1:x2] = ims.copy()
#if cc < len(dates):
if True:
started = 1
print(cc, len(dates), len(vals))
if cc < len(dates):
desc = state_name + " " + str(dates[cc]) + " " + field_desc[field] + " " + str(vals[cc])
else:
desc = "missing data for " + str(cc)
#desc = state_name + " " + field_desc[field] + " " + str(dates[cc]) + " " + str(vals[cc])
cv2.putText(custom_frame, desc, (40,25), cv2.FONT_HERSHEY_SIMPLEX, .7, (255, 255, 255), 1)
desc = "www.cvinfo.org"
cv2.putText(custom_frame, desc, (tw-160,th-2), cv2.FONT_HERSHEY_SIMPLEX, .6, (255, 255, 255), 1)
dd = fn.split("-")
if "USA" not in fn:
state_code = dd[0]
field = dd[1]
date = dd[2]
else:
state_code = dd[0]
field = dd[2]
date = dd[3]
new_field = field_alias[field]
print("NEW FIELD:", field, new_field)
mark_fn = state_code + "-" + new_field + "-" + date + ".png"
mark_file = mark_dir + "/" + mark_fn
cv2.imwrite(mark_file, custom_frame)
cv2.imshow('pepe', custom_frame)
cv2.waitKey(250)
cc += 1
del custom_frame
def preview_data(state_code):
fields = ['cases', 'deaths','cpm','dpm','cg_med', 'dg_med','mortality', 'new_cases', 'new_deaths']
print("PREVIEW DATA")
# DELETE JS_VALS THAT PREVIOUSLY EXISTED
js = load_json_file("json/covid-19-level2-states.json")
# reset all js_vals for all states
if state_code == 'ALL':
for data in js:
state_code = data['summary_info']['state_code']
ttt = load_json_file("json/" + state_code + ".json")
if "js_vals" in ttt:
del ttt['js_vals']
ttt['js_vals'] = {}
save_json_file("json/" + state_code + ".json", ttt)
else:
# reset all js_vals for this state
ttt = load_json_file("json/" + state_code + ".json")
if "js_vals" in ttt:
del ttt['js_vals']
ttt['js_vals'] = {}
save_json_file("json/" + state_code + ".json", ttt)
if state_code == 'ALL':
for data in js:
state_code = data['summary_info']['state_code']
for ff in fields:
preview(state_code,ff,1)
else:
for ff in fields:
preview(state_code,ff,1)
def main_menu():
print("FCU")
exit()
state_code = sys.argv[1]
field = sys.argv[2]
fields = ['cases', 'deaths','cpm','dpm','cg_med', 'dg_med','mortality', 'new_cases', 'new_deaths']
if state_code == 'preview':
cmd = 'preview'
state_code = sys.argv[2]
field = sys.argv[3]
if field != "ALL":
preview(state_code, field)
else:
for field in fields:
preview(state_code, field)
exit()
if state_code == 'prev_data':
cmd = state_code
state_code = sys.argv[2]
if state_code != "ALL":
preview_data(state_code)
else:
js = load_json_file("json/covid-19-level2-states.json")
for data in js:
state_code = data['summary_info']['state_code']
preview_data(state_code)
exit()
if state_code == "USA":
print("USA")
exit()
if field != "ALL":
make_usa_map_seq(field )
else:
for field in fields:
make_usa_map_seq(field)
#make_usa_map(field, day)
#check()
#exit()
elif state_code == "ALL":
make_seq_all(field)
check_vals_files()
else:
if field != 'ALL':
make_seq(state_code, field)
else:
fields = ['cases', 'deaths','cpm','dpm','cg_med', 'dg_med','mortality', 'new_cases', 'new_deaths']
for ff in fields:
make_seq(state_code, ff)
exit()
#make_map("MD", "20200401", "cases", "1")
#make_map("MD", "20200402", "cases", "1")
print("""
cvsvg.py -- interface for making map images with covid data
Select Option:
1) Make Map For State
""")
cmd = input("Select Function : ")
if cmd == "1":
state_code = input("Select State Code (for example NY) : ")
print(""" Select Rank Scale:
1) Colors Relative to State Rank
2) Colors Relative to National Rank :
""")
scale_rank = input("Select Rank Scale: ")
print("""
Select Color Coding Data Scheme :
    1) CASES -- palette scaled to cases
    2) DEATHS -- palette scaled to deaths
    3) CPM -- palette scaled to CPM
    4) DPM -- palette scaled to DPM
    5) GROWTH -- palette scaled to DPM
    6) MORTALITY -- palette scaled to DPM
""")
data_scheme = input("Select Color Coding Data Scheme ) : ")
print("You choose : ")
print("State : ", state_code)
print("Scale Rank: ", scale_rank)
print("Data Scheme: ", data_scheme)
def get_county_field_val_for_day(data,day,field):
for d in data:
if day == d['date']:
val = d[field]
print("VAL FIND:", day, val)
def make_usa_vals_from_county():
flat_stats = []
cl2 = load_json_file("json/covid-19-level2-counties-all-days.json")
all_usa_data = {}
days = {}
for data in cl2:
days[data['day'].replace("-", "")] = 1
data['date'] = data['day'].replace("-", "")
flat_stats.append(data)
sorted_days = []
for day in days:
sorted_days.append(day)
sorted_days = sorted(sorted_days)
usa_day = {}
for s in flat_stats:
date = s['date']
cases = s['cases']
deaths = s['deaths']
new_cases = s['new_cases']
new_deaths = s['new_deaths']
if date not in usa_day:
usa_day[date] = {}
usa_day[date]['cases'] = cases
usa_day[date]['deaths'] = deaths
usa_day[date]['new_cases'] = new_cases
usa_day[date]['new_deaths'] = new_deaths
else:
usa_day[date]['cases'] += cases
usa_day[date]['deaths'] += deaths
usa_day[date]['new_cases'] += new_cases
usa_day[date]['new_deaths'] += new_deaths
case_grs = []
death_grs = []
last_cases = 0
last_deaths = 0
dc = 0
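    # NOTE (editor's assumption): the 327.2 divisor below appears to be the U.S. population
    # in millions, so cases/327.2 and deaths/327.2 give per-million rates (cpm / dpm).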
for date in sorted_days:
usa_day[date]['cpm'] = round(usa_day[date]['cases'] / 327.2, 2)
usa_day[date]['dpm'] = round(usa_day[date]['deaths'] / 327.2, 2)
usa_day[date]['mortality'] = round((usa_day[date]['deaths'] / usa_day[date]['cases']) * 100, 2)
cases = usa_day[date]['cases']
deaths = usa_day[date]['deaths']
if int(cases) > 0:
print("GROWTH:", last_cases , "/", cases)
case_growth = (1 - (int(last_cases) / int(cases))) * 100
else:
case_growth = 0
if int(deaths) > 0:
death_growth = (1 - (int(last_deaths) / int(deaths))) * 100
else:
death_growth = 0
if len(case_grs) > 3:
cg_avg = round(np.mean(case_grs[-3:]),2)
cg_med = round(np.median(case_grs[-3:]),2)
else:
cg_avg = case_growth
cg_med = case_growth
if len(death_grs) > 3:
dg_avg = round(np.mean(death_grs[-3:]),2)
dg_med = round(np.median(death_grs[-3:]),2)
else:
dg_avg = death_growth
dg_med = death_growth
# growth fields
usa_day[date]['case_growth'] = case_growth
usa_day[date]['death_growth'] = death_growth
usa_day[date]['cg_med'] = cg_med
usa_day[date]['dg_med'] = dg_med
usa_day[date]['cg_avg'] = cg_avg
usa_day[date]['dg_avg'] = dg_avg
usa_day[date]['date'] = date
if cases > 0:
case_grs.append(case_growth)
if deaths > 0:
death_grs.append(death_growth)
last_cases = int(cases)
last_deaths = int(deaths)
dc = dc + 1
js_vals = {
'dates' : [],
'cpm' : [],
'dpm' : [],
'cases' : [],
'deaths' : [],
'new_cases' : [],
'new_deaths' : [],
'case_growth' : [],
'death_growth' : [],
'cg_med' : [],
'dg_med' : [],
        'mortality' : []
}
flat_usa = []
for day in sorted_days:
print("DAY:", day)
dd = usa_day[day]
flat_usa.append(dd)
print(day, usa_day[day])
js_vals['dates'].append(day)
js_vals['cases'].append(dd['cases'])
js_vals['deaths'].append(dd['deaths'])
js_vals['new_cases'].append(dd['new_cases'])
js_vals['new_deaths'].append(dd['new_deaths'])
js_vals['cpm'].append(dd['cpm'])
js_vals['dpm'].append(dd['dpm'])
js_vals['case_growth'].append(dd['case_growth'])
js_vals['death_growth'].append(dd['death_growth'])
js_vals['cg_med'].append(dd['cg_med'])
js_vals['dg_med'].append(dd['dg_med'])
js_vals['mortality'].append(dd['mortality'])
print("JSON DATES:", js_vals['dates'])
print(4)
usa_json = load_json_file("json/USA.json")
usa_json['day_stats'] = flat_usa
usa_json['js_vals'] = js_vals
save_json_file("json/USA.json", usa_json)
def make_usa_vals_from_state():
cl2 = load_json_file("json/covid-19-level2-states.json")
flat_stats = []
for state_obj in cl2:
stats = state_obj['state_stats']
for stat in stats:
flat_stats.append(stat)
usa_day = {}
for s in flat_stats:
print(s)
date = s['date']
cases = s['cases']
deaths = s['deaths']
new_cases = s['new_cases']
new_deaths = s['new_deaths']
if date not in usa_day:
usa_day[date] = {}
usa_day[date]['cases'] = cases
usa_day[date]['deaths'] = deaths
usa_day[date]['new_cases'] = new_cases
usa_day[date]['new_deaths'] = new_deaths
else:
usa_day[date]['cases'] += cases
usa_day[date]['deaths'] += deaths
usa_day[date]['new_cases'] += new_cases
usa_day[date]['new_deaths'] += new_deaths
days = []
for dd in usa_day:
days.append(dd)
days = sorted(days)
case_grs = []
death_grs = []
last_cases = 0
last_deaths = 0
dc = 0
for date in days:
usa_day[date]['cpm'] = round(usa_day[date]['cases'] / 327.2, 2)
usa_day[date]['dpm'] = round(usa_day[date]['deaths'] / 327.2, 2)
usa_day[date]['mortality'] = round((usa_day[date]['deaths'] / usa_day[date]['cases']) * 100, 2)
cases = usa_day[date]['cases']
deaths = usa_day[date]['deaths']
if int(cases) > 0:
print("GROWTH:", last_cases , "/", cases)
case_growth = (1 - (int(last_cases) / int(cases))) * 100
else:
case_growth = 0
if int(deaths) > 0:
death_growth = (1 - (int(last_deaths) / int(deaths))) * 100
else:
death_growth = 0
if len(case_grs) > 3:
cg_avg = round(np.mean(case_grs[-3:]),2)
cg_med = round(np.median(case_grs[-3:]),2)
else:
cg_avg = case_growth
cg_med = case_growth
if len(death_grs) > 3:
dg_avg = round(
|
np.mean(death_grs[-3:])
|
numpy.mean
|
import random
import traceback
import numpy as np
from PIL import Image, ImageFilter
from torch.distributions.uniform import Uniform
from torch.distributions.normal import Normal
from torch.utils.data import Dataset
from torchvision.transforms import functional as func_transforms
from libyana.transformutils import colortrans, handutils
from meshreg.datasets.queries import BaseQueries, TransQueries, one_query_in
from meshreg.datasets import datutils
class HandObjSet(Dataset):
"""Hand-Object dataset
"""
def __init__(
self,
pose_dataset,
center_idx=9,
inp_res=(256, 256),
max_rot=np.pi,
normalize_img=False,
split="train",
scale_jittering=0.3,
center_jittering=0.2,
train=True,
hue=0.15,
saturation=0.5,
contrast=0.5,
brightness=0.5,
blur_radius=0.5,
spacing=2,
queries=[
BaseQueries.IMAGE,
TransQueries.JOINTS2D,
TransQueries.HANDVERTS3D,
TransQueries.OBJVERTS2D,
TransQueries.OBJCORNERS2D,
TransQueries.HANDVERTS2D,
TransQueries.OBJVERTS3D,
TransQueries.OBJCORNERS3D,
BaseQueries.OBJCANVERTS,
BaseQueries.OBJCANCORNERS,
TransQueries.JOINTS3D,
],
sides="both",
block_rot=False,
sample_nb=None,
has_dist2strong=False,
):
"""
Args:
            sample_nb: Number of samples to return
            spacing: if 0, sample closest ground truth frame
            center_idx: idx of joint on which to center the 3d pose
            sides: if 'both', don't flip hands; if 'right', flip all left hands to
                right hands; if 'left', do the opposite
"""
# Dataset attributes
self.pose_dataset = pose_dataset
self.inp_res = tuple(inp_res)
self.normalize_img = normalize_img
self.center_idx = center_idx
self.sides = sides
# Sequence attributes
self.sample_nb = sample_nb
self.spacing = spacing
# Color jitter attributes
self.hue = hue
self.contrast = contrast
self.brightness = brightness
self.saturation = saturation
self.blur_radius = blur_radius
self.max_rot = max_rot
self.block_rot = block_rot
# Training attributes
self.train = train
self.scale_jittering = scale_jittering
self.center_jittering = center_jittering
self.queries = queries
self.has_dist2strong = has_dist2strong
def __len__(self):
return len(self.pose_dataset)
def get_sample(self, idx, query=None, color_augm=None, space_augm=None):
if query is None:
query = self.queries
sample = {}
if BaseQueries.IMAGE in query or TransQueries.IMAGE in query:
center, scale = self.pose_dataset.get_center_scale(idx)
needs_center_scale = True
else:
needs_center_scale = False
if BaseQueries.JOINTVIS in query:
jointvis = self.pose_dataset.get_jointvis(idx)
sample[BaseQueries.JOINTVIS] = jointvis
# Get sides
if BaseQueries.SIDE in query:
hand_side = self.pose_dataset.get_sides(idx)
hand_side, flip = datutils.flip_hand_side(self.sides, hand_side)
sample[BaseQueries.SIDE] = hand_side
else:
flip = False
# Get original image
if BaseQueries.IMAGE in query or TransQueries.IMAGE in query:
img = self.pose_dataset.get_image(idx)
if flip:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if BaseQueries.IMAGE in query:
sample[BaseQueries.IMAGE] = np.array(img)
# Get object mask
if BaseQueries.OBJMASK in query:
mask = self.pose_dataset.get_obj_mask(idx)
if flip:
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
if BaseQueries.OBJMASK in query:
sample[BaseQueries.OBJMASK] = mask
# Get keypoint vector fields
if BaseQueries.OBJFPSVECFIELD in query:
vec_field = self.pose_dataset.get_obj_fpsvectorfield(idx)
if flip:
vec_field =
|
np.fliplr(vec_field)
|
numpy.fliplr
|
#!/usr/bin/env python3
# Copyright 2022 Xiaomi Corporation (authors: <NAME>)
import os
import numpy as np
import kaldi_native_io
base = "htk_matrix"
wspecifier = f"ark,scp:{base}.ark,{base}.scp"
rspecifier = f"scp:{base}.scp"
def test_htk_matrix_writer():
with kaldi_native_io.HtkMatrixWriter(wspecifier) as ko:
a_value = np.array([[1, 2], [3, 4]], dtype=np.float32)
# Refer to https://labrosa.ee.columbia.edu/doc/HTKBook21/node58.html
# for the format of the header
# Assume frame shift is 10ms, which is 10*10^6 ns,
# which is 100000 x 100 ns
a_header = kaldi_native_io.HtkHeader(
num_samples=2, # there are two frames, i.e., two rows
sample_period=100000, # in 100 ns
sample_size=4 * a_value.shape[1], # each column is 4 bytes
# 6 -> MFCC
# 0o100 -> has energy
sample_kind=6 | 0o100,
)
ko.write("a", (a_value, a_header))
b_value = np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32)
b_header = kaldi_native_io.HtkHeader(
num_samples=2, # there are two frames, i.e., two rows
sample_period=100000, # in 100 ns
sample_size=4 * b_value.shape[1], # each column is 4 bytes
# 6 -> MFCC
# 0o100 -> has energy
sample_kind=6 | 0o100,
)
ko["b"] = (b_value, b_header)
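
# Editor's sketch (not part of the original test file): building the header fields from a
# float32 feature matrix and a frame shift in seconds, following the comments above.
# kaldi_native_io.HtkHeader is used exactly as in the tests; the helper name is hypothetical.
def _make_htk_header_sketch(feats: np.ndarray, frame_shift_s: float = 0.01):
    return kaldi_native_io.HtkHeader(
        num_samples=feats.shape[0],              # one "sample" per frame (row)
        sample_period=int(frame_shift_s * 1e7),  # expressed in 100 ns units (0.01 s -> 100000)
        sample_size=4 * feats.shape[1],          # float32 -> 4 bytes per column
        sample_kind=6 | 0o100,                   # 6 = MFCC, 0o100 = has energy
    )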
def test_sequential_htk_matrix_reader():
with kaldi_native_io.SequentialHtkMatrixReader(rspecifier) as ki:
for key, value in ki:
if key == "a":
assert np.array_equal(
value[0], np.array([[1, 2], [3, 4]], dtype=np.float32)
)
expected_header = kaldi_native_io.HtkHeader(
num_samples=2, # there are two frames, i.e., two rows
sample_period=100000, # in 100 ns
sample_size=4 * value[0].shape[1], # each column is 4 bytes
# 6 -> MFCC
# 0o100 -> has energy
sample_kind=6 | 0o100,
)
assert value[1] == expected_header
elif key == "b":
assert np.array_equal(
value[0],
np.array([[10, 20, 30], [40, 50, 60]], dtype=np.float32),
)
expected_header = kaldi_native_io.HtkHeader(
num_samples=2, # there are two frames, i.e., two rows
sample_period=100000, # in 100 ns
sample_size=4 * value[0].shape[1], # each column is 4 bytes
# 6 -> MFCC
# 0o100 -> has energy
sample_kind=6 | 0o100,
)
assert value[1] == expected_header
else:
raise ValueError(f"Unknown key {key} with value {value}")
def test_random_access_htk_matrix_reader():
with kaldi_native_io.RandomAccessHtkMatrixReader(rspecifier) as ki:
assert "b" in ki
assert "a" in ki
assert np.array_equal(
ki["a"][0],
|
np.array([[1, 2], [3, 4]], dtype=np.float32)
|
numpy.array
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_client import Client
import cv2
import sys
import numpy as np
import os
import time
import re
import base64
from tools.infer.predict_rec import TextRecognizer
from params import read_params
global_args = read_params()
if global_args.use_gpu:
from paddle_serving_server_gpu.web_service import WebService
else:
from paddle_serving_server.web_service import WebService
class TextRecognizerHelper(TextRecognizer):
def __init__(self, args):
super(TextRecognizerHelper, self).__init__(args)
if self.loss_type == "ctc":
self.fetch = ["save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"]
def preprocess(self, img_list):
img_num = len(img_list)
args = {}
# Calculate the aspect ratio of all text bars
width_list = []
for img in img_list:
width_list.append(img.shape[1] / float(img.shape[0]))
indices = np.argsort(np.array(width_list))
args["indices"] = indices
predict_time = 0
beg_img_no = 0
end_img_no = img_num
norm_img_batch = []
max_wh_ratio = 0
for ino in range(beg_img_no, end_img_no):
h, w = img_list[indices[ino]].shape[0:2]
wh_ratio = w * 1.0 / h
max_wh_ratio = max(max_wh_ratio, wh_ratio)
for ino in range(beg_img_no, end_img_no):
if self.loss_type != "srn":
norm_img = self.resize_norm_img(img_list[indices[ino]],
max_wh_ratio)
norm_img = norm_img[np.newaxis, :]
norm_img_batch.append(norm_img)
else:
norm_img = self.process_image_srn(img_list[indices[ino]],
self.rec_image_shape, 8, 25,
self.char_ops)
encoder_word_pos_list = []
gsrm_word_pos_list = []
gsrm_slf_attn_bias1_list = []
gsrm_slf_attn_bias2_list = []
encoder_word_pos_list.append(norm_img[1])
gsrm_word_pos_list.append(norm_img[2])
gsrm_slf_attn_bias1_list.append(norm_img[3])
gsrm_slf_attn_bias2_list.append(norm_img[4])
norm_img_batch.append(norm_img[0])
norm_img_batch = np.concatenate(norm_img_batch, axis=0).copy()
feed = {"image": norm_img_batch.copy()}
return feed, self.fetch, args
def postprocess(self, outputs, args):
if self.loss_type == "ctc":
rec_idx_batch = outputs[0]
predict_batch = outputs[1]
rec_idx_lod = args["save_infer_model/scale_0.tmp_0.lod"]
predict_lod = args["save_infer_model/scale_1.tmp_0.lod"]
indices = args["indices"]
rec_res = [['', 0.0]] * (len(rec_idx_lod) - 1)
for rno in range(len(rec_idx_lod) - 1):
beg = rec_idx_lod[rno]
end = rec_idx_lod[rno + 1]
rec_idx_tmp = rec_idx_batch[beg:end, 0]
preds_text = self.char_ops.decode(rec_idx_tmp)
beg = predict_lod[rno]
end = predict_lod[rno + 1]
probs = predict_batch[beg:end, :]
ind = np.argmax(probs, axis=1)
blank = probs.shape[1]
valid_ind = np.where(ind != (blank - 1))[0]
if len(valid_ind) == 0:
continue
score = np.mean(probs[valid_ind, ind[valid_ind]])
rec_res[indices[rno]] = [preds_text, score]
elif self.loss_type == 'srn':
char_num = self.char_ops.get_char_num()
preds = rec_idx_batch.reshape(-1)
elapse = time.time() - starttime
predict_time += elapse
total_preds = preds.copy()
for ino in range(int(len(rec_idx_batch) / self.text_len)):
preds = total_preds[ino * self.text_len:(ino + 1) *
self.text_len]
ind =
|
np.argmax(probs, axis=1)
|
numpy.argmax
|
# %%
#%%
# SCRATCH SCRIPT - Multi-Modal tree counter
# Shows proof-of-concept for training a network to look at
# big trees vs small trees vs all trees vs none-of-the-above
# using both a grid input and user text. Isn't particularly
# effective or well-organized, but I suspect that's in large part due
# to its small and incredibly crude training corpus.
# The goal is to spring off this one into something more general,
# especially once better dataset generation has been created.
# For example:
# 1. Output list of commands and arguments via RNN/transformer decoder,
# potentially making some command functions trivial
# (e.g. instead of doing ("COUNT", "BIG_TREES"), it could
# just put ("PRINT", "2") since it's within the algorithm's
# capacity to solve if you feed CNN output into an output RNN)
# 2. Try something like "peek around corner" or "move to wall" and implement them
# ("find closest wall" would be a good one, as function
# could then pick a good one from heatmap)
# ("peek around corner" is another good one to test complex
# language understanding combined w/ map - need to understand
# it's a movement action and what terrain it's looking at)
# This needs to be broken up into a bunch of scripts, e.g.
# 1. Dataset generation script(s)
#    1.1. Possibly a separate, easy-to-run dataset generation command/script
# 1.2. Alternatively a data-generator-backend tf.data.Dataset or something like that.
# 2. Model definition script(s)
# 2.1. Since we might be playing around with structure a lot, could consider a config file/dict
# 2.2. Possibly define main chunks in one file and let user compose them in their own script?
# 3. Model training script(s)
# 4. Hyperparameter optimization script(s), if different from the training one.
# Still working out best practices on this stuff for small vs large projects,
# so all that is subject to change...
# %%
# ******************************************************************
# ************************** IMPORTANT *****************************
# ******************************************************************
# NOTICE UP FRONT REGARDING DATA REQUIREMENT:
# Right now this script depends on a dataset I downloaded from
# https://huggingface.co/transformers/custom_datasets.html
# It can be replaced with __literally any batch of random sentences__ -
#
# To use the script in its current state, you need Large Movie Review Dataset
# which can be downloaded with the following commands (from that link):
# wget http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
# tar -xf aclImdb_v1.tar.gz
#
# You need the "aclImdb" folder in your working directory when you run this
# script.
#
# In the future if we use a data source like this, it should be downloaded
# as part of running it if it doesn't exist. The repository's
# /data/cache folder is .gitignore'd so it's the ideal place to put all
# datasets. Alternatively, there's /tmp folders for linux, but I want to be
# cross-platform friendly...
# SUGGESTION:
# try:
# import nltk
# nltk.download('brown')
# text = nltk.Text(nltk.corpus.brown.words())
# text.generate(text_length) # <- randomize amount in vaguely sentence length range
#
# There are probably better ways, too...if nothing else, download dataset in a better way!
#%%
import numpy as np
import tensorflow as tf
from tensorflow import keras
#%%
def plot_grid(grid, hide_zeros=True):
nrows, ncols = grid.shape
def get_grid_char(grid_number):
if hide_zeros and grid_number == 0:
return "."
else:
return str(grid_number)
for col_ind in range(ncols):
row_string = "".join(get_grid_char(x) for x in grid[:,col_ind])
print(row_string)
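# Illustrative usage sketch (editor's addition): print a tiny 5x5 grid with a couple of
# non-zero cells; zeros render as '.' by default. demo_grid is a made-up example.
demo_grid = np.zeros((5, 5), dtype=int)
demo_grid[1, 2] = 1
demo_grid[3, 3] = 2
plot_grid(demo_grid)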
#%%
RANDOM_SEED = None
NUM_GRIDS = 10000 # Now it's just sampled (w/replacement), so keep that in mind
GRID_SIZE = 24
NUM_GRID_CHANNELS = 3 # starting at -1
MAX_SMALL_TREES_PER_MAP = 12
MAX_BIG_TREES_PER_MAP = 6
NUM_FEATURES = 5 # player x/y, num_big_trees, num_small_trees, num_all_trees
if RANDOM_SEED is None:
RANDOM_SEED = np.random.randint(low=0, high=10000)
rng =
|
np.random.default_rng(seed=RANDOM_SEED)
|
numpy.random.default_rng
|
# Authors: <NAME>, Néstor
#          <NAME>, Andrés
import numpy as np
import heapq
import math
from escenario import Escenario
from copy import deepcopy
from arbol import Nodo
from timeit import default_timer as timer
def distancia(pos0, posf):
    # L1 distance
return abs(posf[0] - pos0[0]) + abs(posf[1] - pos0[1])
def crearMatriz(e: Escenario):
n = e.n_minas
listaMinas = e.lista_minas
m = np.zeros((n + 1, n + 1), dtype=float)
for i in range(n + 1):
        # iterate only over the upper half (the matrix is symmetric by the properties of the problem)
for j in range(i, n + 1):
            if i == j: # diagonal set to infinity
m[i, j] = math.inf
else:
                if i == 0: # distance to the starting point
d = distancia(e.pos_ini, listaMinas[j - 1])
                else: # distance between the mines
d = distancia(listaMinas[i - 1], listaMinas[j - 1])
m[i, j] = d
m[j, i] = d
return m
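
# Editor's sketch (not part of the original file): the matrix built above is the symmetric
# L1-distance matrix between the start position and the mines, with an infinite diagonal.
# Small hand-built example using only the distancia helper; the positions are made up.
if __name__ == "__main__":
    _pos_ini = (0, 0)
    _minas = [(1, 2), (3, 1)]
    _puntos = [_pos_ini] + _minas
    _demo = np.full((3, 3), math.inf)
    for _a in range(3):
        for _b in range(_a + 1, 3):
            _demo[_a, _b] = _demo[_b, _a] = distancia(_puntos[_a], _puntos[_b])
    # demo[0, 1] == 3 (|1-0| + |2-0|), demo[1, 2] == 3 (|3-1| + |1-2|)
    assert _demo[0, 1] == 3 and _demo[1, 2] == 3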
def reducirMatrizFilas(orig):
"""
    Input: original cost matrix
    Output: First: matrix in which the minimum of its row has been subtracted from each element
            Second: cost of the reduction (sum of the minima)
"""
m = orig
    min = m.min(axis=1) # minimum of each row
    min[min == math.inf] = 0 # if the whole row was infinite, use 0
restado =
|
np.sum(min)
|
numpy.sum
|
import html
import json
from datetime import datetime, timedelta
import numpy as np
import requests
from discord.ext.commands import CommandError
from main_resources.item_use import *
class Currency(commands.Cog):
"""🤑 Everything related to da money 🤑"""
def __init__(self, bot: commands.Bot):
self.bot = bot
self.collection = database["currency"]
self.utils = currency_utils(self.collection)
self.defined_currencies = json.loads(
open('./main_resources/Assets/currency_values.json', encoding='utf-8').read())
self.items_by_id = json.loads(
open('./main_resources/Assets/shop_items.json', encoding='utf-8').read())["by_id"]
self.id_by_name = json.loads(
open('./main_resources/Assets/shop_items.json', encoding='utf-8').read())["by_name"]
self.paged_shop, self.pages = create_paged_shop(self.items_by_id)
self.houses = {
1: "5 of a kind",
2: "4 of a kind",
3: "3 of a kind and a pair",
4: "2 pairs",
5: "1 pair",
6: "None of the accepted combinations"
}
self.prizes = {
5: 2,
4: 1.5,
3: 1.3,
2: 1.2,
1: 1
}
self.quiz_categories = {
"gk": 9,
"books": 10,
"film": 11,
"music": 12,
"theatre": 13,
"tv": 14,
"games": 15,
"bgames": 16,
"sci": 17,
"cs": 18,
"math": 19,
"myth": 20,
"sports": 21,
"geography": 22,
"history": 23,
"pol": 24,
"art": 25,
"celeb": 26,
"animals": 27
}
self.quiz_help_dict = {
"gk": "General Knowldege",
"books": "Books",
"film": "Films",
"music": "Music",
"theatre": "Musicals and Theatre",
"tv": "Television",
"games": "Video Games",
"bgames": "Board Games",
"sci": "Science and Nature",
"cs": "Computer Science",
"math": "Mathematics",
"myth": "Mythology",
"sports": "Sports",
"geography": "Geography",
"history": "History",
"pol": "Politics",
"art": "Art",
"celeb": "Celebrities",
"animals": "Animals"
}
@commands.Cog.listener(name="on_message")
async def on_message(self, message:discord.Message):
await on_message(self.bot, message)
@commands.command(name="daily")
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def daily(self, ctx: commands.Context):
"""Daily dose of sweet cash 💰💰💰"""
daily_time = self.collection.find_one({"_id": ctx.author.id}, {"t_daily": 1})
if daily_time is None \
or daily_time['t_daily'] == 0 \
or (datetime.utcnow() - daily_time['t_daily']) >= timedelta(days=1):
self.utils.update_and_insert(ctx.author.id, inc_vals={"wallet": self.defined_currencies['daily']},
set_vals={"t_daily": datetime.utcnow()}, wallet=False, t_daily=False)
emb = discord.Embed(title="Enjoy your daily cold hard cash 🤑",
description=f"{self.defined_currencies['daily']} coins were placed in your wallet!",
color=discord.Colour.green())
emb.add_field(name="You can claim your daily again in:", value="24 hours")
await ctx.send(embed=emb)
else:
emb = discord.Embed(title="You have already claimed your daily coins", color=discord.Colour.red())
del_time = (daily_time['t_daily'] + timedelta(days=1)) - datetime.utcnow()
days, seconds = del_time.days, del_time.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
seconds = seconds % 60
emb.add_field(name="You can claim your daily again in:",
value=f"{hours} hours, {minutes} minutes and {seconds} seconds")
await ctx.send(embed=emb)
raise CommandError
@commands.command(name="weekly")
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def weekly(self, ctx: commands.Context):
"""Weekly dose of sweet cash 💰💰💰"""
weekly_time = self.collection.find_one({"_id": ctx.author.id}, {"t_weekly": 1})
if weekly_time is None \
or weekly_time['t_weekly'] == 0 \
or (datetime.utcnow() - weekly_time['t_weekly']) >= timedelta(days=7):
self.utils.update_and_insert(ctx.author.id, inc_vals={"wallet": self.defined_currencies['weekly']},
set_vals={"t_weekly": datetime.utcnow()}, wallet=False, t_weekly=False)
emb = discord.Embed(title="Enjoy your weekly cold hard cash 🤑",
description=f"{self.defined_currencies['weekly']} coins were placed in your wallet!",
color=discord.Colour.green())
emb.add_field(name="You can claim your weekly again in:", value="7 days")
await ctx.send(embed=emb)
else:
emb = discord.Embed(title="You have already claimed your weekly coins", color=discord.Colour.red())
del_time = (weekly_time['t_weekly'] + timedelta(days=7)) - datetime.utcnow()
days, seconds = del_time.days, del_time.seconds
hours = (days * 24 + seconds // 3600) % 24
minutes = (seconds % 3600) // 60
# seconds = seconds % 60
emb.add_field(name="You can claim your weekly again in:",
value=f"{days} days, {hours} hours and {minutes} minutes")
await ctx.send(embed=emb)
raise CommandError
@commands.command(name="monthly")
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def monthly(self, ctx: commands.Context):
"""Monthly dose of sweet cash 💰💰💰"""
monthly_time = self.collection.find_one({"_id": ctx.author.id}, {"t_monthly": 1})
if monthly_time is None \
or monthly_time['t_monthly'] == 0 \
or (datetime.utcnow() - monthly_time['t_monthly']) >= timedelta(days=30):
self.utils.update_and_insert(ctx.author.id, inc_vals={"wallet": self.defined_currencies['monthly']},
set_vals={"t_monthly": datetime.utcnow()}, wallet=False, t_monthly=False)
emb = discord.Embed(title="Enjoy your monthly cold hard cash 🤑",
description=f"{self.defined_currencies['monthly']} coins were placed in your wallet!",
color=discord.Colour.green())
emb.add_field(name="You can claim your monthly again in:", value="30 days")
await ctx.send(embed=emb)
else:
emb = discord.Embed(title="You have already claimed your monthly coins", color=discord.Colour.red())
del_time = (monthly_time['t_monthly'] + timedelta(days=30)) - datetime.utcnow()
days, seconds = del_time.days, del_time.seconds
hours = (days * 24 + seconds // 3600) % 24
minutes = (seconds % 3600) // 60
# seconds = seconds % 60
emb.add_field(name="You can claim your monthly again in:",
value=f"{days} days, {hours} hours and {minutes} minutes")
await ctx.send(embed=emb)
raise CommandError
@commands.command(name="balance", aliases=['bal'])
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def balance(self, ctx: commands.Context, targeted_user: discord.Member = None):
"""Check the balance of those pesky scrubs"""
if targeted_user is None:
targeted_user = ctx.author
wallet_coins, bank_coins = self.utils.get_balance(targeted_user.id)
desc_str = f"**Wallet: **" \
f"<a:chintucoin:839401482184163358>{wallet_coins}\n**Bank: **" \
f"<a:chintucoin:839401482184163358>{bank_coins}"
emb = discord.Embed(title=f"**{targeted_user.display_name}'s Account details**", description=desc_str,
color=discord.Colour.green())
if wallet_coins + bank_coins == 0:
emb.set_footer(text="Poor much?")
await ctx.send(embed=emb)
@commands.command(name="withdraw", aliases=['with'])
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def withdraw(self, ctx: commands.Context, amount: str):
wallet_coins, bank_coins = self.utils.get_balance(ctx.author.id)
if bank_coins == 0:
await ctx.send(f"{ctx.author.mention} Your bank account is empty lmfao")
raise CommandError
if amount.lower() == "max" or amount.lower() == "all":
amount = bank_coins
else:
try:
amount = int(amount)
except ValueError:
await ctx.send(f"{ctx.author.mention} Enter a valid amount or max/all")
raise CommandError
if amount <= 0:
await ctx.send(f"{ctx.author.mention} Enter a valid amount or max/all")
raise CommandError
if amount > bank_coins:
await ctx.send(f"{ctx.author.mention} You do not have {amount} coins in your bank account")
raise CommandError
self.utils.update(ctx.author.id, inc_vals={"wallet": amount, "bank": -amount})
emb = discord.Embed(title=f"{ctx.author.display_name} Withdrew {amount} coins",
description=f"**Wallet: **<a:chintucoin:839401482184163358>"
f"{wallet_coins + amount}\n**Bank: **<a:chintucoin:839401482184163358>"
f"{bank_coins - amount}",
color=discord.Colour.green())
await ctx.send(embed=emb)
@commands.command(name="deposit", aliases=['dep'])
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def deposit(self, ctx: commands.Context, amount: str):
wallet_coins, bank_coins = self.utils.get_balance(ctx.author.id)
if wallet_coins == 0:
await ctx.send(f"{ctx.author.mention} Your wallet is empty lmfao")
raise CommandError
if amount.lower() == "max" or amount.lower() == "all":
amount = wallet_coins
else:
try:
amount = int(amount)
except ValueError:
await ctx.send(f"{ctx.author.mention} Enter a valid amount or max/all")
raise CommandError
if amount <= 0:
await ctx.send(f"{ctx.author.mention} Enter a valid amount or max/all")
raise CommandError
if amount > wallet_coins:
await ctx.send(f"{ctx.author.mention} You do not have {amount} coins in your wallet")
raise CommandError
self.utils.update(ctx.author.id, inc_vals={"wallet": -amount, "bank": amount})
emb = discord.Embed(title=f"{ctx.author.display_name} Deposited {amount} coins",
description=f"**Wallet: **"
f"<a:chintucoin:839401482184163358>{wallet_coins - amount}\n**Bank: **"
f"<a:chintucoin:839401482184163358>{bank_coins + amount}",
color=discord.Colour.green())
await ctx.send(embed=emb)
@commands.command(name="give", aliases=['pay'])
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def give(self, ctx: commands.Context, targeted_user: discord.Member, amount: int):
"""Give away your hard earned cash 🎁"""
if ctx.author.id == targeted_user.id:
await ctx.send(f"{ctx.author.mention}, you can't give coins to yourself. 😡")
raise CommandError
if amount <= 0:
await ctx.send(f"{ctx.author.mention}, enter a value greater than 0. You can't fool me. 😡")
raise CommandError
wallet_coins, bank_coins = self.utils.get_balance(ctx.author.id)
if wallet_coins < amount or wallet_coins == 0:
await ctx.send(f"{ctx.author.mention} You don't have enough coins lmao, get a job.")
raise CommandError
else:
self.utils.update(ctx.author.id, inc_vals={"wallet": -amount})
self.utils.update_and_insert(targeted_user.id, inc_vals={"wallet": amount}, wallet=False)
await ctx.send(
f"** {ctx.author.mention} gave {amount} coins to {targeted_user.display_name} "
f"<a:chintucoin:839401482184163358>**")
@commands.command(name="shop")
@commands.cooldown(rate=1, per=1.0, type=commands.BucketType.user)
async def shop(self, ctx: commands.Context, page: int = 1):
"""See what treasures await your purchase"""
if self.pages >= page >= 1:
embed = self.paged_shop[page - 1].set_footer(text=f"Page {page} of {self.pages}")
await ctx.send(embed=embed)
else:
await ctx.send(f"{ctx.author.mention} Enter a valid page number")
raise CommandError
@commands.command(name="gift")
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def gift(self, ctx: commands.Context, target_user: discord.Member, item: str, amount: int = 1):
"""Give away your precious items 🎁"""
item_dict = None
item_id = None
item = item.lower()
if item in self.items_by_id:
item_dict = self.items_by_id[item]
item_id = item
elif item in self.id_by_name:
item_dict = self.items_by_id[str(self.id_by_name[item])]
item_id = str(self.id_by_name[item])
if item_dict is not None and item_id is not None:
if amount > 0:
inventory = self.collection.find_one({"_id": ctx.author.id}, {"inventory": 1})
if inventory is not None:
inventory = inventory["inventory"]
if item_id in inventory and inventory[item_id] >= amount:
embed = discord.Embed(
title=f"Do you want to gift {amount} {item_dict['name']} to {target_user.name}?",
description="React with 👍 within 15 seconds to confirm", color=discord.Colour.green())
embed.set_footer(text=f"Requested by {ctx.author.display_name}", icon_url=ctx.author.avatar_url)
message = await ctx.send(embed=embed)
await message.add_reaction("👍")
def check(reaction, user):
return user.id == ctx.author.id and str(
reaction.emoji) == '👍' and reaction.message.id == message.id
try:
await self.bot.wait_for('reaction_add', timeout=15.0, check=check)
self.utils.update(ctx.author.id, inc_vals={f"inventory.{item_id}": -amount})
self.utils.update_and_insert(target_user.id, inc_vals={f"inventory.{item_id}": amount},
inventory=False)
await ctx.send(
f"{ctx.author.mention} You have successfully "
f"gifted {amount} {item_dict['name']} to {target_user.name}")
except asyncio.TimeoutError:
embed = discord.Embed(
title=f"Do you want to gift {amount} {item_dict['name']} to {target_user.name}?",
description="Gift failed. Please try again", color=discord.Colour.red())
embed.set_footer(text=f"Requested by {ctx.author.display_name}",
icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
await message.clear_reactions()
raise CommandError
else:
await ctx.send(f"{ctx.author.mention} Lmao you don't have {amount} {item_dict['name']} to"
f" gift.")
raise CommandError
else:
self.utils.insert_new_document(ctx.author.id)
await ctx.send(f"{ctx.author.mention} Lmao you don't have {amount} {item_dict['name']} to gift.")
raise CommandError
else:
await ctx.send(f"{ctx.author.mention} Enter a valid amount")
raise CommandError
else:
await ctx.send(f"{ctx.author.mention} Enter a valid item ID or name")
raise CommandError
@commands.command(name="buy")
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def buy(self, ctx: commands.Context, item, amount: int = 1):
"""Buy the items of your dreams from the shop <a:chintucoin:839401482184163358>"""
item_dict = None
item = item.lower()
try:
item = int(item)
if str(item) in self.items_by_id:
item_dict = self.items_by_id[str(item)]
except Exception:
if item in self.id_by_name:
item_dict = self.items_by_id[str(self.id_by_name[item])]
item = self.id_by_name[item]
if item_dict is None:
await ctx.send(f"{ctx.author.mention} Enter a valid item ID or name")
raise CommandError
if amount < 0:
await ctx.send(f"{ctx.author.mention} Enter a valid amount")
raise CommandError
wallet_coins, bank_coins = self.utils.get_balance(ctx.author.id)
if wallet_coins <= 0 or wallet_coins < self.items_by_id[str(item)]["value"] * amount:
await ctx.send(
f"{ctx.author.mention} You don't have enough money" +
f" for buying {self.items_by_id[str(item)]['name']}. Get a job lmao.")
raise CommandError
embed = discord.Embed(
title=f"Do you want to purchase {amount} {item_dict['name']} for {item_dict['value'] * amount}?",
description="React with 👍 within 15 seconds to purchase", color=discord.Colour.green())
embed.set_footer(text=f"Requested by {ctx.author.display_name}", icon_url=ctx.author.avatar_url)
message = await ctx.send(embed=embed)
await message.add_reaction("👍")
def check(reaction, user):
return user.id == ctx.author.id and str(
reaction.emoji) == '👍' and reaction.message.id == message.id
try:
await self.bot.wait_for('reaction_add', timeout=15.0, check=check)
self.utils.update(ctx.author.id, inc_vals={"wallet": -item_dict["value"] * amount,
f"inventory.{str(item)}": amount})
await ctx.send(
f"{ctx.author.mention} You have successfully "
f"purchased {amount} {item_dict['name']} for {item_dict['value'] * amount}")
except asyncio.TimeoutError:
embed = discord.Embed(
title=f"Do you want to purchase {amount} {item_dict['name']} for {item_dict['value'] * amount}?",
description="Purchase failed. Please try again", color=discord.Colour.red())
embed.set_footer(text=f"Requested by {ctx.author.display_name}",
icon_url=ctx.author.avatar_url)
await message.edit(embed=embed)
await message.clear_reactions()
raise CommandError
@commands.command(name="bet")
@commands.cooldown(rate=1, per=5.0, type=commands.BucketType.user)
async def bet(self, ctx: commands.Context, amount: str):
"""Join in on some gambling action, similar to Klondike dice game"""
wallet_coins, bank_coins = self.utils.get_balance(ctx.author.id)
try:
amount = int(amount)
except ValueError:
if amount.lower() == "max" or amount.lower() == "all":
if wallet_coins <= 0:
await ctx.send(f"{ctx.author.mention} Lmao you don't have enough coins to bet.")
raise CommandError
if wallet_coins >= 250000:
amount = 250000
else:
amount = wallet_coins
else:
await ctx.send(f"{ctx.author.mention} Enter a proper amount or max/all.")
raise CommandError
if 250000 >= amount >= 50:
if wallet_coins >= amount and wallet_coins > 0:
                bot_pair, user_pair = find_pairs(np.random.randint(1, 7, 5)), find_pairs(np.random.randint(1, 7, 5))
if bot_pair <= user_pair:
embed = discord.Embed(title=f"{ctx.author.display_name}'s losing bet",
description=f"You lost {amount} coins",
color=discord.Colour.red())
embed.add_field(name="Chintu rolled:", value=self.houses[bot_pair])
embed.add_field(name="You rolled:", value=self.houses[user_pair])
self.utils.update(ctx.author.id, inc_vals={"wallet": -amount})
else:
embed = discord.Embed(title=f"{ctx.author.display_name}'s winning bet",
description=f"You won {int(amount * self.prizes[bot_pair - user_pair] + amount)} coins",
color=discord.Colour.green())
embed.add_field(name="Chintu rolled:", value=self.houses[bot_pair])
embed.add_field(name="You rolled:", value=self.houses[user_pair])
self.utils.update(ctx.author.id,
inc_vals={"wallet": int(amount * self.prizes[bot_pair - user_pair])})
await ctx.send(embed=embed)
else:
await ctx.send(f"{ctx.author.mention} Lmao you don't have enough coins to bet.")
raise CommandError
elif amount >= 250000:
await ctx.send(f"{ctx.author.mention} If I let you bet more than 50,000 coins, you'd be broke in no time.")
raise CommandError
else:
await ctx.send(f"{ctx.author.mention} Enter an amount greater than 50 coins")
raise CommandError
@commands.command(name="use")
@commands.cooldown(rate=1, per=8.0, type=commands.BucketType.user)
async def use(self, ctx: commands.Context, item):
"""Use the items you got there in your inventory"""
item_dict = None
item = item.lower()
try:
item = int(item)
if str(item) in self.items_by_id:
item_dict = self.items_by_id[str(item)]
except Exception:
if item in self.id_by_name:
item_dict = self.items_by_id[str(self.id_by_name[item])]
item = self.id_by_name[item]
if item_dict:
if item_dict['type'] == "item":
await ctx.send("This item cannot be used.")
raise CommandError
inventory_dict = self.collection.find_one({"_id": ctx.author.id}, {"inventory": 1})
if inventory_dict is not None and str(item) in inventory_dict["inventory"] and inventory_dict["inventory"][str(item)] > 0:
try:
await eval(item_dict['type'] + '(self.bot, ctx, item_dict)')
except Exception as e:
if not isinstance(e, CommandError):
await ctx.send(f"Could't use {item_dict['name']}. Please report this issue using $suggest.")
raise CommandError
else:
await ctx.send(
f"You do not have {item_dict['name']}. Buy it from the shop ($shop) before trying again.")
raise CommandError
else:
await ctx.send(f"Could not find item with name or id {item}")
raise CommandError
@commands.command(name="iteminfo")
@commands.cooldown(rate=1, per=10.0, type=commands.BucketType.user)
async def iteminfo(self, ctx: commands.Context, item_name):
item_dict = None
item = item_name.lower()
try:
item = int(item)
if str(item) in self.items_by_id:
item_dict = self.items_by_id[str(item)]
except Exception:
if item in self.id_by_name:
item_dict = self.items_by_id[str(self.id_by_name[item])]
item = self.id_by_name[item]
if item_dict:
if item_dict["properties"]:
await ctx.send(eval(f"properties_{item}(ctx)"))
else:
await ctx.send(f"{ctx.author.mention} No info available for this item")
else:
await ctx.send(f"Could not find item with name or id {item}")
raise CommandError
@commands.command(name="inventory", aliases=["inv"])
@commands.cooldown(rate=1, per=3.0, type=commands.BucketType.user)
async def inventory(self, ctx: commands.Context, target_user=None, page_number=1):
"""Check what you have in your inventory"""
if target_user is None:
target_user = ctx.author
page_number = 1
else:
try:
converter = commands.MemberConverter()
target_user = await converter.convert(ctx, target_user)
page_number = page_number
except Exception:
try:
page_number = int(target_user)
target_user = ctx.author
except Exception:
await ctx.send("Enter a valid page number")
raise CommandError
inventory_dict = self.collection.find_one({"_id": target_user.id}, {"inventory": 1})
if inventory_dict is not None:
inventory_dict = inventory_dict['inventory']
inventory_dict = {key: val for key, val in inventory_dict.items() if val != 0}
total_items = len(inventory_dict)
pages = (total_items + 4) // 5  # ceil(total_items / 5); 0 when the inventory is empty
if pages != 0:
if 0 < page_number <= pages:
keys = list(inventory_dict.keys())
embed = discord.Embed(title=f"{target_user.name}'s Inventory", color=discord.Colour.orange())
if page_number == pages:
limit = total_items
else:
limit = page_number * 5
for i in range((page_number - 1) * 5, limit):
item_id_str = keys[i]
embed.add_field(
name=f"{self.items_by_id[item_id_str]['emoji']} {self.items_by_id[item_id_str]['name']} ─ {inventory_dict[item_id_str]}",
value=f"(ID - {item_id_str}) {self.items_by_id[item_id_str]['description']}", inline=False)
embed.set_footer(icon_url=target_user.avatar_url,
text=f"Requested by {ctx.author.display_name} • Page {page_number}/{pages}")
await ctx.send(embed=embed)
else:
await ctx.send(f"Enter a valid page number")
raise CommandError
else:
await ctx.send("The inventory is empty lmao. To buy something use $shop")
raise CommandError
else:
self.utils.insert_new_document(target_user.id)
await ctx.send("The inventory is empty lmao. To buy something use $shop")
raise CommandError
@commands.command(name="quiz")
@commands.cooldown(rate=1, per=10.0, type=commands.BucketType.user)
async def quiz(self, ctx: commands.Context, category: str = None):
"""Get coins for answering questions."""
if category is None:
category = "none"
category = category.lower()
if category == "help":
embed = discord.Embed(title="Available Quiz Categories: ", color=discord.Colour.orange())
for avl_category in self.quiz_help_dict:
embed.add_field(name=self.quiz_help_dict[avl_category], value=f"$quiz {avl_category}")
await ctx.send(embed=embed)
return
create_footer = False
if category in self.quiz_categories:
category = self.quiz_categories[category]
else:
category = random.randint(9, 28)
create_footer = True
response = requests.get(
f"https://opentdb.com/api.php?amount=1&type=multiple&category={category}").json()[
"results"][0]
options = response["incorrect_answers"]
options.append(response["correct_answer"])
random.shuffle(options)
correct_option = options.index(response["correct_answer"])
desc_str = ""
num_to_alphabet = {0: "A", 1: "B", 2: "C", 3: "D"}
alphabet_to_num = {"A": 0, "B": 1, "C": 2, "D": 3}
for i in range(len(options)):
desc_str += f"**{num_to_alphabet[i]}: ** {options[i]}\n"
q_embed = discord.Embed(title=html.unescape(response["question"]), description=html.unescape(desc_str),
color=discord.Colour.orange())
if create_footer:
q_embed.set_footer(text="Use $quiz help to get a list of categories")
sent_embed = await ctx.send(embed=q_embed)
def check(message):
return message.channel == ctx.channel and message.author.id == ctx.author.id
try:
msg: discord.Message = await self.bot.wait_for('message', timeout=10.0, check=check)
if msg.content.upper() in alphabet_to_num:
if alphabet_to_num[msg.content.upper()] == correct_option:
r_embed = discord.Embed(title=f"{ctx.author.display_name} gave the correct answer",
description=f"{self.defined_currencies['quiz']} coins were added to your wallet",
color=discord.Colour.green())
if create_footer:
r_embed.set_footer(text="Use $quiz help to get a list of categories")
self.utils.update_and_insert(ctx.author.id, inc_vals={"wallet": self.defined_currencies['quiz']},
wallet=False)
await sent_embed.edit(embed=r_embed)
else:
r_embed = discord.Embed(title=f"{ctx.author.display_name} gave the incorrect answer",
description=f"The correct answer was **{num_to_alphabet[correct_option]}: {html.unescape(options[correct_option])}**",
color=discord.Colour.red())
if create_footer:
r_embed.set_footer(text="Use $quiz help to get a list of categories")
await sent_embed.edit(embed=r_embed)
else:
await ctx.send(f"{ctx.author.mention} Bruh enter a proper option next time (A/B/C/D)")
except asyncio.TimeoutError:
r_embed = discord.Embed(title=f"{ctx.author.display_name}'s answer time ran out",
description=f"The correct answer was **{num_to_alphabet[correct_option]}: {html.unescape(options[correct_option])}** (Timeout = 10 seconds)",
color=discord.Colour.red())
await sent_embed.edit(embed=r_embed)
raise CommandError
@commands.command(name="addmoney", hidden=True)
@commands.is_owner()
async def addmoney(self, ctx: commands.Context, amount: int, targeted_user: discord.Member = None):
if targeted_user is None:
targeted_user = ctx.author
self.utils.update_and_insert(targeted_user.id, inc_vals={"wallet": amount}, wallet=False)
emb = discord.Embed(description=f"***Added {amount} coins to {targeted_user.display_name}'s balance.***",
color=discord.Colour.green())
await ctx.send(embed=emb)
def create_paged_shop(items: dict):
items = {key: val for key, val in items.items() if not val['archive']}
shop_items_len = len(items)
pages = shop_items_len // 5
if shop_items_len % 5 != 0:
pages += 1
i = 0
j = -1
embeds = []
for item in items:
if i % 5 == 0:
j += 1
embeds.append(discord.Embed(title="Chintu Store", color=discord.Colour.green()))
embeds[j].add_field(name=f"{items[item]['name']} ─ {items[item]['value']}",
value=f"(ID - {item}) {items[item]['description']}", inline=False)
i += 1
return embeds, pages
def find_pairs(array: np.ndarray):
len_without_dup = len(set(array))
arr_set = list(set(array))
arr_sum = np.sum(array)
if len_without_dup > 3:
return len_without_dup + 1
elif len_without_dup == 1:
return len_without_dup
elif len_without_dup == 3:
set_sum =
completion: np.sum(arr_set)
api: numpy.sum
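Illustrative sketch (not part of this dataset row): the target API here is numpy.sum, which find_pairs uses to total the de-duplicated dice roll. The roll values below are made up for the demo.

import numpy as np

roll = np.array([3, 3, 5, 2, 5])   # hypothetical five-dice roll
arr_set = list(set(roll))          # unique faces, as in find_pairs
set_sum = np.sum(arr_set)          # numpy.sum reduces the list to a scalar (10 here)
print(set_sum)
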
# -*- coding: utf-8 -*-
import numpy as np
from typing import List, Tuple, Sequence
from numpy import sin, cos
import xarray
from datetime import datetime, timedelta
import astropy.units as u
from astropy.coordinates import get_sun, EarthLocation, AltAz
from astropy.time import Time
def compsolar(coord: List[float], minel: float,
hourstep: float, year: int = 2018) -> xarray.Dataset:
"""
coord: string or 2- or 3-tuple of WGS-84 coordinates in degrees optional altitude in meters
year: CE calendar year
minel: minimum solar elevation above horizon to consider usable for solar energy (degrees)
hourstep: hour increment
doplot: boolean
"""
# %% time and coords
if len(coord) == 2:
coord.append(0.) # in case altitude not specified
obs = EarthLocation(lat=coord[0]*u.deg, lon=coord[1]*u.deg, height=coord[2]*u.m)
plotperday = int(24 / hourstep)
t0 = datetime(year, 1, 1)
t1 = datetime(year+1, 1, 1)
ts = timedelta(hours=hourstep)
times = [t0 + i*ts for i in range((t1-t0) // ts)]
# %% computations
# yes, we need to feed times to observer and sun!
sun = get_sun(Time(times)).transform_to(AltAz(obstime=times, location=obs))
sunel = sun.alt.degree.reshape((plotperday, -1), order='F')
Irr = airmass(sunel, times, minel)
Irr = estenergy(Irr)
# %% collect output
dates = [d.date() for d in times[::plotperday]]
Irr['date'] = dates
Irr['sunel'] = (('hour', 'date'), sunel)
Irr.attrs['lat'] = coord[0]
Irr.attrs['lon'] = coord[1]
return Irr
def estenergy(Irr: xarray.Dataset) -> xarray.Dataset:
Irr['Irr'] = Irr['Irr'].fillna(0.)
Irr['Whr'] = ('date', np.trapz(Irr['Irr'], x=Irr.hour, axis=0))
return Irr
# %%
def airmass(thetadeg: float, dtime: Sequence[datetime],
minelevation_deg: float = 5.) -> xarray.Dataset:
"""
<NAME>
Those considering concentrated solar power systems need a more advanced analysis.
Aerosols, clouds, dust, etc. are not considered.
assumes observer at sea level
input: thetadeg [deg] true (not apparent) solar elevation angle above the horizon
minelevation_deg: arbitrary cutoff; since refraction is not considered, the results are highly suspect for the
sun near the horizon. Also consider blockage by terrain/buildings.
Note: use https://github.com/scivision/lowtran for far more precise modeling
"""
doy = Time2doy(dtime)
thd = np.atleast_1d(thetadeg)
thd[(thd < minelevation_deg) | (thd > 90)] = np.nan
thr =
completion: np.radians(thd)
api: numpy.radians
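Illustrative sketch (not from the original module): numpy.radians converts degrees to radians element-wise, which airmass needs before any trigonometry. The elevation angles below are made up.

import numpy as np

elevation_deg = np.array([5.0, 30.0, 60.0, 90.0])   # hypothetical solar elevations in degrees
elevation_rad = np.radians(elevation_deg)            # element-wise degrees -> radians
print(np.round(elevation_rad, 4))                    # [0.0873 0.5236 1.0472 1.5708]
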
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import unittest
import shutil
import platforms.local
import platforms.platform
class Platform(platforms.local.Platform):
@property
def device_str(self):
return 'cpu'
@property
def is_parallel(self):
return False
@property
def root(self):
return os.path.join(super(Platform, self).root, 'TESTING')
class TestCase(unittest.TestCase):
def setUp(self):
self.saved_platform = platforms.platform._PLATFORM
platforms.platform._PLATFORM = Platform(num_workers=4)
if os.path.exists(platforms.platform.get_platform().root):
shutil.rmtree(platforms.platform.get_platform().root)
os.makedirs(platforms.platform.get_platform().root)
self.root = platforms.platform.get_platform().root
def tearDown(self):
if os.path.exists(self.root): shutil.rmtree(self.root)
platforms.platform._PLATFORM = self.saved_platform
@staticmethod
def get_state(model):
"""Get a copy of the state of a model."""
return {k: v.clone().detach().cpu().numpy() for k, v in model.state_dict().items()}
def assertStateEqual(self, state1, state2):
"""Assert that two models states are equal."""
self.assertEqual(set(state1.keys()), set(state2.keys()))
for k in state1:
self.assertTrue(
completion: np.array_equal(state1[k], state2[k])
api: numpy.array_equal
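Illustrative sketch (not part of the test suite): numpy.array_equal returns True only when shapes and all elements match, which is why assertStateEqual wraps it in assertTrue. The arrays below are made up.

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = a.copy()
print(np.array_equal(a, b))        # True: same shape and values
print(np.array_equal(a, a[:1]))    # False: shapes differ
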
#!/usr/bin/env python3
import torch
import pickle
import random
import argparse
import subprocess
import numpy as np
from torch.utils import data
import matplotlib.pyplot as plt
from collections import defaultdict
class SequenceDataset(data.Dataset):
def __init__(self, entries, sequences, keywords, kw_method='permute',
include_rev=False, token_to_idx=None):
super(SequenceDataset, self).__init__()
self.amino_acids = np.unique(list(''.join(sequences)))
self.keywords = np.unique(keywords)
self.tokens = np.hstack((['<PAD>', '<UNK>', '<EOS>'],
self.amino_acids,
self.keywords))
self.token_to_idx, self.idx_to_token = self.token_idx_map(token_to_idx)
self.inputs, self.targets = self.encode_sequences(entries,
sequences,
keywords,
kw_method,
include_rev)
def token_idx_map(self, token_to_idx):
if token_to_idx is None:
token_to_idx = defaultdict(lambda: 1)
idx_to_token = defaultdict(lambda: '<UNK>')
for idx, token in enumerate(self.tokens):
token_to_idx[token] = idx
idx_to_token[idx] = token
else:
idx_to_token = defaultdict(lambda: '<UNK>')
for token, idx in token_to_idx.items():
idx_to_token[idx] = token
return token_to_idx, idx_to_token
def add_reverse(self, entries, sequences, keywords):
rev_sequences = np.empty(sequences.shape, dtype=object)
rev_entries = np.empty(entries.shape, dtype=object)
for i, sequence in enumerate(sequences):
rev_sequences[i] = sequence[::-1]
rev_entries[i] = entries[i] + '_reverse'
sequences =
completion: np.hstack((sequences, rev_sequences))
api: numpy.hstack
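Illustrative sketch (not from SequenceDataset): numpy.hstack concatenates 1-D arrays end to end, which is how the reversed sequences and entries get appended. The toy sequences are made up.

import numpy as np

sequences = np.array(['MKV', 'ACD'], dtype=object)
rev_sequences = np.array([s[::-1] for s in sequences], dtype=object)
combined = np.hstack((sequences, rev_sequences))
print(combined)    # ['MKV' 'ACD' 'VKM' 'DCA']
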
#!/usr/bin/env python
""" """
###############################################################################
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
import numpy as np
# Local modules.
# Project modules
# Globals and constants variables.
class _FitObject(object):
def residual(self, parameters, y, x):
yFit = self.evaluation(x, parameters)
error = y - yFit
return error
def residualLog(self, parameters, y, x):
yFit = self.evaluation(x, parameters)
error = np.log(y) - np.log(yFit)
return error
def evaluation(self, x, parameters):
raise NotImplementedError
def function(self, x):
raise NotImplementedError
def getNumberFitParameters(self):
raise NotImplementedError
class FitFunctions(_FitObject):
def __init__(self, fitFunctions):
self._setupFitFunction(fitFunctions)
def _setupFitFunction(self, fitFunctions):
self._fitFunctions = {}
iStart = 0
iEnd = 0
for index, fitFunction in enumerate(fitFunctions):
numberParameters = fitFunction.getNumberFitParameters()
iEnd += numberParameters
self._fitFunctions[index] = (fitFunction, iStart, iEnd)
iStart = iEnd
def evaluation(self, x, parameters):
yFit = 0.0
for fitFunctionName in self._fitFunctions:
fitFunction, iStart, iEnd = self._fitFunctions[fitFunctionName]
parametersFunction = parameters[iStart:iEnd]
yFit += fitFunction.evaluation(x, parametersFunction)
return yFit
def function(self, x):
y = 0.0
for fitFunctionName in self._fitFunctions:
fitFunction, iStart, iEnd = self._fitFunctions[fitFunctionName]
y += fitFunction.function(x)
return y
def getNumberFitParameters(self):
numberParameters = 0
for fitFunctionName in self._fitFunctions:
fitFunction, dummy_iStart, dummy_iEnd = self._fitFunctions[fitFunctionName]
numberParameters += fitFunction.getNumberFitParameters()
return numberParameters
def calculateR2(ys, fitYs):
SSE = 0.0
SSR = 0.0
ybar =
completion: np.mean(ys)
api: numpy.mean
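Illustrative sketch (not part of the original module): calculateR2 needs the mean of the observations, and a textbook R^2 follows directly from it. The y values below are made up.

import numpy as np

ys = np.array([1.0, 2.0, 3.0, 4.0])
fitYs = np.array([1.1, 1.9, 3.2, 3.8])
ybar = np.mean(ys)                    # numpy.mean of the observations
SSE = np.sum((ys - fitYs) ** 2)       # residual sum of squares
SST = np.sum((ys - ybar) ** 2)        # total sum of squares
print(1.0 - SSE / SST)                # R^2, close to 1 for a good fit (0.98 here)
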
""" this is used mainly to re-run experiments off-line from logged data, else see -> main"""
import time
import json
import sys
import math
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
from sklearn import svm
import copy
import numpy as np
from data_operators import *
from visualization import *
# print ("Random number with seed 30")
np.random.seed(123)
acc_test_dict = [
["15mil", "na"],
["8mil", "na"],
["15mil", "8mil", "na"],
["10mil", "na"],
["5mil", "na"],
["10mil", "5mil", "na"],
]
def generate_action(current_state, idx, min_vec, max_vec, number_of_actions):
if min_vec.shape[0] <= idx:
return current_state
else:
if min_vec[idx] != 0 or max_vec[idx] != 0:
if number_of_actions == 1:
actions = np.array((max_vec[idx]-min_vec[idx])/2)
else:
# actions = np.arange(min_vec[idx], max_vec[idx], (max_vec[idx]-min_vec[idx])/(number_of_actions+1))[1:]
actions = np.array(
list(np.arange(min_vec[idx], max_vec[idx], (max_vec[idx]-min_vec[idx])/(number_of_actions-1)))+[max_vec[idx]]
)
if current_state is None:
current_state = np.array([[0.0] * min_vec.shape[0]])
state = current_state.copy()
for i, act in enumerate(actions):
if i == 0:
current_state[:, idx] = act
else:
state_copy = state.copy()
state_copy[:, idx] = act
current_state = np.concatenate((current_state, state_copy), axis=0)
return generate_action(current_state, idx+1, min_vec, max_vec, number_of_actions)
# Generate action profile, by using recursive function
def get_action_profile(eta_min, eta_max, A_min, A_max, number_of_actions=3):
actions = generate_action(
None,
0,
np.concatenate((eta_min, A_min), axis=0),
completion: np.concatenate((eta_max, A_max), axis=0)
api: numpy.concatenate
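Illustrative sketch (not from the original script): numpy.concatenate along axis 0 joins the eta and A bound vectors into single lower/upper bound vectors for generate_action. The bounds below are made up.

import numpy as np

eta_min, eta_max = np.array([0.0, 0.0]), np.array([1.0, 1.0])
A_min, A_max = np.array([-2.0]), np.array([2.0])
lower = np.concatenate((eta_min, A_min), axis=0)   # [ 0.  0. -2.]
upper = np.concatenate((eta_max, A_max), axis=0)   # [ 1.  1.  2.]
print(lower, upper)
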
"""This script plots the household family distribution in homes. This script should help tune
pandemic_simulator.environment.make_population.make_population method of the simulator.
The overview of home assignment to be followed is as follows:
a) "Only 4.5 percent of older adults live in nursing homes and
2 percent in assisted living facilities. The majority of older adults (93.5 percent) live in the community."
- https://www.ncbi.nlm.nih.gov/books/NBK51841/
b) "In 2019, there was an average of 1.93 children under 18 per family in the United States"
- https://www.statista.com/statistics/718084/average-number-of-own-children-per-family/
c) "Almost a quarter of U.S. children under the age of 18 live with one parent and no other adults (23%)"
- https://www.pewresearch.org
/fact-tank/2019/12/12/u-s-children-more-likely-than-children-in-other-countries-to-live-with-just-one-parent/
Note: There are unittests that check the household distribution under test/environment/test_households.py
"""
import numpy as np
from matplotlib import pyplot as plt
import pandemic_simulator as ps
def plot_household_distribution() -> None:
ps.init_globals()
config = ps.sh.small_town_config
cr = ps.env.globals.registry
assert cr
ps.env.make_locations(config)
ps.env.make_population(config)
retiree_homes_list = []
minor_homes_list = []
adult_homes_list = []
homes = cr.location_ids_of_type(ps.env.Home)
tot_persons = 0
for home in homes:
household = cr.get_persons_in_location(home)
adults = 0
minors = 0
retirees = 0
for member in household:
if member.age <= 18:
minors += 1
elif 18 < member.age <= 65:
adults += 1
else:
retirees += 1
if minors > 0:
minor_homes_list.append([minors, adults, retirees])
elif adults > 0:
adult_homes_list.append([minors, adults, retirees])
elif retirees > 0:
retiree_homes_list.append([minors, adults, retirees])
tot_persons += len(household)
minor_homes =
completion: np.asarray(minor_homes_list)
api: numpy.asarray
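Illustrative sketch (not from the plotting script): numpy.asarray turns the accumulated [minors, adults, retirees] lists into a 2-D array so per-column totals can be computed. The household rows below are made up.

import numpy as np

minor_homes_list = [[2, 2, 0], [1, 1, 0], [3, 2, 1]]   # hypothetical households with minors
minor_homes = np.asarray(minor_homes_list)
print(minor_homes.shape)          # (3, 3)
print(minor_homes.sum(axis=0))    # column totals: [6 5 1]
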
# coding: utf-8
from __future__ import division
import numpy as np
import scipy.spatial.distance as sd
from scipy.special import gamma
from scipy.linalg import toeplitz
from scipy.optimize import minimize
from scipy.stats import ttest_1samp as ttest
import hypertools as hyp
import pandas as pd
import warnings
from matplotlib import pyplot as plt
gaussian_params = {'var': 100}
laplace_params = {'scale': 100}
eye_params = {}
t_params = {'df': 100}
mexican_hat_params = {'sigma': 10}
uniform_params = {}
boxcar_params = {'width': 10}
def gaussian_weights(T, params=gaussian_params):
if params is None:
params = gaussian_params
c1 = np.divide(1, np.sqrt(2 * np.math.pi * params['var']))
c2 = np.divide(-1, 2 * params['var'])
sqdiffs = toeplitz(np.arange(T) ** 2)
return c1 * np.exp(c2 * sqdiffs)
def laplace_weights(T, params=laplace_params):
if params is None:
params = laplace_params
absdiffs = toeplitz(np.arange(T))
return np.multiply(np.divide(1, 2 * params['scale']), np.exp(-np.divide(absdiffs, params['scale'])))  # Laplace kernel: exp(-|dt| / scale) / (2 * scale)
def eye_weights(T, params=eye_params):
return np.eye(T)
def uniform_weights(T, params=uniform_params):
return np.ones([T, T])
def t_weights(T, params=t_params):
if params is None:
params = t_params
c1 = np.divide(gamma((params['df'] + 1) / 2), np.sqrt(params['df'] * np.math.pi) * gamma(params['df'] / 2))
c2 = np.divide(-params['df'] + 1, 2)
sqdiffs = toeplitz(np.arange(T) ** 2)
return np.multiply(c1, np.power(1 + np.divide(sqdiffs, params['df']), c2))
def mexican_hat_weights(T, params=mexican_hat_params):
if params is None:
params = mexican_hat_params
absdiffs = toeplitz(np.arange(T))
sqdiffs = toeplitz(np.arange(T) ** 2)
a = np.divide(2, np.sqrt(3 * params['sigma']) * np.power(np.math.pi, 0.25))
b = 1 - np.power(np.divide(absdiffs, params['sigma']), 2)
c = np.exp(-np.divide(sqdiffs, 2 * np.power(params['sigma'], 2)))
return np.multiply(a, np.multiply(b, c))
def boxcar_weights(T, params=boxcar_params):
if params is None:
params = boxcar_params
return np.multiply(toeplitz(np.arange(T)) < params['width']/2., 1.)
def format_data(data):
def zero_nans(x):
x[np.isnan(x)] = 0
return x
x = hyp.tools.format_data(data, ppca=False, )
return list(map(zero_nans, x))
def _is_empty(dict):
if not bool(dict):
return True
return False
def wcorr(a, b, weights):
'''
Compute moment-by-moment correlations between sets of observations
:param a: a number-of-timepoints by number-of-features observations matrix
:param b: a number-of-timepoints by number-of-features observations matrix
:param weights: a number-of-timepoints by number-of-timepoints weights matrix
specifying the per-timepoint weights to be considered (for each timepoint)
:return: an a.shape[1] by b.shape[1] by weights.shape[0] array of per-timepoint
correlation matrices.
'''
def weighted_var_diffs(x, w):
w[np.isnan(w)] = 0
if np.sum(np.abs(w)) == 0:
weights_tiled = np.ones(x.shape)
else:
weights_tiled = np.tile(w[:, np.newaxis], [1, x.shape[1]])
mx = np.sum(np.multiply(weights_tiled, x), axis=0)[:, np.newaxis].T
diffs = x - np.tile(mx, [x.shape[0], 1])
varx = np.sum(diffs ** 2, axis=0)[:, np.newaxis].T
return varx, diffs
autocorrelation = np.isclose(a, b).all()
corrs = np.zeros([a.shape[1], b.shape[1], weights.shape[1]])
for t in np.arange(weights.shape[1]):
vara, diffs_a = weighted_var_diffs(a, weights[:, t])
if autocorrelation:
varb = vara
diffs_b = diffs_a
else:
varb, diffs_b = weighted_var_diffs(b, weights[:, t])
alpha =
completion: np.dot(diffs_a.T, diffs_b)
api: numpy.dot
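Illustrative sketch (not part of wcorr): numpy.dot of the two (timepoints x features) difference matrices produces the features-by-features cross-product that the weighted correlation numerator needs. The matrices below are made up.

import numpy as np

diffs_a = np.array([[1.0, 0.0], [-1.0, 2.0], [0.0, -2.0]])   # 3 timepoints x 2 features
diffs_b = np.array([[2.0, 1.0], [0.0, -1.0], [-2.0, 0.0]])
alpha = np.dot(diffs_a.T, diffs_b)   # shape (2, 2): feature-by-feature cross-products
print(alpha)
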
import numpy as np
import matplotlib.pyplot as plt
import os
##########################################################################################
# --> compute training error
##########################################################################################
all_test_cnn = np.loadtxt('cnn_psitrain_predict.txt')
all_target_cnn = np.loadtxt('cnn_psitrain_correct.txt')
all_test_lnn = np.loadtxt('fnn_psitrain_predict.txt')
all_target_lnn = np.loadtxt('fnn_psitrain_correct.txt')
# compute percent error
perc_err_CNN_train = np.mean(np.abs((all_test_cnn - all_target_cnn) / all_target_cnn)) * 100 #1.8968585466864325
perc_err_LNN_train = np.mean(np.abs((all_test_lnn - all_target_lnn) / all_target_lnn)) * 100 #2.3877460329830593
##########################################################################################
##########################################################################################
# --> compute and plot test error
##########################################################################################
path = 'delta_psi_plots/'
if not os.path.exists(path):
os.mkdir(path)
all_test_cnn = np.loadtxt('cnn_psitest_predict.txt')
all_target_cnn =
completion: np.loadtxt('cnn_psitest_correct.txt')
api: numpy.loadtxt
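Illustrative sketch (not from the plotting script): numpy.loadtxt parses whitespace-separated numbers into an array; io.StringIO stands in here for the cnn_psitest_*.txt files, whose contents are invented for the demo.

import io
import numpy as np

fake_file = io.StringIO("0.98 1.02\n1.95 2.05\n")   # stand-in for 'cnn_psitest_correct.txt'
all_target_cnn = np.loadtxt(fake_file)
print(all_target_cnn.shape)    # (2, 2)
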
"""
@authors:
# =============================================================================
Information:
The functions in this script are used to solve to the spherical harmonic
Stokes coefficients.
todo: write acc_matrix generation AND geopot_matrix generation
# =============================================================================
"""
# =============================================================================
# LIBRARIES
# =============================================================================
import numpy as np
import numpy.linalg as npl
from numpy import sin, cos
import GH_import as imp
import GH_convert as conv
#import GH_generate as gen
#import GH_solve as solv
#import GH_displayGeoid as dgeo
#import GH_displaySat as dsat
#import GH_export as exp
#import GH_displayTopo as dtopo
import GH_terminal as term
#import GH_harmonics as harm
import GH_geoMath as gmath
#import GH_earthMap as emap
# =============================================================================
# FUNCTIONS FOR Sph Harm SOLVE
# =============================================================================
def Get_PotGradMatrix2 (lmax, Pos): #"R = 6378136.3 m):
"""
Returns the matrix of the gravitational potential gradient.
Watch out, it gets big fast.
Multiplying it with the appropriate column vector of coefficients
will return the acceleration at the given coordinates.
*There are no geoid coefficients for l=0, l=1*
*There are no sine coefficients for m=0*
Input:
lmax: max order
Pos: array of N_points positions in spherical coordinates (r, theta, phi)
*R: Reference radius in meters*
Output:
M_PotGrad: the matrix of the coefficients
"""
# constants
R = 6378.1363 # km
GM = 398600.4418 # km**3 s**-2
# wiki says : gm = 6.673*10**-11*5.975*10**24 = 398711749999999.94
N_points = len(Pos) # number of points
Cos_len = int( (lmax+1)*(lmax+2) /2 ) -3 # c20,c21,c22,c30,c31,c32, ...
Sin_len = int( (lmax )*(lmax+1) /2 ) # s21,s22,s31,s32,s33, ...
# print("cos sin lengths =",Cos_len, ",",Sin_len)
N_coef = Cos_len + Sin_len
M_PotGrad = np.ones((N_points * 3, N_coef)) # THE Potential Gradient Matrix
print(f"Generating BAM of shape = {M_PotGrad.shape}") # BAM = "Big Ass Matrix"
for i in range (0, N_points):
term.printProgressBar(i+1, N_points)
r, theta, phi = Pos[i] #spherical coordinates at the first point
Plm_z, Plm_dz = gmath.Pol_Legendre(lmax, lmax, sin(theta))
j = 0
k = Cos_len
for l in range (0, lmax +1):
for m in range (0, l +1):
# These equations were found in the GFZ document page 23
W_r = - GM/r**2 * (R/r)**l * (l+1) * Plm_z[m, l]
W_phi = -W_r * m * r / (l+1)
W_theta = GM/r * (R/r)**l * Plm_dz[m, l]
Sub_mat = np.zeros ((3,1))
Sub_mat = [ cos(m*phi)*W_r,
cos(m*phi)*W_theta,
-sin(m*phi)*W_phi] # multiply by: COS_lm_coef
M_PotGrad [3*i : 3*(i+1), j] = Sub_mat
j += 1
# for m of non-null, we get a sine coefficient
if (m != 0):
Sub_mat =
completion: np.zeros ((3,1))
api: numpy.zeros
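Illustrative sketch (not from the original solver): numpy.zeros pre-allocates the 3x1 sub-matrix, and the cosine/sine coefficient counts follow the triangular-number formulas used above. The lmax value is made up.

import numpy as np

lmax = 4
Cos_len = (lmax + 1) * (lmax + 2) // 2 - 3   # c20, c21, ..., clmax,lmax -> 12 for lmax=4
Sin_len = lmax * (lmax + 1) // 2             # s21, s22, ..., slmax,lmax -> 10 for lmax=4
Sub_mat = np.zeros((3, 1))                   # one (W_r, W_theta, W_phi) column
print(Cos_len, Sin_len, Sub_mat.shape)
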
#!/usr/bin/env python3
import unittest
import numpy as np
import numpy.testing as nptest
import pandas as pd
import pandas.testing as pdtest
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from datafold.dynfold.transform import (
TSCApplyLambdas,
TSCFeaturePreprocess,
TSCFiniteDifference,
TSCIdentity,
TSCPolynomialFeatures,
TSCPrincipalComponent,
TSCRadialBasis,
TSCTakensEmbedding,
TSCTransformerMixin,
)
from datafold.pcfold.kernels import *
from datafold.pcfold.timeseries.collection import TSCDataFrame, TSCException
def _all_tsc_transformers():
# only finds the ones that are imported (DMAP, e.g., is not here)
print(TSCTransformerMixin.__subclasses__())
class TestTSCTransform(unittest.TestCase):
def _setUp_simple_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
self.simple_df = pd.DataFrame(np.random.rand(9, 2), index=idx, columns=col)
def _setUp_takens_df(self):
idx = pd.MultiIndex.from_arrays(
[[0, 0, 1, 1, 15, 15, 45, 45, 45], [0, 1, 0, 1, 0, 1, 17, 18, 19]]
)
col = ["A", "B"]
# Requires non-random values
self.takens_df_short = pd.DataFrame(
np.arange(18).reshape([9, 2]), index=idx, columns=col
)
n_samples_timeseries = 100
idx = pd.MultiIndex.from_product(
[np.array([0, 1]), np.arange(n_samples_timeseries)]
)
self.takens_df_long = pd.DataFrame(
np.random.rand(n_samples_timeseries * 2, 2), index=idx, columns=col
)
def setUp(self) -> None:
self._setUp_simple_df()
self._setUp_takens_df()
def test_is_valid_sklearn_estimator(self):
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils.estimator_checks import check_estimator
TEST_ESTIMATORS = (
TSCIdentity(),
TSCPrincipalComponent(),
TSCFeaturePreprocess(sklearn_transformer=MinMaxScaler()),
TSCFeaturePreprocess(sklearn_transformer=StandardScaler()),
TSCPolynomialFeatures(),
)
for test_estimator in TEST_ESTIMATORS:
for estimator, check in check_estimator(test_estimator, generate_only=True):
try:
check(estimator)
except Exception as e:
print(estimator)
print(check)
raise e
def test_identity0(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity()
pdtest.assert_frame_equal(_id.fit_transform(tsc), tsc)
pdtest.assert_frame_equal(_id.inverse_transform(tsc), tsc)
def test_identity1(self):
tsc = TSCDataFrame(self.simple_df)
_id = TSCIdentity(include_const=True)
tsc_plus_const = tsc.copy(deep=True)
tsc_plus_const["const"] = 1
pdtest.assert_frame_equal(_id.fit_transform(tsc.copy()), tsc_plus_const)
pdtest.assert_frame_equal(_id.inverse_transform(tsc_plus_const), tsc)
def test_identity2(self):
data = np.random.rand(5, 5)
data_wo_const = TSCIdentity(include_const=False).fit_transform(data)
data_plus_const = TSCIdentity(include_const=True).fit_transform(data)
nptest.assert_equal(data, data_wo_const)
nptest.assert_equal(data_plus_const, np.column_stack([data, np.ones(5)]))
def test_identity3(self):
data = TSCDataFrame(self.simple_df)
data_wo_const = TSCIdentity(
include_const=False, rename_features=True
).fit_transform(data)
data_with_const = TSCIdentity(
include_const=True, rename_features=True
).fit_transform(data)
data = data.add_suffix("_id")
pdtest.assert_index_equal(data.columns, data_wo_const.columns)
data["const"] = 1
pdtest.assert_index_equal(data.columns, data_with_const.columns)
def test_scale_min_max(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("min-max")
scaled_tsc = scale.fit_transform(tsc_df)
# sanity check:
nptest.assert_allclose(scaled_tsc.min().to_numpy(), np.zeros(2), atol=1e-16)
nptest.assert_allclose(scaled_tsc.max().to_numpy(), np.ones(2), atol=1e-16)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_scale_standard(self):
tsc_df = TSCDataFrame(self.simple_df)
scale = TSCFeaturePreprocess.from_name("standard")
scaled_tsc = scale.fit_transform(tsc_df)
nptest.assert_array_equal(
scaled_tsc.to_numpy(),
StandardScaler(with_mean=True, with_std=True).fit_transform(
tsc_df.to_numpy()
),
)
# Undoing normalization must give original TSCDataFrame back
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(scaled_tsc))
def test_sklearn_scaler(self):
tsc_df = TSCDataFrame(self.simple_df)
from sklearn.preprocessing import (
MaxAbsScaler,
PowerTransformer,
QuantileTransformer,
RobustScaler,
)
# each tuple has the class and a dictionary with the init-options
scaler = [
(MaxAbsScaler, dict()),
(PowerTransformer, dict(method="yeo-johnson")),
(PowerTransformer, dict(method="box-cox")),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="uniform"),
),
(
QuantileTransformer,
dict(n_quantiles=tsc_df.shape[0], output_distribution="normal"),
),
(RobustScaler, dict()),
]
for cls, kwargs in scaler:
scale = TSCFeaturePreprocess(sklearn_transformer=cls(**kwargs))
tsc_transformed = scale.fit_transform(tsc_df)
# Check the underlying array equals:
nptest.assert_array_equal(
cls(**kwargs).fit_transform(tsc_df.to_numpy()),
tsc_transformed.to_numpy(),
)
# check inverse transform is equal the original TSCDataFrame:
pdtest.assert_frame_equal(tsc_df, scale.inverse_transform(tsc_transformed))
def test_polynomial_feature_transform01(self):
from sklearn.preprocessing import PolynomialFeatures
tsc = TSCDataFrame(self.simple_df)
for degree in [2, 3, 4]:
for include_bias in [True, False]:
actual = TSCPolynomialFeatures(
degree=degree, include_bias=include_bias, include_first_order=True
).fit_transform(tsc)
expected = PolynomialFeatures(
degree=degree, include_bias=include_bias
).fit_transform(tsc.to_numpy())
nptest.assert_array_equal(actual.to_numpy(), expected)
def test_polynomial_feature_transform02(self):
tsc = TSCDataFrame(self.simple_df)
for include_first_order in [True, False]:
poly = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=include_first_order
).fit(tsc)
actual = poly.transform(tsc)
expected = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_frame_equal(actual, expected)
def test_polynomial_feature_transform03(self):
tsc = TSCDataFrame(self.simple_df)
actual = TSCPolynomialFeatures(
degree=2, include_bias=True, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["1", "A^2", "A B", "B^2"], name="feature"),
)
actual = TSCPolynomialFeatures(
degree=2, include_bias=False, include_first_order=False
).fit_transform(tsc)
pdtest.assert_index_equal(
actual.columns,
pd.Index(["A^2", "A B", "B^2"], name="feature"),
)
def test_apply_lambda_transform01(self):
# use lambda identity function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform02(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
expected = tsc.apply(np.square, axis=0, raw=True)
expected.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
pdtest.assert_frame_equal(actual, expected)
def test_apply_lambda_transform03(self):
# use numpy function
tsc = TSCDataFrame(self.simple_df)
lambda_transform = TSCApplyLambdas(lambdas=[lambda x: x, np.square]).fit(tsc)
actual = lambda_transform.transform(tsc)
identity = tsc
identity.columns = pd.Index(
["A_lambda0", "B_lambda0"], name=TSCDataFrame.tsc_feature_col_name
)
squared = tsc.apply(np.square, axis=0, raw=True)
squared.columns = pd.Index(
["A_lambda1", "B_lambda1"], name=TSCDataFrame.tsc_feature_col_name
)
expected = pd.concat([identity, squared], axis=1)
pdtest.assert_frame_equal(actual, expected)
def test_pca_transform(self):
tsc = TSCDataFrame(self.simple_df)
pca = TSCPrincipalComponent(n_components=1).fit(tsc)
data = pca.transform(tsc)
self.assertIsInstance(data, TSCDataFrame)
pca_sklearn = PCA(n_components=1).fit(tsc.to_numpy())
data_sklearn = pca_sklearn.transform(tsc)
nptest.assert_allclose(data, data_sklearn, atol=1e-15)
nptest.assert_array_equal(
pca.inverse_transform(data).to_numpy(),
pca_sklearn.inverse_transform(data_sklearn),
)
def test_takens_embedding0(self):
simple_df = self.takens_df_short.drop("B", axis=1)
tsc_df = TSCDataFrame(simple_df)
takens = TSCTakensEmbedding(
delays=1,
lag=0,
frequency=1,
)
actual = takens.fit_transform(tsc_df)
self.assertIsInstance(actual, TSCDataFrame)
# First test
actual_numerics = actual.to_numpy() # only compare the numeric values
expected = np.array(
[
[2.0, 0.0],
[6.0, 4.0],
[10.0, 8.0],
[14.0, 12.0],
[16.0, 14.0],
]
)
nptest.assert_equal(actual_numerics, expected)
# Second test
actual_inverse = takens.inverse_transform(actual)
pdtest.assert_frame_equal(tsc_df.drop([0, 17], level=1), actual_inverse)
def test_takens_embedding1(self):
# test kappa = 1
tsc_df = TSCDataFrame.from_single_timeseries(
pd.DataFrame(
np.column_stack([[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]),
columns=["A", "B"],
dtype=float,
)
)
takens = TSCTakensEmbedding(lag=0, delays=5, frequency=1, kappa=1)
# embedd to a single instance
actual = takens.fit_transform(tsc_df)
self.assertIsInstance(actual, TSCDataFrame)
self.assertTrue(actual.has_degenerate())
self.assertEqual(actual.n_timeseries, 1)
# First test
actual_numerics = actual.to_numpy() # only compare the numeric values
expected = np.array([[5, 4, 3, 2, 1, 0]], dtype=float) * np.exp(
-1.0 * np.array([0, 1, 2, 3, 4, 5])
)
expected = np.repeat(expected, 2, axis=1)
completion: nptest.assert_equal(actual_numerics, expected)
api: numpy.testing.assert_equal
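Illustrative sketch (not part of the datafold tests): numpy.testing.assert_equal raises an informative AssertionError on any mismatch and returns silently otherwise. The arrays below are made up.

import numpy as np
import numpy.testing as nptest

actual_numerics = np.array([[5.0, 5.0], [4.0, 4.0]])
expected = np.array([[5.0, 5.0], [4.0, 4.0]])
nptest.assert_equal(actual_numerics, expected)   # passes: arrays are identical
print("assert_equal passed")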